From 49659507152dfc38145f9ab0984affa68c2cad50 Mon Sep 17 00:00:00 2001
From: Shuhei Kadowaki
Date: Tue, 5 Jul 2022 17:39:52 +0900
Subject: [PATCH 1/2] inlining: follow #45272, improve the `finalizer` inlining implementation

- added more comments to explain the purpose of code
- properly handle `ConstantCase`
- erase `Core.finalizer` call more aggressively
- improved type stability by handling edge cases
- renamed `AddFinalizerUse` to `FinalizerUse`
- removed dead pieces of code
---
 base/compiler/optimize.jl       |   1 -
 base/compiler/ssair/inlining.jl | 164 +++++++++++++++++---------------
 base/compiler/ssair/passes.jl   | 132 ++++++++++++-------------
 src/builtins.c                  |   1 +
 test/compiler/inline.jl         |  85 ++++++++++++++++-
 5 files changed, 238 insertions(+), 145 deletions(-)

diff --git a/base/compiler/optimize.jl b/base/compiler/optimize.jl
index 9f4b25c8f3326..f2b56fc493788 100644
--- a/base/compiler/optimize.jl
+++ b/base/compiler/optimize.jl
@@ -30,7 +30,6 @@ const IR_FLAG_EFFECT_FREE = 0x01 << 4
 # This statement was proven not to throw
 const IR_FLAG_NOTHROW = 0x01 << 5
 
-
 const TOP_TUPLE = GlobalRef(Core, :tuple)
 
 #####################
diff --git a/base/compiler/ssair/inlining.jl b/base/compiler/ssair/inlining.jl
index ca9eb818f0edb..3a6333470fabe 100644
--- a/base/compiler/ssair/inlining.jl
+++ b/base/compiler/ssair/inlining.jl
@@ -767,7 +767,7 @@ function rewrite_apply_exprargs!(
         elseif isa(new_info, MethodMatchInfo) || isa(new_info, UnionSplitInfo)
             new_infos = isa(new_info, MethodMatchInfo) ? MethodMatchInfo[new_info] : new_info.matches
             # See if we can inline this call to `iterate`
-            analyze_single_call!(
+            handle_call!(
                 ir, state1.id, new_stmt, new_infos, flag,
                 new_sig, istate, todo)
         end
@@ -874,8 +874,7 @@ function validate_sparams(sparams::SimpleVector)
 end
 
 function analyze_method!(match::MethodMatch, argtypes::Vector{Any},
-                         flag::UInt8, state::InliningState,
-                         do_resolve::Bool = true)
+                         flag::UInt8, state::InliningState)
     method = match.method
     spec_types = match.spec_types
 
@@ -909,7 +908,7 @@ function analyze_method!(match::MethodMatch, argtypes::Vector{Any},
     todo = InliningTodo(mi, match, argtypes)
     # If we don't have caches here, delay resolving this MethodInstance
     # until the batch inlining step (or an external post-processing pass)
-    do_resolve && state.mi_cache === nothing && return todo
+    state.mi_cache === nothing && return todo
     return resolve_todo(todo, state, flag)
 end
 
@@ -921,7 +920,7 @@ function retrieve_ir_for_inlining(mi::MethodInstance, src::Array{UInt8, 1})
     src = ccall(:jl_uncompress_ir, Any, (Any, Ptr{Cvoid}, Any), mi.def, C_NULL, src::Vector{UInt8})::CodeInfo
     return inflate_ir!(src, mi)
 end
-retrieve_ir_for_inlining(mi::MethodInstance, src::CodeInfo) = inflate_ir(src, mi)::IRCode
+retrieve_ir_for_inlining(mi::MethodInstance, src::CodeInfo) = inflate_ir(src, mi)
 retrieve_ir_for_inlining(mi::MethodInstance, ir::IRCode) = copy(ir)
 
 function handle_single_case!(
@@ -1225,10 +1224,8 @@ function process_simple!(ir::IRCode, idx::Int, state::InliningState, todo::Vecto
 end
 
 # TODO inline non-`isdispatchtuple`, union-split callsites?
-function compute_inlining_cases(
-    infos::Vector{MethodMatchInfo}, flag::UInt8,
-    sig::Signature, state::InliningState,
-    do_resolve::Bool = true)
+function compute_inlining_cases(infos::Vector{MethodMatchInfo},
+    flag::UInt8, sig::Signature, state::InliningState)
     argtypes = sig.argtypes
     cases = InliningCase[]
     local any_fully_covered = false
@@ -1245,7 +1242,7 @@ function compute_inlining_cases(
             continue
         end
         for match in meth
-            handled_all_cases &= handle_match!(match, argtypes, flag, state, cases, true, do_resolve)
+            handled_all_cases &= handle_match!(match, argtypes, flag, state, cases, #=allow_abstract=#true)
             any_fully_covered |= match.fully_covers
         end
     end
@@ -1258,23 +1255,10 @@ function compute_inlining_cases(
     return cases, handled_all_cases & any_fully_covered
 end
 
-function analyze_single_call!(
-    ir::IRCode, idx::Int, stmt::Expr, infos::Vector{MethodMatchInfo}, flag::UInt8,
-    sig::Signature, state::InliningState, todo::Vector{Pair{Int, Any}})
-
-    r = compute_inlining_cases(infos, flag, sig, state)
-    r === nothing && return nothing
-    cases, all_covered = r
-    handle_cases!(ir, idx, stmt, argtypes_to_type(sig.argtypes), cases,
-        all_covered, todo, state.params)
-end
-
-# similar to `analyze_single_call!`, but with constant results
-function handle_const_call!(
-    ir::IRCode, idx::Int, stmt::Expr, cinfo::ConstCallInfo, flag::UInt8,
-    sig::Signature, state::InliningState, todo::Vector{Pair{Int, Any}})
+function compute_inlining_cases(info::ConstCallInfo,
+    flag::UInt8, sig::Signature, state::InliningState)
     argtypes = sig.argtypes
-    (; call, results) = cinfo
+    (; call, results) = info
     infos = isa(call, MethodMatchInfo) ? MethodMatchInfo[call] : call.matches
     cases = InliningCase[]
     local any_fully_covered = false
@@ -1302,7 +1286,7 @@ function handle_const_call!(
                 handled_all_cases &= handle_const_prop_result!(result, argtypes, flag, state, cases, true)
             else
                 @assert result === nothing
-                handled_all_cases &= handle_match!(match, argtypes, flag, state, cases, true)
+                handled_all_cases &= handle_match!(match, argtypes, flag, state, cases, #=allow_abstract=#true)
             end
         end
     end
@@ -1312,21 +1296,39 @@ function handle_const_call!(
         filter!(case::InliningCase->isdispatchtuple(case.sig), cases)
     end
 
-    handle_cases!(ir, idx, stmt, argtypes_to_type(argtypes), cases,
-        handled_all_cases & any_fully_covered, todo, state.params)
+    return cases, handled_all_cases & any_fully_covered
+end
+
+function handle_call!(
+    ir::IRCode, idx::Int, stmt::Expr, infos::Vector{MethodMatchInfo}, flag::UInt8,
+    sig::Signature, state::InliningState, todo::Vector{Pair{Int, Any}})
+    cases = compute_inlining_cases(infos, flag, sig, state)
+    cases === nothing && return nothing
+    cases, all_covered = cases
+    handle_cases!(ir, idx, stmt, argtypes_to_type(sig.argtypes), cases,
+        all_covered, todo, state.params)
+end
+
+function handle_const_call!(
+    ir::IRCode, idx::Int, stmt::Expr, info::ConstCallInfo, flag::UInt8,
+    sig::Signature, state::InliningState, todo::Vector{Pair{Int, Any}})
+    cases = compute_inlining_cases(info, flag, sig, state)
+    cases === nothing && return nothing
+    cases, all_covered = cases
+    handle_cases!(ir, idx, stmt, argtypes_to_type(sig.argtypes), cases,
+        all_covered, todo, state.params)
 end
 
 function handle_match!(
     match::MethodMatch, argtypes::Vector{Any}, flag::UInt8, state::InliningState,
-    cases::Vector{InliningCase}, allow_abstract::Bool = false,
-    do_resolve::Bool = true)
+    cases::Vector{InliningCase}, allow_abstract::Bool = false)
     spec_types = match.spec_types
     allow_abstract || isdispatchtuple(spec_types) || return false
     # we may see duplicated dispatch signatures here when a signature gets widened
     # during abstract interpretation: for the purpose of inlining, we can just skip
     # processing this dispatch candidate
     _any(case->case.sig === spec_types, cases) && return true
-    item = analyze_method!(match, argtypes, flag, state, do_resolve)
+    item = analyze_method!(match, argtypes, flag, state)
     item === nothing && return false
     push!(cases, InliningCase(spec_types, item))
     return true
@@ -1384,6 +1386,54 @@ function handle_const_opaque_closure_call!(
     return nothing
 end
 
+function handle_finalizer_call!(
+    ir::IRCode, stmt::Expr, info::FinalizerInfo, state::InliningState)
+    # Only inline finalizers that are known nothrow and notls.
+    # This avoids having to set up state for finalizer isolation
+    (is_nothrow(info.effects) && is_notaskstate(info.effects)) || return nothing
+
+    info = info.info
+    if isa(info, MethodMatchInfo)
+        infos = MethodMatchInfo[info]
+    elseif isa(info, UnionSplitInfo)
+        infos = info.matches
+    # elseif isa(info, ConstCallInfo)
+    #     # NOTE currently this code path isn't active as constant propagation won't happen
+    #     # for `Core.finalizer` call because inference currently isn't able to fold a mutable
+    #     # object as a constant
+    else
+        return nothing
+    end
+
+    ft = argextype(stmt.args[2], ir)
+    has_free_typevars(ft) && return nothing
+    f = singleton_type(ft)
+    argtypes = Vector{Any}(undef, 2)
+    argtypes[1] = ft
+    argtypes[2] = argextype(stmt.args[3], ir)
+    sig = Signature(f, ft, argtypes)
+
+    cases = compute_inlining_cases(infos, #=flag=#UInt8(0), sig, state)
+    cases === nothing && return nothing
+    cases, all_covered = cases
+    if all_covered && length(cases) == 1
+        # NOTE we don't append `item1` to `stmt` here so that we don't serialize
+        # `Core.Compiler` data structure into the global cache
+        item1 = cases[1].item
+        if isa(item1, InliningTodo)
+            push!(stmt.args, true)
+            push!(stmt.args, item1.mi)
+        elseif isa(item1, InvokeCase)
+            push!(stmt.args, false)
+            push!(stmt.args, item1.invoke)
+        elseif isa(item1, ConstantCase)
+            push!(stmt.args, nothing)
+            push!(stmt.args, item1.val)
+        end
+    end
+    return nothing
+end
+
 function inline_const_if_inlineable!(inst::Instruction)
     rt = inst[:type]
     if rt isa Const && is_inlineable_constant(rt.val)
@@ -1434,53 +1484,15 @@ function assemble_inline_todo!(ir::IRCode, state::InliningState)
         end
 
         # Handle invoke
-        if sig.f === Core.invoke
-            if isa(info, InvokeCallInfo)
-                inline_invoke!(ir, idx, stmt, info, flag, sig, state, todo)
-            end
+        if isa(info, InvokeCallInfo)
+            inline_invoke!(ir, idx, stmt, info, flag, sig, state, todo)
             continue
         end
 
         # Handle finalizer
-        if sig.f === Core.finalizer
-            if isa(info, FinalizerInfo)
-                # Only inline finalizers that are known nothrow and notls.
-                # This avoids having to set up state for finalizer isolation
-                (is_nothrow(info.effects) && is_notaskstate(info.effects)) || continue
-
-                info = info.info
-                if isa(info, MethodMatchInfo)
-                    infos = MethodMatchInfo[info]
-                elseif isa(info, UnionSplitInfo)
-                    infos = info.matches
-                else
-                    continue
-                end
-
-                ft = argextype(stmt.args[2], ir)
-                has_free_typevars(ft) && return nothing
-                f = singleton_type(ft)
-                argtypes = Vector{Any}(undef, 2)
-                argtypes[1] = ft
-                argtypes[2] = argextype(stmt.args[3], ir)
-                sig = Signature(f, ft, argtypes)
-
-                cases, all_covered = compute_inlining_cases(infos, UInt8(0), sig, state, false)
-                length(cases) == 0 && continue
-                if all_covered && length(cases) == 1
-                    if isa(cases[1], InliningCase)
-                        case1 = cases[1].item
-                        if isa(case1, InliningTodo)
-                            push!(stmt.args, true)
-                            push!(stmt.args, case1.mi)
-                        elseif isa(case1, InvokeCase)
-                            push!(stmt.args, false)
-                            push!(stmt.args, case1.invoke)
-                        end
-                    end
-                end
-                continue
-            end
+        if isa(info, FinalizerInfo)
+            handle_finalizer_call!(ir, stmt, info, state)
+            continue
         end
 
         # if inference arrived here with constant-prop'ed result(s),
@@ -1501,7 +1513,7 @@ function assemble_inline_todo!(ir::IRCode, state::InliningState)
             continue # isa(info, ReturnTypeCallInfo), etc.
         end
 
-        analyze_single_call!(ir, idx, stmt, infos, flag, sig, state, todo)
+        handle_call!(ir, idx, stmt, infos, flag, sig, state, todo)
     end
 
     return todo
diff --git a/base/compiler/ssair/passes.jl b/base/compiler/ssair/passes.jl
index a5ebcaa37efd4..70d0b918b367e 100644
--- a/base/compiler/ssair/passes.jl
+++ b/base/compiler/ssair/passes.jl
@@ -14,7 +14,7 @@ GetfieldUse(idx::Int) = SSAUse(:getfield, idx)
 PreserveUse(idx::Int) = SSAUse(:preserve, idx)
 NoPreserve() = SSAUse(:nopreserve, 0)
 IsdefinedUse(idx::Int) = SSAUse(:isdefined, idx)
-AddFinalizerUse(idx::Int) = SSAUse(:add_finalizer, idx)
+FinalizerUse(idx::Int) = SSAUse(:finalizer, idx)
 
 """
     du::SSADefUse
@@ -882,7 +882,7 @@ function sroa_pass!(ir::IRCode, inlining::Union{Nothing, InliningState} = nothin
                 elseif is_isdefined
                     push!(defuse.uses, IsdefinedUse(idx))
                 elseif is_finalizer
-                    push!(defuse.uses, AddFinalizerUse(idx))
+                    push!(defuse.uses, FinalizerUse(idx))
                 else
                     push!(defuse.uses, GetfieldUse(idx))
                 end
@@ -948,12 +948,15 @@ function sroa_pass!(ir::IRCode, inlining::Union{Nothing, InliningState} = nothin
     end
 end
 
+# NOTE we resolve the inlining source here as we don't want to serialize `Core.Compiler`
+# data structure into the global cache (see the comment in `handle_finalizer_call!`)
 function try_inline_finalizer!(ir::IRCode, argexprs::Vector{Any}, idx::Int, mi::MethodInstance, inlining::InliningState)
     code = get(inlining.mi_cache, mi, nothing)
+    et = inlining.et
     if code isa CodeInstance
         if use_const_api(code)
            # No code in the function - Nothing to do
-            inlining.et !== nothing && push!(inlining.et, mi)
+            et !== nothing && push!(et, mi)
            return true
         end
         src = code.inferred
@@ -969,15 +972,14 @@ function try_inline_finalizer!(ir::IRCode, argexprs::Vector{Any}, idx::Int, mi::
     length(src.cfg.blocks) == 1 || return false
 
     # Ok, we're committed to inlining the finalizer
-    inlining.et !== nothing && push!(inlining.et, mi)
+    et !== nothing && push!(et, mi)
 
     linetable_offset, extra_coverage_line = ir_inline_linetable!(ir.linetable, src, mi.def, ir[SSAValue(idx)][:line])
     if extra_coverage_line != 0
         insert_node!(ir, idx, NewInstruction(Expr(:code_coverage_effect), Nothing, extra_coverage_line))
     end
 
-    # TODO: Use the actual inliner here rather than open coding this special
-    # purpose inliner.
+    # TODO: Use the actual inliner here rather than open coding this special purpose inliner.
     spvals = mi.sparam_vals
     ssa_rename = Vector{Any}(undef, length(src.stmts))
     for idx′ = 1:length(src.stmts)
@@ -1001,6 +1003,58 @@ function try_inline_finalizer!(ir::IRCode, argexprs::Vector{Any}, idx::Int, mi::
 end
 
 is_nothrow(ir::IRCode, pc::Int) = ir.stmts[pc][:flag] & (IR_FLAG_EFFECT_FREE | IR_FLAG_NOTHROW) ≠ 0
+
+function try_resolve_finalizer!(ir::IRCode, idx::Int, finalizer_idx::Int, defuse::SSADefUse, inlining::InliningState)
+    # For now: Require that all uses and defs are in the same basic block,
+    # so that live range calculations are easy.
+    bb = ir.cfg.blocks[block_for_inst(ir.cfg, first(defuse.uses).idx)]
+    minval::Int = typemax(Int)
+    maxval::Int = 0
+
+    function check_in_range(x::Union{Int,SSAUse})
+        if isa(x, SSAUse)
+            didx = x.idx
+        else
+            didx = x
+        end
+        didx in bb.stmts || return false
+        if didx < minval
+            minval = didx
+        end
+        if didx > maxval
+            maxval = didx
+        end
+        return true
+    end
+
+    check_in_range(idx) || return nothing
+    all(check_in_range, defuse.uses) || return nothing
+    all(check_in_range, defuse.defs) || return nothing
+
+    # For now: Require all statements in the basic block range to be nothrow.
+    all(minval:maxval) do idx::Int
+        return is_nothrow(ir, idx) || idx == finalizer_idx
+    end || return nothing
+
+    # Ok, `finalizer` rewrite is legal.
+    finalizer_stmt = ir[SSAValue(finalizer_idx)][:inst]
+    argexprs = Any[finalizer_stmt.args[2], finalizer_stmt.args[3]]
+    inline = finalizer_stmt.args[4]
+    if inline === nothing
+        # No code in the function - Nothing to do
+    else
+        mi = finalizer_stmt.args[5]::MethodInstance
+        if inline::Bool && try_inline_finalizer!(ir, argexprs, maxval, mi, inlining)
+            # the finalizer body has been inlined
+        else
+            insert_node!(ir, maxval, NewInstruction(Expr(:invoke, mi, argexprs...), Nothing), true)
+        end
+    end
+    # Erase the call to `finalizer`
+    ir[SSAValue(finalizer_idx)][:inst] = nothing
+    return nothing
+end
+
 function sroa_mutables!(ir::IRCode, defuses::IdDict{Int, Tuple{SPCSet, SSADefUse}}, used_ssas::Vector{Int}, lazydomtree::LazyDomtree, inlining::Union{Nothing, InliningState})
     for (idx, (intermediaries, defuse)) in defuses
         intermediaries = collect(intermediaries)
@@ -1024,72 +1078,22 @@ function sroa_mutables!(ir::IRCode, defuses::IdDict{Int, Tuple{SPCSet, SSADefUse
         # error at runtime, but is not illegal to have in the IR.
         ismutabletype(typ) || continue
         typ = typ::DataType
-        # First check for any add_finalizer calls
-        add_finalizer_idx = nothing
+        # First check for any finalizer calls
+        finalizer_idx = nothing
         for use in defuse.uses
-            if use.kind === :add_finalizer
-                # For now: Only allow one add_finalizer per allocation
-                add_finalizer_idx !== nothing && @goto skip
-                add_finalizer_idx = use.idx
+            if use.kind === :finalizer
+                # For now: Only allow one finalizer per allocation
+                finalizer_idx !== nothing && @goto skip
+                finalizer_idx = use.idx
             end
         end
-        if add_finalizer_idx !== nothing
-            # For now: Require that all uses and defs are in the same basic block,
-            # so that live range calculations are easy.
-            bb = ir.cfg.blocks[block_for_inst(ir.cfg, first(defuse.uses).idx)]
-            minval::Int = typemax(Int)
-            maxval::Int = 0
-
-            check_in_range(defuse) = check_in_range(defuse.idx)
-            function check_in_range(didx::Int)
-                didx in bb.stmts || return false
-                if didx < minval
-                    minval = didx
-                end
-                if didx > maxval
-                    maxval = didx
-                end
-                return true
-            end
-
-            check_in_range(idx) || continue
-            _all(check_in_range, defuse.uses) || continue
-            _all(check_in_range, defuse.defs) || continue
-
-            # For now: Require all statements in the basic block range to be
-            # nothrow.
-            all_nothrow = _all(idx->is_nothrow(ir, idx) || idx == add_finalizer_idx, minval:maxval)
-            all_nothrow || continue
-
-            # Ok, finalizer rewrite is legal.
-            add_finalizer_stmt = ir[SSAValue(add_finalizer_idx)][:inst]
-            argexprs = Any[add_finalizer_stmt.args[2], add_finalizer_stmt.args[3]]
-            may_inline = add_finalizer_stmt.args[4]::Bool
-            mi = add_finalizer_stmt.args[5]::Union{MethodInstance, Nothing}
-            if may_inline && mi !== nothing
-                if try_inline_finalizer!(ir, argexprs, maxval, add_finalizer_stmt.args[5], inlining)
-                    @goto done_finalizer
-                end
-                mi = compileable_specialization(inlining.et, mi, Effects()).invoke
-            end
-            if mi !== nothing
-                insert_node!(ir, maxval,
-                    NewInstruction(Expr(:invoke, mi, argexprs...), Nothing),
-                    true)
-            else
-                insert_node!(ir, maxval,
-                    NewInstruction(Expr(:call, argexprs...), Nothing),
-                    true)
-            end
-            @label done_finalizer
-            # Erase call to add_finalizer
-            ir[SSAValue(add_finalizer_idx)][:inst] = nothing
+        if finalizer_idx !== nothing && inlining !== nothing
+            try_resolve_finalizer!(ir, idx, finalizer_idx, defuse, inlining)
             continue
         end
         # Partition defuses by field
         fielddefuse = SSADefUse[SSADefUse() for _ = 1:fieldcount(typ)]
         all_eliminated = all_forwarded = true
-        has_finalizer = false
         for use in defuse.uses
             if use.kind === :preserve
                 for du in fielddefuse
diff --git a/src/builtins.c b/src/builtins.c
index 2e93a752c3d29..f4ed6b39e56bb 100644
--- a/src/builtins.c
+++ b/src/builtins.c
@@ -1602,6 +1602,7 @@ JL_CALLABLE(jl_f_donotdelete)
 
 JL_CALLABLE(jl_f_finalizer)
 {
+    // NOTE the compiler may temporarily insert additional arguments for the later inlining pass
     JL_NARGS(finalizer, 2, 4);
     jl_task_t *ct = jl_current_task;
     jl_gc_add_finalizer_(ct->ptls, args[1], args[0]);
diff --git a/test/compiler/inline.jl b/test/compiler/inline.jl
index 94331da6a7d03..9b0f5cd1754c5 100644
--- a/test/compiler/inline.jl
+++ b/test/compiler/inline.jl
@@ -1300,7 +1300,6 @@ mutable struct DoAllocNoEscape
         end
     end
 end
-
 let src = code_typed1() do
         for i = 1:1000
             DoAllocNoEscape()
@@ -1309,6 +1308,65 @@ let src = code_typed1() do
     @test count(isnew, src.code) == 0
 end
 
+# Test a case where `Core.finalizer` is registered interprocedurally,
+# but the object is still eligible for SROA after inlining
+mutable struct DoAllocNoEscapeInter end
+
+let src = code_typed1() do
+        for i = 1:1000
+            obj = DoAllocNoEscapeInter()
+            finalizer(obj) do this
+                nothrow_side_effect(nothing)
+            end
+        end
+    end
+    @test count(isnew, src.code) == 0
+end
+
+function register_finalizer!(obj)
+    finalizer(obj) do this
+        nothrow_side_effect(nothing)
+    end
+end
+let src = code_typed1() do
+        for i = 1:1000
+            obj = DoAllocNoEscapeInter()
+            register_finalizer!(obj)
+        end
+    end
+    @test count(isnew, src.code) == 0
+end
+
+function genfinalizer(val)
+    return function (this)
+        nothrow_side_effect(val)
+    end
+end
+let src = code_typed1() do
+        for i = 1:1000
+            obj = DoAllocNoEscapeInter()
+            finalizer(genfinalizer(nothing), obj)
+        end
+    end
+    @test count(isnew, src.code) == 0
+end
+
+# Test that we can inline a finalizer that just returns a constant value
+mutable struct DoAllocConst
+    function DoAllocConst()
+        finalizer(new()) do this
+            return nothing
+        end
+    end
+end
+let src = code_typed1() do
+        for i = 1:1000
+            DoAllocConst()
+        end
+    end
+    @test count(isnew, src.code) == 0
+end
+
 # Test that finalizer elision doesn't cause a throw to be inlined into a function
 # that shouldn't have it
 const finalizer_should_throw = Ref{Bool}(true)
@@ -1334,7 +1392,6 @@ end
 @test f_finalizer_throws()
 
 # Test finalizers with static parameters
-global last_finalizer_type::Type = Any
 mutable struct DoAllocNoEscapeSparam{T}
     x::T
     function finalizer_sparam(d::DoAllocNoEscapeSparam{T}) where {T}
@@ -1346,7 +1403,6 @@ mutable struct DoAllocNoEscapeSparam{T}
     end
 end
 DoAllocNoEscapeSparam(x::T) where {T} = DoAllocNoEscapeSparam{T}(x)
-
 let src = code_typed1(Tuple{Any}) do x
         for i = 1:1000
             DoAllocNoEscapeSparam(x)
@@ -1366,7 +1422,6 @@ mutable struct DoAllocNoEscapeNoInline
         finalizer(noinline_finalizer, new())
     end
 end
-
 let src = code_typed1() do
         for i = 1:1000
             DoAllocNoEscapeNoInline()
@@ -1376,6 +1431,28 @@ let src = code_typed1() do
     @test count(isinvoke(:noinline_finalizer), src.code) == 1
 end
 
+# Test that we resolve a `finalizer` call that we currently don't handle
+mutable struct DoAllocNoEscapeBranch
+    val::Int
+    function DoAllocNoEscapeBranch(val::Int)
+        finalizer(new(val)) do this
+            if this.val > 500
+                nothrow_side_effect(this.val)
+            else
+                nothrow_side_effect(nothing)
+            end
+        end
+    end
+end
+let src = code_typed1() do
+        for i = 1:1000
+            DoAllocNoEscapeBranch(i)
+        end
+    end
+    @test !any(iscall((src, Core.finalizer)), src.code)
+    @test !any(isinvoke(:finalizer), src.code)
+end
+
 # optimize `[push!|pushfirst!](::Vector{Any}, x...)`
 @testset "optimize `$f(::Vector{Any}, x...)`" for f = Any[push!, pushfirst!]
     @eval begin

From 7bab5d51c5892df7c5360a65b920f52b982adb23 Mon Sep 17 00:00:00 2001
From: Shuhei Kadowaki
Date: Wed, 13 Jul 2022 10:35:18 +0900
Subject: [PATCH 2/2] compiler: NFC changes

---
 base/compiler/abstractinterpretation.jl |  6 +++---
 base/compiler/ssair/passes.jl           | 11 ++++-------
 base/show.jl                            |  4 ++--
 3 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl
index e2802768d98ad..92b6f0b67b83c 100644
--- a/base/compiler/abstractinterpretation.jl
+++ b/base/compiler/abstractinterpretation.jl
@@ -496,13 +496,13 @@ function abstract_call_method(interp::AbstractInterpreter, method::Method, @nosp
         add_remark!(interp, sv, "Refusing to infer into `depwarn`")
         return MethodCallResult(Any, false, false, nothing, Effects())
     end
-    topmost = nothing
+
     # Limit argument type tuple growth of functions:
     # look through the parents list to see if there's a call to the same method
    # and from the same method.
     # Returns the topmost occurrence of that repeated edge.
-    edgecycle = false
-    edgelimited = false
+    edgecycle = edgelimited = false
+    topmost = nothing
     for infstate in InfStackUnwind(sv)
         if method === infstate.linfo.def
diff --git a/base/compiler/ssair/passes.jl b/base/compiler/ssair/passes.jl
index 70d0b918b367e..58626f5279dde 100644
--- a/base/compiler/ssair/passes.jl
+++ b/base/compiler/ssair/passes.jl
@@ -916,14 +916,11 @@ function sroa_pass!(ir::IRCode, inlining::Union{Nothing, InliningState} = nothin
             visited_phinodes, field, lifting_cache, result_t, lifted_leaves, val, lazydomtree)
 
         # Insert the undef check if necessary
-        if any_undef
-            if val === nothing
-                insert_node!(compact, SSAValue(idx),
-                    non_effect_free(NewInstruction(Expr(:throw_undef_if_not, Symbol("##getfield##"), false), Nothing)))
-            else
-                # val must be defined
-            end
+        if any_undef && val === nothing
+            insert_node!(compact, SSAValue(idx), non_effect_free(NewInstruction(
+                Expr(:throw_undef_if_not, Symbol("##getfield##"), false), Nothing)))
         else
+            # val must be defined
             @assert val !== nothing
         end
 
diff --git a/base/show.jl b/base/show.jl
index 0ee4ef497ff1b..850f6a9bf5e2b 100644
--- a/base/show.jl
+++ b/base/show.jl
@@ -1192,8 +1192,8 @@ function print_fullname(io::IO, m::Module)
     end
 end
 
-function sourceinfo_slotnames(src::CodeInfo)
-    slotnames = src.slotnames
+sourceinfo_slotnames(src::CodeInfo) = sourceinfo_slotnames(src.slotnames)
+function sourceinfo_slotnames(slotnames::Vector{Symbol})
     names = Dict{String,Int}()
     printnames = Vector{String}(undef, length(slotnames))
     for i in eachindex(slotnames)