diff --git a/base/client.jl b/base/client.jl
index 86e8ba25e77ae9..fc34a8cfda5dc4 100644
--- a/base/client.jl
+++ b/base/client.jl
@@ -81,7 +81,6 @@ end

 # deprecated function--preserved for DocTests.jl
 function ip_matches_func(ip, func::Symbol)
-    ip isa InterpreterIP || (ip -= 1)
     for fr in StackTraces.lookupat(ip)
         if fr === StackTraces.UNKNOWN || fr.from_c
             return false
diff --git a/base/deprecated.jl b/base/deprecated.jl
index a6dca85091e558..e38bebac2d8167 100644
--- a/base/deprecated.jl
+++ b/base/deprecated.jl
@@ -102,7 +102,6 @@ function firstcaller(bt::Vector, funcsyms)
     # Identify the calling line
     found = false
     for ip in bt
-        ip isa Base.InterpreterIP || (ip -= 1) # convert from return stack to call stack (for inlining info)
         lkups = StackTraces.lookupat(ip)
         for lkup in lkups
             if lkup == StackTraces.UNKNOWN || lkup.from_c
diff --git a/base/errorshow.jl b/base/errorshow.jl
index 61ec32faee7767..241f46237fb249 100644
--- a/base/errorshow.jl
+++ b/base/errorshow.jl
@@ -611,10 +611,8 @@ function process_backtrace(t::Vector, limit::Int=typemax(Int); skipC = true)
         lkups = t[i]
         if lkups isa StackFrame
             lkups = [lkups]
-        elseif lkups isa Base.InterpreterIP
-            lkups = StackTraces.lookupat(lkups)
         else
-            lkups = StackTraces.lookupat(lkups - 1)
+            lkups = StackTraces.lookupat(lkups)
         end
         for lkup in lkups
             if lkup === StackTraces.UNKNOWN
diff --git a/base/stacktraces.jl b/base/stacktraces.jl
index cd0c0d45462f8b..8d2ba609e911d0 100644
--- a/base/stacktraces.jl
+++ b/base/stacktraces.jl
@@ -73,7 +73,7 @@ An alias for `Vector{StackFrame}` provided for convenience; returned by calls to
 const StackTrace = Vector{StackFrame}

 const empty_sym = Symbol("")
-const UNKNOWN = StackFrame(empty_sym, empty_sym, -1, nothing, true, false, 0) # === lookup(C_NULL)
+const UNKNOWN = StackFrame(empty_sym, empty_sym, -1, nothing, true, false, 0) # === lookupat(C_NULL)


 #=
@@ -96,7 +96,7 @@ end


 """
-    lookupat(pointer::Union{Ptr{Cvoid}, UInt}) -> Vector{StackFrame}
+    lookupat(pointer::Ptr{Cvoid}) -> Vector{StackFrame}

Given a pointer to an execution context (usually generated by a call to `backtrace`), looks
up stack frame context information. Returns an array of frame information for all functions
@@ -188,7 +188,6 @@ trace, `stacktrace` first calls `backtrace`.
 function stacktrace(trace::Vector{<:Union{Base.InterpreterIP,Ptr{Cvoid}}}, c_funcs::Bool=false)
     stack = StackTrace()
     for ip in trace
-        ip isa Base.InterpreterIP || (ip -= 1) # convert from return stack to call stack (for inlining info)
         for frame in lookupat(ip)
             # Skip frames that come from C calls.
             if c_funcs || !frame.from_c
diff --git a/doc/src/manual/stacktraces.md b/doc/src/manual/stacktraces.md
index bfbe1ceb58c507..852bbcef18e9a3 100644
--- a/doc/src/manual/stacktraces.md
+++ b/doc/src/manual/stacktraces.md
@@ -305,7 +305,7 @@ s by passing them into [`StackTraces.lookupat`](@ref):

 ```julia-repl
 julia> pointer = backtrace()[1];

-julia> frame = StackTraces.lookupat(pointer - 1)
+julia> frame = StackTraces.lookupat(pointer)
 1-element Array{Base.StackTraces.StackFrame,1}:
  jl_apply_generic at gf.c:2167
diff --git a/src/interpreter-stacktrace.c b/src/interpreter-stacktrace.c
index c15fbbc6f4ef95..18cd5f4243fc14 100644
--- a/src/interpreter-stacktrace.c
+++ b/src/interpreter-stacktrace.c
@@ -397,13 +397,14 @@ JL_DLLEXPORT size_t jl_capture_interp_frame(uintptr_t *data, uintptr_t sp, uintp
 #else
     interpreter_state *s = (interpreter_state *)(sp+TOTAL_STACK_PADDING);
 #endif
-    if (space_remaining <= 1)
+    int required_space = 3;
+    if (space_remaining < required_space)
         return 0;
     // Sentinel value to indicate an interpreter frame
     data[0] = JL_BT_INTERP_FRAME;
     data[1] = s->mi ? (uintptr_t)s->mi : s->src ? (uintptr_t)s->src : (uintptr_t)jl_nothing;
     data[2] = (uintptr_t)s->ip;
-    return 2;
+    return required_space;
 }

 extern void * CALLBACK_ABI enter_interpreter_frame(void * CALLBACK_ABI (*callback)(interpreter_state *, void *), void *arg);
@@ -420,6 +421,7 @@ JL_DLLEXPORT int jl_is_enter_interpreter_frame(uintptr_t ip)
 JL_DLLEXPORT size_t jl_capture_interp_frame(uintptr_t *data, uintptr_t sp, uintptr_t fp,
                                             size_t space_remaining)
 {
+    // Leave bt_entry[0] as the native instruction ptr
     return 0;
 }
 #define CALLBACK_ABI
diff --git a/src/julia_internal.h b/src/julia_internal.h
index 8a48fed9b3f596..07f85dfa36fc58 100644
--- a/src/julia_internal.h
+++ b/src/julia_internal.h
@@ -636,10 +636,12 @@ typedef int bt_cursor_t;
 #endif
 // Special marker in backtrace data for encoding interpreter frames
 #define JL_BT_INTERP_FRAME (((uintptr_t)0)-1)
-size_t rec_backtrace(uintptr_t *data, size_t maxsize) JL_NOTSAFEPOINT;
-size_t rec_backtrace_ctx(uintptr_t *data, size_t maxsize, bt_context_t *ctx, int add_interp_frames) JL_NOTSAFEPOINT;
+// Maximum number of elements of bt_data taken up by an interpreter frame
+#define JL_BT_MAX_ENTRY_SIZE 3
+size_t rec_backtrace(uintptr_t *bt_data, size_t maxsize) JL_NOTSAFEPOINT;
+size_t rec_backtrace_ctx(uintptr_t *bt_data, size_t maxsize, bt_context_t *ctx, int add_interp_frames) JL_NOTSAFEPOINT;
 #ifdef LIBOSXUNWIND
-size_t rec_backtrace_ctx_dwarf(uintptr_t *data, size_t maxsize, bt_context_t *ctx, int add_interp_frames) JL_NOTSAFEPOINT;
+size_t rec_backtrace_ctx_dwarf(uintptr_t *bt_data, size_t maxsize, bt_context_t *ctx, int add_interp_frames) JL_NOTSAFEPOINT;
 #endif
 JL_DLLEXPORT void jl_get_backtrace(jl_array_t **bt, jl_array_t **bt2);
 void jl_critical_error(int sig, bt_context_t *context, uintptr_t *bt_data, size_t *bt_size);
diff --git a/src/stackwalk.c b/src/stackwalk.c
index 7dad4e0436a6c3..3d271728dbc0c3 100644
--- a/src/stackwalk.c
+++ b/src/stackwalk.c
@@ -27,12 +27,28 @@ extern "C" {
 static int jl_unw_init(bt_cursor_t *cursor, bt_context_t *context) JL_NOTSAFEPOINT;
 static int jl_unw_step(bt_cursor_t *cursor, uintptr_t *ip, uintptr_t *sp, uintptr_t *fp) JL_NOTSAFEPOINT;

-size_t jl_unw_stepn(bt_cursor_t *cursor, uintptr_t *ip, uintptr_t *sp, size_t maxsize, int add_interp_frames) JL_NOTSAFEPOINT
+// Record backtrace entries into bt_data by stepping cursor with jl_unw_step
+// until the outermost frame is encountered or the buffer bt_data is (close to)
+// full. Native instruction pointers are adjusted to point to the address of
+// the call instruction.
+//
+// `maxsize` is the size of the buffer `bt_data` (and `sp` if non-NULL). It
+// must be at least JL_BT_MAX_ENTRY_SIZE to accommodate extended backtrace
+// entries. If `sp != NULL`, the stack pointer corresponding to `bt_data[i]` is
+// stored in `sp[i]`.
+//
+// jl_unw_stepn will return 1 if there are more frames to come. The number of
+// elements of bt_data (and sp if non-NULL) which were used is returned in
+// bt_size.
+int jl_unw_stepn(bt_cursor_t *cursor, uintptr_t *bt_data, size_t *bt_size,
+                 uintptr_t *sp, size_t maxsize, int add_interp_frames) JL_NOTSAFEPOINT
 {
     jl_ptls_t ptls = jl_get_ptls_states();
     volatile size_t n = 0;
-    uintptr_t thesp;
-    uintptr_t thefp;
+    volatile int need_more_space = 0;
+    uintptr_t return_ip = 0;
+    uintptr_t thesp = 0;
+    uintptr_t thefp = 0;
 #if defined(_OS_WINDOWS_) && !defined(_CPU_X86_64_)
     assert(!jl_in_stackwalk);
     jl_in_stackwalk = 1;
@@ -44,21 +60,52 @@ size_t jl_unw_stepn(bt_cursor_t *cursor, uintptr_t *ip, uintptr_t *sp, size_t ma
         ptls->safe_restore = &buf;
 #endif
         while (1) {
-            if (n >= maxsize) {
-                n = maxsize; // return maxsize + 1 if ran out of space
-                break;
-            }
-            if (!jl_unw_step(cursor, &ip[n], &thesp, &thefp))
-                break;
-            if (sp)
-                sp[n] = thesp;
-            if (add_interp_frames && jl_is_enter_interpreter_frame(ip[n])) {
-                n += jl_capture_interp_frame(&ip[n], thesp, thefp, maxsize-n-1) + 1;
-            } else {
-                n++;
-            }
+            if (n + JL_BT_MAX_ENTRY_SIZE > maxsize) {
+                // Postpone advancing the cursor: may need more space
+                need_more_space = 1;
+                break;
+            }
+            int have_more_frames = jl_unw_step(cursor, &return_ip, &thesp, &thefp);
+            if (sp)
+                sp[n] = thesp;
+            // For the purposes of looking up debug info for functions, we want
+            // to harvest addresses for the *call* instruction `call_ip` during
+            // stack walking. However, this information isn't directly
+            // available. Instead, the stack walk discovers the address
+            // `return_ip` which would be *returned to* as the stack is
+            // unwound.
+            //
+            // To infer `call_ip` in full generality we would need to
+            // understand each platform ABI instruction pointer encoding and
+            // calling conventions, noting that these may vary per stack frame.
+            // (For example signal frames on linux x86_64 have `call_ip ==
+            // return_ip`, and ARM thumb instruction pointers use the low bit
+            // as a flag which must be cleared before further use.)
+            //
+            // However for our current purposes it seems sufficient to assume
+            // that `call_ip = return_ip-1`. See also:
+            //
+            // * The LLVM unwinder functions step() and setInfoBasedOnIPRegister()
+            //   https://github.com/llvm/llvm-project/blob/master/libunwind/src/UnwindCursor.hpp
+            // * The way that libunwind handles it in `unw_get_proc_name`:
+            //   https://lists.nongnu.org/archive/html/libunwind-devel/2014-06/msg00025.html
+            uintptr_t call_ip = return_ip - 1;
+            if (call_ip == JL_BT_INTERP_FRAME) {
+                // Never leave special marker in the bt data as it can corrupt the GC.
+                call_ip = 0;
+            }
+            uintptr_t *bt_entry = bt_data + n;
+            size_t entry_sz = 0;
+            if (add_interp_frames && jl_is_enter_interpreter_frame(call_ip) &&
+                (entry_sz = jl_capture_interp_frame(bt_entry, thesp, thefp, maxsize-n)) != 0) {
+                n += entry_sz;
+            } else {
+                *bt_entry = call_ip;
+                n++;
+            }
+            if (!have_more_frames)
+                break;
         }
-        n++;
 #if !defined(_OS_WINDOWS_)
     }
     else {
@@ -73,26 +120,27 @@ size_t jl_unw_stepn(bt_cursor_t *cursor, uintptr_t *ip, uintptr_t *sp, size_t ma
 #if defined(_OS_WINDOWS_) && !defined(_CPU_X86_64_)
     jl_in_stackwalk = 0;
 #endif
-    return n;
+    *bt_size = n;
+    return need_more_space;
 }

-size_t rec_backtrace_ctx(uintptr_t *data, size_t maxsize,
+size_t rec_backtrace_ctx(uintptr_t *bt_data, size_t maxsize,
                          bt_context_t *context, int add_interp_frames)
 {
-    size_t n = 0;
+    size_t bt_size = 0;
     bt_cursor_t cursor;
     if (!jl_unw_init(&cursor, context))
         return 0;
-    n = jl_unw_stepn(&cursor, data, NULL, maxsize, add_interp_frames);
-    return n > maxsize ? maxsize : n;
+    jl_unw_stepn(&cursor, bt_data, &bt_size, NULL, maxsize, add_interp_frames);
+    return bt_size;
 }

-size_t rec_backtrace(uintptr_t *data, size_t maxsize)
+size_t rec_backtrace(uintptr_t *bt_data, size_t maxsize)
 {
     bt_context_t context;
     memset(&context, 0, sizeof(context));
     jl_unw_get(&context);
-    return rec_backtrace_ctx(data, maxsize, &context, 1);
+    return rec_backtrace_ctx(bt_data, maxsize, &context, 1);
 }

 static jl_value_t *array_ptr_void_type JL_ALWAYS_LEAFTYPE = NULL;
@@ -114,18 +162,27 @@ JL_DLLEXPORT jl_value_t *jl_backtrace_from_here(int returnsp)
     memset(&context, 0, sizeof(context));
     jl_unw_get(&context);
     if (jl_unw_init(&cursor, &context)) {
-        size_t n = 0, offset = 0;
-        do {
+        size_t offset = 0;
+        while (1) {
             jl_array_grow_end(ip, maxincr);
-            if (returnsp) jl_array_grow_end(sp, maxincr);
-            n = jl_unw_stepn(&cursor, (uintptr_t*)jl_array_data(ip) + offset,
-                    returnsp ? (uintptr_t*)jl_array_data(sp) + offset : NULL, maxincr, 1);
-            offset += maxincr;
-        } while (n > maxincr);
-        jl_array_del_end(ip, maxincr - n);
-        if (returnsp) jl_array_del_end(sp, maxincr - n);
+            uintptr_t *sp_ptr = NULL;
+            if (returnsp) {
+                sp_ptr = (uintptr_t*)jl_array_data(sp) + offset;
+                jl_array_grow_end(sp, maxincr);
+            }
+            size_t size_incr = 0;
+            int need_more_space = jl_unw_stepn(&cursor, (uintptr_t*)jl_array_data(ip) + offset,
+                                               &size_incr, sp_ptr, maxincr, 1);
+            offset += size_incr;
+            if (!need_more_space) {
+                jl_array_del_end(ip, jl_array_len(ip) - offset);
+                if (returnsp)
+                    jl_array_del_end(sp, jl_array_len(sp) - offset);
+                break;
+            }
+        }

-        n = 0;
+        size_t n = 0;
         while (n < jl_array_len(ip)) {
             if ((uintptr_t)jl_array_ptr_ref(ip, n) == JL_BT_INTERP_FRAME) {
                 jl_array_ptr_1d_push(bt2, jl_array_ptr_ref(ip, n+1));
@@ -331,9 +388,7 @@ static int jl_unw_step(bt_cursor_t *cursor, uintptr_t *ip, uintptr_t *sp, uintpt
         *sp = (uintptr_t)cursor->stackframe.AddrStack.Offset;
     if (fp)
         *fp = (uintptr_t)cursor->stackframe.AddrFrame.Offset;
-    if (*ip == 0 || *ip == JL_BT_INTERP_FRAME) {
-        // don't leave special marker in the bt data as it can corrupt the GC.
-        *ip = 0;
+    if (*ip == 0) {
         if (!readable_pointer((LPCVOID)*sp))
             return 0;
         cursor->stackframe.AddrPC.Offset = *(DWORD32*)*sp;      // POP EIP (aka RET)
@@ -349,9 +404,7 @@ static int jl_unw_step(bt_cursor_t *cursor, uintptr_t *ip, uintptr_t *sp, uintpt
     *sp = (uintptr_t)cursor->Rsp;
     if (fp)
         *fp = (uintptr_t)cursor->Rbp;
-    if (*ip == 0 || *ip == JL_BT_INTERP_FRAME) {
-        // don't leave special marker in the bt data as it can corrupt the GC.
-        *ip = 0;
+    if (*ip == 0) {
         if (!readable_pointer((LPCVOID)*sp))
             return 0;
         cursor->Rip = *(DWORD64*)*sp;      // POP RIP (aka RET)
@@ -404,8 +457,7 @@ static int jl_unw_step(bt_cursor_t *cursor, uintptr_t *ip, uintptr_t *sp, uintpt
     unw_word_t reg;
     if (unw_get_reg(cursor, UNW_REG_IP, &reg) < 0)
         return 0;
-    // don't leave special marker in the bt data as it can corrupt the GC.
-    *ip = reg == JL_BT_INTERP_FRAME ? 0 : reg;
+    *ip = reg;
     if (unw_get_reg(cursor, UNW_REG_SP, &reg) < 0)
         return 0;
     *sp = reg;
@@ -426,15 +478,15 @@ int jl_unw_init_dwarf(bt_cursor_t *cursor, bt_context_t *uc)
 {
     return unw_init_local_dwarf(cursor, uc) != 0;
 }

-size_t rec_backtrace_ctx_dwarf(uintptr_t *data, size_t maxsize,
+size_t rec_backtrace_ctx_dwarf(uintptr_t *bt_data, size_t maxsize,
                                bt_context_t *context, int add_interp_frames)
 {
-    size_t n;
+    size_t bt_size = 0;
     bt_cursor_t cursor;
     if (!jl_unw_init_dwarf(&cursor, context))
         return 0;
-    n = jl_unw_stepn(&cursor, data, NULL, maxsize, add_interp_frames);
-    return n > maxsize ? maxsize : n;
+    jl_unw_stepn(&cursor, bt_data, &bt_size, NULL, maxsize, add_interp_frames);
+    return bt_size;
 }
 #endif
diff --git a/stdlib/Profile/src/Profile.jl b/stdlib/Profile/src/Profile.jl
index 124be6f5a7ef0c..76241d171310ee 100644
--- a/stdlib/Profile/src/Profile.jl
+++ b/stdlib/Profile/src/Profile.jl
@@ -8,8 +8,8 @@ module Profile

 import Base.StackTraces: lookupat, UNKNOWN, show_spec_linfo, StackFrame

 # deprecated functions: use `getdict` instead
-lookup(ip::UInt) = lookupat(convert(Ptr{Cvoid}, ip) - 1)
-lookup(ip::Ptr{Cvoid}) = lookupat(ip - 1)
+lookup(ip::UInt) = lookupat(convert(Ptr{Cvoid}, ip))
+lookup(ip::Ptr{Cvoid}) = lookupat(ip)

 export @profile
@@ -199,7 +199,7 @@ end
 function getdict(data::Vector{UInt})
     dict = LineInfoDict()
     for ip in data
-        get!(() -> lookupat(convert(Ptr{Cvoid}, ip)), dict, UInt64(ip))
+        get!(() -> lookup(convert(Ptr{Cvoid}, ip)), dict, UInt64(ip))
     end
     return dict
 end
@@ -336,17 +336,6 @@ function fetch()
     end
     data = Vector{UInt}(undef, len)
     GC.@preserve data unsafe_copyto!(pointer(data), get_data_pointer(), len)
-    # post-process the data to convert from a return-stack to a call-stack
-    first = true
-    for i = 1:length(data)
-        if data[i] == 0
-            first = true
-        elseif first
-            first = false
-        else
-            data[i] -= 1
-        end
-    end

     return data
 end
diff --git a/test/backtrace.jl b/test/backtrace.jl
index 41dd7f4b04a362..82871a0280b910 100644
--- a/test/backtrace.jl
+++ b/test/backtrace.jl
@@ -1,16 +1,15 @@
 # This file is a part of Julia. License is MIT: https://julialang.org/license

-lookup(ip) = StackTraces.lookupat(ip - 1)
-lookup(ip::Base.InterpreterIP) = StackTraces.lookupat(ip) # TODO: Base.InterpreterIP should not need a special-case
+import StackTraces: lookupat

 # Test location information for inlined code (ref issues #1334 #12544)
 module test_inline_bt
 using Test
-import ..lookup
+import ..lookupat

 function get_bt_frames(functionname, bt)
     for i = 1:length(bt)
-        lkup = lookup(bt[i])
+        lkup = lookupat(bt[i])
         lkup[end].func == functionname && return lkup
     end
     return StackTraces.StackFrame[]
@@ -97,13 +96,13 @@ end

 module BackTraceTesting
 using Test
-import ..lookup
+import ..lookupat

 @inline bt2() = backtrace()
 @inline bt1() = bt2()
 bt() = bt1()

-lkup = map(lookup, bt())
+lkup = map(lookupat, bt())
 hasbt = hasbt2 = false
 for sfs in lkup
     for sf in sfs
@@ -122,7 +121,7 @@ function btmacro()
     ret = @timed backtrace()
     ret[1]
 end
-lkup = map(lookup, btmacro())
+lkup = map(lookupat, btmacro())
 hasme = hasbtmacro = false
 for sfs in lkup
     for sf in sfs
@@ -147,7 +146,7 @@ bt = eval(quote
         catch_backtrace()
     end
 end)
-lkup = map(lookup, bt)
+lkup = map(lookupat, bt)
 hastoplevel = false
 for sfs in lkup
     for sf in sfs
@@ -175,7 +174,7 @@ let bt, found = false
     @testset begin
         bt = backtrace()
     end
-    for frame in map(lookup, bt)
+    for frame in map(lookupat, bt)
         if frame[1].line == @__LINE__() - 3 && frame[1].file == Symbol(@__FILE__)
             found = true; break
         end
@@ -187,7 +186,7 @@ end
 let bt, found = false
     @info ""
     bt = backtrace()
-    for frame in map(lookup, bt)
+    for frame in map(lookupat, bt)
         if frame[1].line == @__LINE__() - 2 && frame[1].file == Symbol(@__FILE__)
             found = true; break
         end
diff --git a/test/compiler/inference.jl b/test/compiler/inference.jl
index 555cf5e8af6a24..7377bf34f48cef 100644
--- a/test/compiler/inference.jl
+++ b/test/compiler/inference.jl
@@ -894,7 +894,7 @@ function break_21369()
         i = 1
         local fr
         while true
-            fr = Base.StackTraces.lookupat(bt[i] - 1)[end]
+            fr = Base.StackTraces.lookupat(bt[i])[end]
             if !fr.from_c && fr.func !== :error
                 break
             end
diff --git a/test/meta.jl b/test/meta.jl
index 9f67a98252f022..baf26472b454ec 100644
--- a/test/meta.jl
+++ b/test/meta.jl
@@ -32,7 +32,7 @@ h_noinlined() = g_noinlined()

 function foundfunc(bt, funcname)
     for b in bt
-        for lkup in StackTraces.lookupat(b - 1)
+        for lkup in StackTraces.lookupat(b)
             if lkup.func == funcname
                 return true
             end
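The sketch below (not part of the patch; `print_julia_frames` is a hypothetical helper) illustrates the net effect for Julia-level consumers: because `jl_unw_stepn` now stores the adjusted call address (`return_ip - 1`) directly in the backtrace buffer, raw `backtrace()` entries can be handed to `StackTraces.lookupat` as-is, with no `- 1` correction at the call site.

```julia
# Hypothetical usage example, assuming the lookupat API introduced above.
# backtrace() entries are already call-instruction addresses after this
# change, so no `ip - 1` adjustment is applied before symbol lookup.
using Base.StackTraces: lookupat

function print_julia_frames(bt)
    for ip in bt
        for frame in lookupat(ip)
            frame.from_c && continue   # skip C frames, as stacktrace() does by default
            println(frame)
        end
    end
end

print_julia_frames(backtrace())
```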