From cbf339c736bf5c37e8872b5609d1c43fd62fc6d9 Mon Sep 17 00:00:00 2001 From: Justin Boswell Date: Tue, 5 Nov 2019 09:37:48 -0800 Subject: [PATCH] Generalized Mem tracing (#536) * Ported alloc tracer from Java CRT, added unit tests * Generalized backtrace/backtrace_symbols for multiple platforms * Added tests to ensure midstream usage works * Stack trace decoding test now uses virtual logger * Fixed AWS_VARIABLE_LENGTH_ARRAY on windows, causing stack corruption * Converted all tests to use the mem tracer * Removed allocator from test harness structure, enabled logging by default --- include/aws/common/allocator.h | 54 +++ include/aws/common/assert.h | 7 - include/aws/common/logging.h | 1 + include/aws/common/macros.h | 4 +- include/aws/common/system_info.h | 44 +++ include/aws/testing/aws_test_harness.h | 110 ++---- source/assert.c | 4 +- source/common.c | 1 + source/memtrace.c | 519 +++++++++++++++++++++++++ source/posix/system_info.c | 89 ++++- source/windows/system_info.c | 143 +++++-- tests/CMakeLists.txt | 5 + tests/assert_test.c | 2 - tests/logging/logging_test_utilities.c | 4 +- tests/logging/test_logger.c | 22 +- tests/logging/test_logger.h | 10 +- tests/memtrace_test.c | 250 ++++++++++++ tests/system_info_tests.c | 72 ++-- 18 files changed, 1175 insertions(+), 166 deletions(-) create mode 100644 source/memtrace.c create mode 100644 tests/memtrace_test.c diff --git a/include/aws/common/allocator.h b/include/aws/common/allocator.h index 327c56655..3d5b441e9 100644 --- a/include/aws/common/allocator.h +++ b/include/aws/common/allocator.h @@ -110,6 +110,60 @@ int aws_mem_realloc(struct aws_allocator *allocator, void **ptr, size_t oldsize, * that we can leave unchanged on failure. */ +enum aws_mem_trace_level { + AWS_MEMTRACE_NONE = 0, /* no tracing */ + AWS_MEMTRACE_BYTES = 1, /* just track allocation sizes and total allocated */ + AWS_MEMTRACE_STACKS = 2, /* capture callstacks for each allocation */ +}; + +#if defined(AWS_HAVE_EXECINFO) || defined(WIN32) || defined(__APPLE__) +# define AWS_MEMTRACE_STACKS_AVAILABLE +#endif + +/* + * Wraps an allocator and tracks all external allocations. If aws_mem_trace_dump() is called + * and there are still allocations active, they will be reported to the aws_logger at TRACE level. + * allocator - The allocator to wrap + * system_allocator - The allocator to allocate bookkeeping data from, or NULL to use the default + * level - The level to track allocations at + * frames_per_stack is how many frames to store per callstack if AWS_MEMTRACE_STACKS is in use, + * otherwise it is ignored. 8 tends to be a pretty good number balancing storage space vs useful stacks. + * Returns the tracer allocator, which should be used for all allocations that should be tracked. + */ +AWS_COMMON_API +struct aws_allocator *aws_mem_tracer_new( + struct aws_allocator *allocator, + struct aws_allocator *system_allocator, + enum aws_mem_trace_level level, + size_t frames_per_stack); + +/* + * Unwraps the traced allocator and cleans up the tracer. + * Returns the original allocator + */ +AWS_COMMON_API +struct aws_allocator *aws_mem_tracer_destroy(struct aws_allocator *trace_allocator); + +/* + * If there are outstanding allocations, dumps them to log, along with any information gathered + * based on the trace level set when aws_mem_trace() was called. + * Should be passed the tracer allocator returned from aws_mem_trace(). 
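+ * A typical sequence (an illustrative sketch; error handling omitted):
+ *   struct aws_allocator *tracer =
+ *       aws_mem_tracer_new(aws_default_allocator(), NULL, AWS_MEMTRACE_BYTES, 0);
+ *   ... make allocations through tracer ...
+ *   aws_mem_tracer_dump(tracer);    (logs anything still outstanding)
+ *   aws_mem_tracer_destroy(tracer);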
+ */ +AWS_COMMON_API +void aws_mem_tracer_dump(struct aws_allocator *trace_allocator); + +/* + * Returns the current number of bytes in outstanding allocations + */ +AWS_COMMON_API +size_t aws_mem_tracer_bytes(struct aws_allocator *trace_allocator); + +/* + * Returns the current number of outstanding allocations + */ +AWS_COMMON_API +size_t aws_mem_tracer_count(struct aws_allocator *trace_allocator); + AWS_EXTERN_C_END #endif /* AWS_COMMON_ALLOCATOR_H */ diff --git a/include/aws/common/assert.h b/include/aws/common/assert.h index 01beb6996..b6d99a2c4 100644 --- a/include/aws/common/assert.h +++ b/include/aws/common/assert.h @@ -26,13 +26,6 @@ AWS_COMMON_API AWS_DECLSPEC_NORETURN void aws_fatal_assert(const char *cond_str, const char *file, int line) AWS_ATTRIBUTE_NORETURN; -/** - * Print a backtrace from either the current stack, or (if provided) the current exception/signal - * call_site_data is siginfo_t* on POSIX, and LPEXCEPTION_POINTERS on Windows, and can be null - */ -AWS_COMMON_API -void aws_backtrace_print(FILE *fp, void *call_site_data); - AWS_EXTERN_C_END #if defined(CBMC) diff --git a/include/aws/common/logging.h b/include/aws/common/logging.h index 9eaa5c5ba..adb54b18c 100644 --- a/include/aws/common/logging.h +++ b/include/aws/common/logging.h @@ -84,6 +84,7 @@ struct aws_log_subject_info_list { enum aws_common_log_subject { AWS_LS_COMMON_GENERAL = 0, AWS_LS_COMMON_TASK_SCHEDULER, + AWS_LS_COMMON_MEMTRACE, AWS_LS_COMMON_LAST = (AWS_LS_COMMON_GENERAL + AWS_LOG_SUBJECT_SPACE_SIZE - 1) }; diff --git a/include/aws/common/macros.h b/include/aws/common/macros.h index 68e7f5d3f..4e1548613 100644 --- a/include/aws/common/macros.h +++ b/include/aws/common/macros.h @@ -63,7 +63,7 @@ AWS_STATIC_ASSERT(CALL_OVERLOAD_TEST(1, 2, 3) == 3); # define AWS_LIKELY(x) x # define AWS_UNLIKELY(x) x # define AWS_FORCE_INLINE __forceinline -# define AWS_VARIABLE_LENGTH_ARRAY(type, name, length) type *name = _alloca(sizeof(type) * length) +# define AWS_VARIABLE_LENGTH_ARRAY(type, name, length) type *name = _alloca(sizeof(type) * (length)) # define AWS_DECLSPEC_NORETURN __declspec(noreturn) # define AWS_ATTRIBUTE_NORETURN #else @@ -76,7 +76,7 @@ AWS_STATIC_ASSERT(CALL_OVERLOAD_TEST(1, 2, 3) == 3); # define AWS_DECLSPEC_NORETURN # define AWS_ATTRIBUTE_NORETURN __attribute__((noreturn)) # if defined(__cplusplus) -# define AWS_VARIABLE_LENGTH_ARRAY(type, name, length) type *name = alloca(sizeof(type) * length) +# define AWS_VARIABLE_LENGTH_ARRAY(type, name, length) type *name = alloca(sizeof(type) * (length)) # else # define AWS_VARIABLE_LENGTH_ARRAY(type, name, length) type name[length]; # endif /* defined(__cplusplus) */ diff --git a/include/aws/common/system_info.h b/include/aws/common/system_info.h index c8415bdc0..41df1e1d9 100644 --- a/include/aws/common/system_info.h +++ b/include/aws/common/system_info.h @@ -32,6 +32,50 @@ bool aws_is_debugger_present(void); AWS_COMMON_API void aws_debug_break(void); +#if defined(AWS_HAVE_EXECINFO) || defined(WIN32) || defined(__APPLE__) +# define AWS_BACKTRACE_STACKS_AVAILABLE +#endif + +/* + * Records a stack trace from the call site. + * Returns the number of stack entries/stack depth captured, or 0 if the operation + * is not supported on this platform + */ +AWS_COMMON_API +size_t aws_backtrace(void **frames, size_t num_frames); + +/* + * Converts stack frame pointers to symbols, if symbols are available + * Returns an array up to stack_depth long, that needs to be free()ed. + * stack_depth should be the length of frames. 
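+ * For example (an illustrative sketch):
+ *   void *frames[16];
+ *   size_t depth = aws_backtrace(frames, AWS_ARRAY_SIZE(frames));
+ *   char **symbols = aws_backtrace_symbols(frames, depth);
+ *   ... inspect symbols[0..depth-1] ...
+ *   free(symbols);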
+ * Returns NULL if the platform does not support stack frame translation + * or an error occurs + */ +char **aws_backtrace_symbols(void *const *frames, size_t stack_depth); + +/* + * Converts stack frame pointers to symbols, using all available system + * tools to try to produce a human readable result. This call will not be + * quick, as it shells out to addr2line or similar tools. + * On Windows, this is the same as aws_backtrace_symbols() + * Returns an array up to stack_depth long that needs to be free()ed. Missing + * frames will be NULL. + * Returns NULL if the platform does not support stack frame translation + * or an error occurs + */ +char **aws_backtrace_addr2line(void *const *frames, size_t stack_depth); + +/** + * Print a backtrace from either the current stack, or (if provided) the current exception/signal + * call_site_data is siginfo_t* on POSIX, and LPEXCEPTION_POINTERS on Windows, and can be null + */ +AWS_COMMON_API +void aws_backtrace_print(FILE *fp, void *call_site_data); + +/* Log the callstack from the current stack to the currently configured aws_logger */ +AWS_COMMON_API +void aws_backtrace_log(void); + AWS_EXTERN_C_END #endif /* AWS_COMMON_SYSTEM_INFO_H */ diff --git a/include/aws/testing/aws_test_harness.h b/include/aws/testing/aws_test_harness.h index cd04a8bfe..30b9112ca 100644 --- a/include/aws/testing/aws_test_harness.h +++ b/include/aws/testing/aws_test_harness.h @@ -17,7 +17,9 @@ #include #include +#include #include +#include #include #include @@ -39,46 +41,6 @@ the AWS_UNSTABLE_TESTING_API compiler flag # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif -struct memory_test_allocator { - size_t allocated; - size_t freed; - struct aws_mutex mutex; -}; - -struct memory_test_tracker { - size_t size; - void *blob; -}; - -static inline void *s_mem_acquire_malloc(struct aws_allocator *allocator, size_t size) { - struct memory_test_allocator *test_allocator = (struct memory_test_allocator *)allocator->impl; - - aws_mutex_lock(&test_allocator->mutex); - test_allocator->allocated += size; - struct memory_test_tracker *memory = - (struct memory_test_tracker *)malloc(size + sizeof(struct memory_test_tracker)); - - if (!memory) { - return NULL; - } - - memory->size = size; - memory->blob = (uint8_t *)memory + sizeof(struct memory_test_tracker); - aws_mutex_unlock(&test_allocator->mutex); - return memory->blob; -} - -static inline void s_mem_release_free(struct aws_allocator *allocator, void *ptr) { - struct memory_test_allocator *test_allocator = (struct memory_test_allocator *)allocator->impl; - - struct memory_test_tracker *memory = - (struct memory_test_tracker *)((uint8_t *)ptr - sizeof(struct memory_test_tracker)); - aws_mutex_lock(&test_allocator->mutex); - test_allocator->freed += memory->size; - aws_mutex_unlock(&test_allocator->mutex); - free(memory); -} - /** Prints a message to stdout using printf format that appends the function, file and line number. * If format is null, returns 0 without printing anything; otherwise returns 1. 
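 * For example, a call with ("expected %d, got %d", a, b) prints that message followed by
 * the calling function, file and line; a and b here are hypothetical values under test.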
*/ @@ -342,7 +304,6 @@ struct aws_test_harness { aws_test_before_fn *on_before; aws_test_run_fn *run; aws_test_after_fn *on_after; - struct aws_allocator *allocator; void *ctx; const char *test_name; int suppress_memcheck; @@ -385,34 +346,53 @@ static inline int s_aws_run_test_case(struct aws_test_harness *harness) { sigaction(SIGSEGV, &sa, NULL); #endif + /* track allocations and report leaks in tests, unless suppressed */ + struct aws_allocator *allocator = NULL; + if (harness->suppress_memcheck) { + allocator = aws_default_allocator(); + } else { + allocator = aws_mem_tracer_new(aws_default_allocator(), NULL, AWS_MEMTRACE_STACKS, 8); + } + + /* wire up a logger to stderr by default, may be replaced by some tests */ + struct aws_logger err_logger; + struct aws_logger_standard_options options; + options.file = AWS_TESTING_REPORT_FD; + options.level = AWS_LL_TRACE; + options.filename = NULL; + aws_logger_init_standard(&err_logger, aws_default_allocator(), &options); + aws_logger_set(&err_logger); + if (harness->on_before) { - harness->on_before(harness->allocator, harness->ctx); + harness->on_before(allocator, harness->ctx); } - int ret_val = harness->run(harness->allocator, harness->ctx); + int ret_val = harness->run(allocator, harness->ctx); if (harness->on_after) { - harness->on_after(harness->allocator, harness->ctx); + harness->on_after(allocator, harness->ctx); } if (!ret_val) { if (!harness->suppress_memcheck) { - struct memory_test_allocator *alloc_impl = (struct memory_test_allocator *)harness->allocator->impl; - ASSERT_UINT_EQUALS( - alloc_impl->allocated, - alloc_impl->freed, - "%s [ \033[31mFAILED\033[0m ]" - "Memory Leak Detected %d bytes were allocated, " - "but only %d were freed.", - harness->test_name, - alloc_impl->allocated, - alloc_impl->freed); + const size_t leaked_bytes = aws_mem_tracer_count(allocator); + if (leaked_bytes) { + aws_mem_tracer_dump(allocator); + } + ASSERT_UINT_EQUALS(0, aws_mem_tracer_count(allocator)); } + } + + /* clean up */ + aws_mem_tracer_destroy(allocator); + aws_logger_set(NULL); + aws_logger_clean_up(&err_logger); + if (!ret_val) { RETURN_SUCCESS("%s [ \033[32mOK\033[0m ]", harness->test_name); + } else { + FAIL("%s [ \033[31mFAILED\033[0m ]", harness->test_name); } - - FAIL("%s [ \033[31mFAILED\033[0m ]", harness->test_name); } /* Enables terminal escape sequences for text coloring on Windows. 
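 * The PASS/FAIL banners in s_aws_run_test_case above use ANSI sequences such as \033[32m
 * (green) and \033[31m (red); without this call, older Windows consoles print the escape
 * codes literally instead of coloring the text.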
*/ @@ -451,28 +431,12 @@ static inline int enable_vt_mode(void) { #endif -#define AWS_TEST_ALLOCATOR_INIT(name) \ - static struct memory_test_allocator name##_alloc_impl = { \ - 0, \ - 0, \ - AWS_MUTEX_INIT, \ - }; \ - static struct aws_allocator name##_allocator = { \ - s_mem_acquire_malloc, \ - s_mem_release_free, \ - NULL, \ - NULL, \ - &name##_alloc_impl, \ - }; - #define AWS_TEST_CASE_SUPRESSION(name, fn, s) \ static int fn(struct aws_allocator *allocator, void *ctx); \ - AWS_TEST_ALLOCATOR_INIT(name) \ static struct aws_test_harness name##_test = { \ NULL, \ fn, \ NULL, \ - &name##_allocator, \ NULL, \ #name, \ s, \ @@ -486,12 +450,10 @@ static inline int enable_vt_mode(void) { static void b(struct aws_allocator *allocator, void *ctx); \ static int fn(struct aws_allocator *allocator, void *ctx); \ static void af(struct aws_allocator *allocator, void *ctx); \ - AWS_TEST_ALLOCATOR_INIT(name) \ static struct aws_test_harness name##_test = { \ b, \ fn, \ af, \ - &name##_allocator, \ c, \ #name, \ s, \ diff --git a/source/assert.c b/source/assert.c index 560a32c92..6f90065a8 100644 --- a/source/assert.c +++ b/source/assert.c @@ -15,11 +15,11 @@ #include +#include + #include #include -void aws_debug_break(void); - void aws_fatal_assert(const char *cond_str, const char *file, int line) { aws_debug_break(); fprintf(stderr, "Fatal error condition occurred in %s:%d: %s\nExiting Application\n", file, line, cond_str); diff --git a/source/common.c b/source/common.c index f816083de..92622e765 100644 --- a/source/common.c +++ b/source/common.c @@ -241,6 +241,7 @@ static struct aws_log_subject_info s_common_log_subject_infos[] = { AWS_LS_COMMON_TASK_SCHEDULER, "task-scheduler", "Subject for task scheduler or task specific logging."), + DEFINE_LOG_SUBJECT_INFO(AWS_LS_COMMON_MEMTRACE, "memtrace", "Output from the aws_mem_trace_dump function"), }; static struct aws_log_subject_info_list s_common_log_subject_list = { diff --git a/source/memtrace.c b/source/memtrace.c new file mode 100644 index 000000000..505783ea1 --- /dev/null +++ b/source/memtrace.c @@ -0,0 +1,519 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* describes a single live allocation */ +struct alloc_info { + size_t size; + time_t time; + uint64_t stack; /* hash of stack frame pointers */ +}; + +/* Using a flexible array member is the C99 compliant way to have the frames immediately follow the header. + * + * MSVC doesn't know this for some reason so we need to use a pragma to make + * it happy. 
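+ *
+ * The matching allocation reserves the header and the frames in a single block, e.g.:
+ *   aws_mem_calloc(alloc, 1, sizeof(struct stack_trace) + frames_per_stack * sizeof(void *));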
+ */ +#ifdef _MSC_VER +# pragma warning(push) +# pragma warning(disable : 4200) /* nonstandard extension used: zero-sized array in struct/union */ +#endif + +/* one of these is stored per unique stack */ +struct stack_trace { + size_t depth; /* length of frames[] */ + void *const frames[]; /* rest of frames are allocated after */ +}; + +#ifdef _MSC_VER +# pragma warning(pop) +#endif + +/* Tracking structure, used as the allocator impl */ +struct alloc_tracer { + struct aws_allocator *allocator; /* underlying allocator */ + struct aws_allocator *system_allocator; /* bookkeeping allocator */ + enum aws_mem_trace_level level; /* level to trace at */ + size_t frames_per_stack; /* how many frames to keep per stack */ + struct aws_atomic_var allocated; /* bytes currently allocated */ + struct aws_mutex mutex; /* protects everything below */ + struct aws_hash_table allocs; /* live allocations, maps address -> alloc_info */ + struct aws_hash_table stacks; /* unique stack traces, maps hash -> stack_trace */ + struct aws_hash_table stack_info; /* only used during dumps, maps stack hash/id -> stack_metadata */ +}; + +/* number of frames to skip in call stacks (s_alloc_tracer_track, and the vtable function) */ +#define FRAMES_TO_SKIP 2 + +static void *s_trace_mem_acquire(struct aws_allocator *allocator, size_t size); +static void s_trace_mem_release(struct aws_allocator *allocator, void *ptr); +static void *s_trace_mem_realloc(struct aws_allocator *allocator, void *ptr, size_t old_size, size_t new_size); +static void *s_trace_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size); + +static struct aws_allocator s_trace_allocator = { + .mem_acquire = s_trace_mem_acquire, + .mem_release = s_trace_mem_release, + .mem_realloc = s_trace_mem_realloc, + .mem_calloc = s_trace_mem_calloc, +}; + +/* for the hash table, to destroy elements */ +static void s_destroy_alloc(void *data) { + struct aws_allocator *allocator = aws_default_allocator(); + struct alloc_info *alloc = data; + aws_mem_release(allocator, alloc); +} + +static void s_destroy_stacktrace(void *data) { + struct aws_allocator *allocator = aws_default_allocator(); + struct stack_trace *stack = data; + aws_mem_release(allocator, stack); +} + +static void s_alloc_tracer_init( + struct alloc_tracer *tracer, + struct aws_allocator *allocator, + struct aws_allocator *system_allocator, + enum aws_mem_trace_level level, + size_t frames_per_stack) { + + void *stack[1]; + if (!aws_backtrace(stack, 1)) { + /* clamp level if tracing isn't available */ + level = level > AWS_MEMTRACE_BYTES ? AWS_MEMTRACE_BYTES : level; + } + + tracer->allocator = allocator; + tracer->system_allocator = system_allocator; + tracer->level = level; + + if (tracer->level >= AWS_MEMTRACE_BYTES) { + aws_atomic_init_int(&tracer->allocated, 0); + AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_mutex_init(&tracer->mutex)); + AWS_FATAL_ASSERT( + AWS_OP_SUCCESS == + aws_hash_table_init( + &tracer->allocs, tracer->system_allocator, 1024, aws_hash_ptr, aws_ptr_eq, NULL, s_destroy_alloc)); + } + + if (tracer->level == AWS_MEMTRACE_STACKS) { + if (frames_per_stack > 128) { + frames_per_stack = 128; + } + tracer->frames_per_stack = (frames_per_stack) ? 
frames_per_stack : 8; + AWS_FATAL_ASSERT( + AWS_OP_SUCCESS == + aws_hash_table_init( + &tracer->stacks, tracer->system_allocator, 1024, aws_hash_ptr, aws_ptr_eq, NULL, s_destroy_stacktrace)); + } +} + +static void s_alloc_tracer_track(struct alloc_tracer *tracer, void *ptr, size_t size) { + if (tracer->level == AWS_MEMTRACE_NONE) { + return; + } + + aws_atomic_fetch_add(&tracer->allocated, size); + + struct alloc_info *alloc = aws_mem_calloc(tracer->system_allocator, 1, sizeof(struct alloc_info)); + alloc->size = size; + alloc->time = time(NULL); + + if (tracer->level == AWS_MEMTRACE_STACKS) { + /* capture stack frames, skip 2 for this function and the allocation vtable function */ + AWS_VARIABLE_LENGTH_ARRAY(void *, stack_frames, (FRAMES_TO_SKIP + tracer->frames_per_stack)); + size_t stack_depth = aws_backtrace(stack_frames, FRAMES_TO_SKIP + tracer->frames_per_stack); + if (stack_depth) { + /* hash the stack pointers */ + struct aws_byte_cursor stack_cursor = + aws_byte_cursor_from_array(stack_frames, stack_depth * sizeof(void *)); + uint64_t stack_id = aws_hash_byte_cursor_ptr(&stack_cursor); + alloc->stack = stack_id; /* associate the stack with the alloc */ + + aws_mutex_lock(&tracer->mutex); + struct aws_hash_element *item = NULL; + int was_created = 0; + AWS_FATAL_ASSERT( + AWS_OP_SUCCESS == + aws_hash_table_create(&tracer->stacks, (void *)(uintptr_t)stack_id, &item, &was_created)); + /* If this is a new stack, save it to the hash */ + if (was_created) { + struct stack_trace *stack = aws_mem_calloc( + tracer->system_allocator, + 1, + sizeof(struct stack_trace) + (sizeof(void *) * tracer->frames_per_stack)); + memcpy( + (void **)&stack->frames[0], + &stack_frames[FRAMES_TO_SKIP], + (stack_depth - FRAMES_TO_SKIP) * sizeof(void *)); + stack->depth = stack_depth - FRAMES_TO_SKIP; + item->value = stack; + } + aws_mutex_unlock(&tracer->mutex); + } + } + + aws_mutex_lock(&tracer->mutex); + AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_put(&tracer->allocs, ptr, alloc, NULL)); + aws_mutex_unlock(&tracer->mutex); +} + +static void s_alloc_tracer_untrack(struct alloc_tracer *tracer, void *ptr) { + if (tracer->level == AWS_MEMTRACE_NONE) { + return; + } + + aws_mutex_lock(&tracer->mutex); + struct aws_hash_element *item; + AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_find(&tracer->allocs, ptr, &item)); + /* because the tracer can be installed at any time, it is possible for an allocation to not + * be tracked. 
Therefore, we make sure the find succeeds, but then check the returned + * value */ + if (item) { + AWS_FATAL_ASSERT(item->key == ptr && item->value); + struct alloc_info *alloc = item->value; + aws_atomic_fetch_sub(&tracer->allocated, alloc->size); + s_destroy_alloc(item->value); + AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_remove_element(&tracer->allocs, item)); + } + aws_mutex_unlock(&tracer->mutex); +} + +/* used only to resolve stacks -> trace, count, size at dump time */ +struct stack_metadata { + struct aws_string *trace; + size_t count; + size_t size; +}; + +static int s_collect_stack_trace(void *context, struct aws_hash_element *item) { + struct alloc_tracer *tracer = context; + struct aws_hash_table *all_stacks = &tracer->stacks; + struct aws_allocator *allocator = tracer->system_allocator; + struct stack_metadata *stack_info = item->value; + struct aws_hash_element *stack_item = NULL; + AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_find(all_stacks, item->key, &stack_item)); + AWS_FATAL_ASSERT(stack_item); + struct stack_trace *stack = stack_item->value; + void *const *stack_frames = &stack->frames[0]; + + /* convert the frame pointers to symbols, and concat into a buffer */ + char buf[4096] = {0}; + struct aws_byte_buf stacktrace = aws_byte_buf_from_empty_array(buf, AWS_ARRAY_SIZE(buf)); + struct aws_byte_cursor newline = aws_byte_cursor_from_c_str("\n"); + char **symbols = aws_backtrace_addr2line(stack_frames, stack->depth); + for (size_t idx = 0; idx < stack->depth; ++idx) { + if (idx > 0) { + aws_byte_buf_append(&stacktrace, &newline); + } + const char *caller = symbols[idx]; + if (!caller || !caller[0]) { + break; + } + struct aws_byte_cursor cursor = aws_byte_cursor_from_c_str(caller); + aws_byte_buf_append(&stacktrace, &cursor); + } + free(symbols); + /* record the resultant buffer as a string */ + stack_info->trace = aws_string_new_from_array(allocator, stacktrace.buffer, stacktrace.len); + aws_byte_buf_clean_up(&stacktrace); + return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; +} + +static int s_stack_info_compare_size(const void *a, const void *b) { + const struct stack_metadata *stack_a = *(const struct stack_metadata **)a; + const struct stack_metadata *stack_b = *(const struct stack_metadata **)b; + return stack_b->size > stack_a->size; +} + +static int s_stack_info_compare_count(const void *a, const void *b) { + const struct stack_metadata *stack_a = *(const struct stack_metadata **)a; + const struct stack_metadata *stack_b = *(const struct stack_metadata **)b; + return stack_b->count > stack_a->count; +} + +static void s_stack_info_destroy(void *data) { + struct stack_metadata *stack = data; + struct aws_allocator *allocator = stack->trace->allocator; + aws_string_destroy(stack->trace); + aws_mem_release(allocator, stack); +} + +/* tally up count/size per stack from all allocs */ +static int s_collect_stack_stats(void *context, struct aws_hash_element *item) { + struct alloc_tracer *tracer = context; + struct alloc_info *alloc = item->value; + struct aws_hash_element *stack_item = NULL; + int was_created = 0; + AWS_FATAL_ASSERT( + AWS_OP_SUCCESS == + aws_hash_table_create(&tracer->stack_info, (void *)(uintptr_t)alloc->stack, &stack_item, &was_created)); + if (was_created) { + stack_item->value = aws_mem_calloc(tracer->system_allocator, 1, sizeof(struct stack_metadata)); + } + struct stack_metadata *stack = stack_item->value; + stack->count++; + stack->size += alloc->size; + return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; +} + +static int s_insert_stacks(void 
*context, struct aws_hash_element *item) { + struct aws_priority_queue *pq = context; + struct stack_metadata *stack = item->value; + AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_priority_queue_push(pq, &stack)); + return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; +} + +static int s_insert_allocs(void *context, struct aws_hash_element *item) { + struct aws_priority_queue *allocs = context; + struct alloc_info *alloc = item->value; + AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_priority_queue_push(allocs, &alloc)); + return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; +} + +static int s_alloc_compare(const void *a, const void *b) { + const struct alloc_info *alloc_a = *(const struct alloc_info **)a; + const struct alloc_info *alloc_b = *(const struct alloc_info **)b; + return alloc_a->time > alloc_b->time; +} + +static void s_alloc_tracer_dump(struct alloc_tracer *tracer) { + if (tracer->level == AWS_MEMTRACE_NONE || aws_atomic_load_int(&tracer->allocated) == 0) { + return; + } + + aws_mutex_lock(&tracer->mutex); + + size_t num_allocs = aws_hash_table_get_entry_count(&tracer->allocs); + AWS_LOGF_TRACE( + AWS_LS_COMMON_MEMTRACE, "################################################################################\n"); + AWS_LOGF_TRACE( + AWS_LS_COMMON_MEMTRACE, "# BEGIN MEMTRACE DUMP #\n"); + AWS_LOGF_TRACE( + AWS_LS_COMMON_MEMTRACE, "################################################################################\n"); + AWS_LOGF_TRACE( + AWS_LS_COMMON_MEMTRACE, + "tracer: %zu bytes still allocated in %zu allocations\n", + aws_atomic_load_int(&tracer->allocated), + num_allocs); + + /* convert stacks from pointers -> symbols */ + if (tracer->level == AWS_MEMTRACE_STACKS) { + AWS_FATAL_ASSERT( + AWS_OP_SUCCESS == + aws_hash_table_init( + &tracer->stack_info, tracer->allocator, 64, aws_hash_ptr, aws_ptr_eq, NULL, s_stack_info_destroy)); + /* collect active stacks, tally up sizes and counts */ + aws_hash_table_foreach(&tracer->allocs, s_collect_stack_stats, tracer); + /* collect stack traces for active stacks */ + aws_hash_table_foreach(&tracer->stack_info, s_collect_stack_trace, tracer); + } + + /* sort allocs by time */ + struct aws_priority_queue allocs; + aws_priority_queue_init_dynamic( + &allocs, tracer->allocator, num_allocs, sizeof(struct alloc_info *), s_alloc_compare); + aws_hash_table_foreach(&tracer->allocs, s_insert_allocs, &allocs); + /* dump allocs by time */ + AWS_LOGF_TRACE( + AWS_LS_COMMON_MEMTRACE, "################################################################################\n"); + AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Leaks in order of allocation:\n"); + AWS_LOGF_TRACE( + AWS_LS_COMMON_MEMTRACE, "################################################################################\n"); + while (aws_priority_queue_size(&allocs)) { + struct alloc_info *alloc = NULL; + aws_priority_queue_pop(&allocs, &alloc); + AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "ALLOC %zu bytes\n", alloc->size); + if (alloc->stack) { + struct aws_hash_element *item = NULL; + AWS_FATAL_ASSERT( + AWS_OP_SUCCESS == aws_hash_table_find(&tracer->stack_info, (void *)(uintptr_t)alloc->stack, &item)); + struct stack_metadata *stack = item->value; + AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, " stacktrace:\n%s\n", (const char *)aws_string_bytes(stack->trace)); + } + } + + aws_priority_queue_clean_up(&allocs); + + if (tracer->level == AWS_MEMTRACE_STACKS) { + size_t num_stacks = aws_hash_table_get_entry_count(&tracer->stack_info); + /* sort stacks by total size leaked */ + struct aws_priority_queue stacks_by_size; + AWS_FATAL_ASSERT( + AWS_OP_SUCCESS 
== aws_priority_queue_init_dynamic( + &stacks_by_size, + tracer->allocator, + num_stacks, + sizeof(struct stack_metadata *), + s_stack_info_compare_size)); + aws_hash_table_foreach(&tracer->stack_info, s_insert_stacks, &stacks_by_size); + AWS_LOGF_TRACE( + AWS_LS_COMMON_MEMTRACE, + "################################################################################\n"); + AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by bytes leaked:\n"); + AWS_LOGF_TRACE( + AWS_LS_COMMON_MEMTRACE, + "################################################################################\n"); + while (aws_priority_queue_size(&stacks_by_size) > 0) { + struct stack_metadata *stack = NULL; + aws_priority_queue_pop(&stacks_by_size, &stack); + AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%zu bytes in %zu allocations:\n", stack->size, stack->count); + AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%s\n", (const char *)aws_string_bytes(stack->trace)); + } + aws_priority_queue_clean_up(&stacks_by_size); + + /* sort stacks by number of leaks */ + struct aws_priority_queue stacks_by_count; + AWS_FATAL_ASSERT( + AWS_OP_SUCCESS == aws_priority_queue_init_dynamic( + &stacks_by_count, + tracer->allocator, + num_stacks, + sizeof(struct stack_metadata *), + s_stack_info_compare_count)); + AWS_LOGF_TRACE( + AWS_LS_COMMON_MEMTRACE, + "################################################################################\n"); + AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by number of leaks:\n"); + AWS_LOGF_TRACE( + AWS_LS_COMMON_MEMTRACE, + "################################################################################\n"); + aws_hash_table_foreach(&tracer->stack_info, s_insert_stacks, &stacks_by_count); + while (aws_priority_queue_size(&stacks_by_count) > 0) { + struct stack_metadata *stack = NULL; + aws_priority_queue_pop(&stacks_by_count, &stack); + AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%zu allocations leaking %zu bytes:\n", stack->count, stack->size); + AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%s\n", (const char *)aws_string_bytes(stack->trace)); + } + aws_priority_queue_clean_up(&stacks_by_count); + aws_hash_table_clean_up(&tracer->stack_info); + } + + AWS_LOGF_TRACE( + AWS_LS_COMMON_MEMTRACE, "################################################################################\n"); + AWS_LOGF_TRACE( + AWS_LS_COMMON_MEMTRACE, "# END MEMTRACE DUMP #\n"); + AWS_LOGF_TRACE( + AWS_LS_COMMON_MEMTRACE, "################################################################################\n"); + + aws_mutex_unlock(&tracer->mutex); +} + +static void *s_trace_mem_acquire(struct aws_allocator *allocator, size_t size) { + struct alloc_tracer *tracer = allocator->impl; + void *ptr = aws_mem_acquire(tracer->allocator, size); + s_alloc_tracer_track(tracer, ptr, size); + return ptr; +} + +static void s_trace_mem_release(struct aws_allocator *allocator, void *ptr) { + struct alloc_tracer *tracer = allocator->impl; + s_alloc_tracer_untrack(tracer, ptr); + aws_mem_release(tracer->allocator, ptr); +} + +static void *s_trace_mem_realloc(struct aws_allocator *allocator, void *ptr, size_t old_size, size_t new_size) { + struct alloc_tracer *tracer = allocator->impl; + void *new_ptr = ptr; + + AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_mem_realloc(tracer->allocator, &new_ptr, old_size, new_size)); + + s_alloc_tracer_untrack(tracer, ptr); + s_alloc_tracer_track(tracer, new_ptr, new_size); + + return new_ptr; +} + +static void *s_trace_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) { + struct alloc_tracer *tracer = allocator->impl; + void *ptr = 
aws_mem_calloc(tracer->allocator, num, size); + s_alloc_tracer_track(tracer, ptr, num * size); + return ptr; +} + +struct aws_allocator *aws_mem_tracer_new( + struct aws_allocator *allocator, + struct aws_allocator *system_allocator, + enum aws_mem_trace_level level, + size_t frames_per_stack) { + + if (!system_allocator) { + system_allocator = aws_default_allocator(); + } + + struct alloc_tracer *tracer = NULL; + struct aws_allocator *trace_allocator = NULL; + aws_mem_acquire_many( + system_allocator, 2, &tracer, sizeof(struct alloc_tracer), &trace_allocator, sizeof(struct aws_allocator)); + + AWS_FATAL_ASSERT(trace_allocator); + AWS_FATAL_ASSERT(tracer); + + AWS_ZERO_STRUCT(*trace_allocator); + AWS_ZERO_STRUCT(*tracer); + + /* copy the template vtable s*/ + *trace_allocator = s_trace_allocator; + trace_allocator->impl = tracer; + + s_alloc_tracer_init(tracer, allocator, system_allocator, level, frames_per_stack); + return trace_allocator; +} + +struct aws_allocator *aws_mem_tracer_destroy(struct aws_allocator *trace_allocator) { + struct alloc_tracer *tracer = trace_allocator->impl; + struct aws_allocator *allocator = tracer->allocator; + + /* This is not necessary, as if you are destroying the allocator, what are your + * expectations? Either way, we can, so we might as well... + */ + aws_mutex_lock(&tracer->mutex); + aws_hash_table_clean_up(&tracer->allocs); + aws_hash_table_clean_up(&tracer->stacks); + aws_mutex_unlock(&tracer->mutex); + aws_mutex_clean_up(&tracer->mutex); + + struct aws_allocator *system_allocator = tracer->system_allocator; + aws_mem_release(system_allocator, tracer); + /* trace_allocator is freed as part of the block tracer was allocated in */ + return allocator; +} + +void aws_mem_tracer_dump(struct aws_allocator *trace_allocator) { + struct alloc_tracer *tracer = trace_allocator->impl; + s_alloc_tracer_dump(tracer); +} + +size_t aws_mem_tracer_bytes(struct aws_allocator *trace_allocator) { + struct alloc_tracer *tracer = trace_allocator->impl; + return aws_atomic_load_int(&tracer->allocated); +} + +size_t aws_mem_tracer_count(struct aws_allocator *trace_allocator) { + struct alloc_tracer *tracer = trace_allocator->impl; + aws_mutex_lock(&tracer->mutex); + size_t count = aws_hash_table_get_entry_count(&tracer->allocs); + aws_mutex_unlock(&tracer->mutex); + return count; +} diff --git a/source/posix/system_info.c b/source/posix/system_info.c index 045bbd54e..95d556ffe 100644 --- a/source/posix/system_info.c +++ b/source/posix/system_info.c @@ -15,6 +15,9 @@ #include +#include +#include + #if defined(__FreeBSD__) || defined(__NetBSD__) # define __BSD_VISIBLE 1 #endif @@ -236,6 +239,63 @@ void s_resolve_cmd(char *cmd, size_t len, struct aws_stack_frame_info *frame) { } # endif +size_t aws_backtrace(void **frames, size_t size) { + return backtrace(frames, size); +} + +char **aws_backtrace_symbols(void *const *frames, size_t stack_depth) { + return backtrace_symbols(frames, stack_depth); +} + +char **aws_backtrace_addr2line(void *const *stack_frames, size_t stack_depth) { + char **symbols = aws_backtrace_symbols(stack_frames, stack_depth); + AWS_FATAL_ASSERT(symbols); + struct aws_byte_buf lines; + aws_byte_buf_init(&lines, aws_default_allocator(), stack_depth * 256); + + /* insert pointers for each stack entry */ + memset(lines.buffer, 0, stack_depth * sizeof(void *)); + lines.len += stack_depth * sizeof(void *); + + /* symbols look like: (+) [0x] + * or: [0x] + * start at 1 to skip the current frame (this function) */ + for (size_t frame_idx = 0; frame_idx < 
stack_depth; ++frame_idx) { + struct aws_stack_frame_info frame; + AWS_ZERO_STRUCT(frame); + const char *symbol = symbols[frame_idx]; + if (s_parse_symbol(symbol, stack_frames[frame_idx], &frame)) { + goto parse_failed; + } + + /* TODO: Emulate libunwind */ + char cmd[sizeof(struct aws_stack_frame_info)] = {0}; + s_resolve_cmd(cmd, sizeof(cmd), &frame); + FILE *out = popen(cmd, "r"); + if (!out) { + goto parse_failed; + } + char output[1024]; + if (fgets(output, sizeof(output), out)) { + /* if addr2line or atos don't know what to do with an address, they just echo it */ + /* if there are spaces in the output, then they resolved something */ + if (strstr(output, " ")) { + symbol = output; + } + } + pclose(out); + + parse_failed: + /* record the pointer to where the symbol will be */ + *((char **)&lines.buffer[frame_idx * sizeof(void *)]) = (char *)lines.buffer + lines.len; + struct aws_byte_cursor line_cursor = aws_byte_cursor_from_c_str(symbol); + line_cursor.len += 1; /* strings must be null terminated, make sure we copy the null */ + aws_byte_buf_append_dynamic(&lines, &line_cursor); + } + free(symbols); + return (char **)lines.buffer; /* caller is responsible for freeing */ +} + void aws_backtrace_print(FILE *fp, void *call_site_data) { siginfo_t *siginfo = call_site_data; if (siginfo) { @@ -246,8 +306,8 @@ void aws_backtrace_print(FILE *fp, void *call_site_data) { } void *stack_frames[AWS_BACKTRACE_DEPTH]; - int stack_depth = backtrace(stack_frames, AWS_BACKTRACE_DEPTH); - char **symbols = backtrace_symbols(stack_frames, stack_depth); + size_t stack_depth = aws_backtrace(stack_frames, AWS_BACKTRACE_DEPTH); + char **symbols = aws_backtrace_symbols(stack_frames, stack_depth); if (symbols == NULL) { fprintf(fp, "Unable to decode backtrace via backtrace_symbols\n"); return; @@ -256,7 +316,7 @@ void aws_backtrace_print(FILE *fp, void *call_site_data) { /* symbols look like: (+) [0x] * or: [0x] * start at 1 to skip the current frame (this function) */ - for (int frame_idx = 1; frame_idx < stack_depth; ++frame_idx) { + for (size_t frame_idx = 1; frame_idx < stack_depth; ++frame_idx) { struct aws_stack_frame_info frame; AWS_ZERO_STRUCT(frame); const char *symbol = symbols[frame_idx]; @@ -289,6 +349,29 @@ void aws_backtrace_print(FILE *fp, void *call_site_data) { #else void aws_backtrace_print(FILE *fp, void *call_site_data) { + (void)call_site_data; fprintf(fp, "No call stack information available\n"); } + +size_t aws_backtrace(void **frames, size_t size) { + return 0; +} + +char **aws_backtrace_symbols(void *const *frames, size_t stack_depth) { + return NULL; +} #endif /* AWS_HAVE_EXECINFO */ + +void aws_backtrace_log() { + void *stack[1024]; + size_t num_frames = aws_backtrace(stack, 1024); + if (!num_frames) { + return; + } + char **symbols = aws_backtrace_addr2line(stack, num_frames); + for (size_t line = 0; line < num_frames; ++line) { + const char *symbol = symbols[line]; + AWS_LOGF_TRACE(AWS_LS_COMMON_GENERAL, "%s", symbol); + } + free(symbols); +} diff --git a/source/windows/system_info.c b/source/windows/system_info.c index 53775c4fb..76f62e541 100644 --- a/source/windows/system_info.c +++ b/source/windows/system_info.c @@ -15,6 +15,10 @@ #include +#include +#include +#include + #include size_t aws_system_info_processor_count(void) { @@ -68,44 +72,79 @@ typedef BOOL __stdcall SymGetLineFromAddr_fn( # define SymGetLineFromAddrName "SymGetLineFromAddr" #endif -void aws_backtrace_print(FILE *fp, void *call_site_data) { - struct _EXCEPTION_POINTERS *exception_pointers = call_site_data; - 
if (exception_pointers) { - fprintf(fp, "** Exception 0x%x occured **\n", exception_pointers->ExceptionRecord->ExceptionCode); - } +static SymInitialize_fn *s_SymInitialize = NULL; +static SymFromAddr_fn *s_SymFromAddr = NULL; +static SymGetLineFromAddr_fn *s_SymGetLineFromAddr = NULL; +static aws_thread_once s_init_once = AWS_THREAD_ONCE_STATIC_INIT; +static void s_init_dbghelp_impl(void *user_data) { + (void)user_data; HMODULE dbghelp = LoadLibraryA("DbgHelp.dll"); if (!dbghelp) { fprintf(stderr, "Failed to load DbgHelp.dll.\n"); goto done; } - SymInitialize_fn *p_SymInitialize = (SymInitialize_fn *)GetProcAddress(dbghelp, "SymInitialize"); - if (!p_SymInitialize) { + s_SymInitialize = (SymInitialize_fn *)GetProcAddress(dbghelp, "SymInitialize"); + if (!s_SymInitialize) { fprintf(stderr, "Failed to load SymInitialize from DbgHelp.dll.\n"); goto done; } - SymFromAddr_fn *p_SymFromAddr = (SymFromAddr_fn *)GetProcAddress(dbghelp, "SymFromAddr"); - if (!p_SymFromAddr) { + s_SymFromAddr = (SymFromAddr_fn *)GetProcAddress(dbghelp, "SymFromAddr"); + if (!s_SymFromAddr) { fprintf(stderr, "Failed to load SymFromAddr from DbgHelp.dll.\n"); goto done; } - SymGetLineFromAddr_fn *p_SymGetLineFromAddr = - (SymGetLineFromAddr_fn *)GetProcAddress(dbghelp, SymGetLineFromAddrName); - if (!p_SymGetLineFromAddr) { + s_SymGetLineFromAddr = (SymGetLineFromAddr_fn *)GetProcAddress(dbghelp, SymGetLineFromAddrName); + if (!s_SymGetLineFromAddr) { fprintf(stderr, "Failed to load " SymGetLineFromAddrName " from DbgHelp.dll.\n"); goto done; } HANDLE process = GetCurrentProcess(); - p_SymInitialize(process, NULL, TRUE); - void *stack[1024]; - WORD num_frames = CaptureStackBackTrace(0, 1024, stack, NULL); + AWS_FATAL_ASSERT(process); + s_SymInitialize(process, NULL, TRUE); + return; + +done: + if (dbghelp) { + FreeLibrary(dbghelp); + } + return; +} + +static bool s_init_dbghelp() { + if (AWS_LIKELY(s_SymInitialize)) { + return true; + } + + aws_thread_call_once(&s_init_once, s_init_dbghelp_impl, NULL); + return s_SymInitialize != NULL; +} + +size_t aws_backtrace(void **frames, size_t size) { + return (int)CaptureStackBackTrace(0, (ULONG)size, frames, NULL); +} + +char **aws_backtrace_symbols(void *const *stack, size_t num_frames) { + if (!s_init_dbghelp()) { + return NULL; + } + + struct aws_byte_buf symbols; + aws_byte_buf_init(&symbols, aws_default_allocator(), num_frames * 256); + /* pointers for each stack entry */ + memset(symbols.buffer, 0, num_frames * sizeof(void *)); + symbols.len += num_frames * sizeof(void *); + DWORD64 displacement = 0; DWORD disp = 0; + struct aws_byte_cursor null_term = aws_byte_cursor_from_array("", 1); + HANDLE process = GetCurrentProcess(); + AWS_FATAL_ASSERT(process); fprintf(stderr, "Stack Trace:\n"); for (size_t i = 0; i < num_frames; ++i) { uintptr_t address = (uintptr_t)stack[i]; @@ -113,25 +152,73 @@ void aws_backtrace_print(FILE *fp, void *call_site_data) { AWS_ZERO_STRUCT(sym_info); sym_info.sym_info.MaxNameLen = sizeof(sym_info.symbol_name); sym_info.sym_info.SizeOfStruct = sizeof(struct _SYMBOL_INFO); - p_SymFromAddr(process, address, &displacement, &sym_info.sym_info); + s_SymFromAddr(process, address, &displacement, &sym_info.sym_info); + + /* record a pointer to where the symbol will be */ + *((char **)&symbols.buffer[i * sizeof(void *)]) = (char *)symbols.buffer + symbols.len; IMAGEHLP_LINE line; line.SizeOfStruct = sizeof(IMAGEHLP_LINE); - if (p_SymGetLineFromAddr(process, address, &disp, &line)) { - if (i != 0) { - fprintf( - stderr, - "at %s(%s:%lu): address: 
0x%llX\n", - sym_info.sym_info.Name, - line.FileName, - line.LineNumber, - sym_info.sym_info.Address); + if (s_SymGetLineFromAddr(process, address, &disp, &line)) { + char buf[1024]; + int len = snprintf( + buf, + AWS_ARRAY_SIZE(buf), + "at %s(%s:%lu): address: 0x%llX", + sym_info.sym_info.Name, + line.FileName, + line.LineNumber, + sym_info.sym_info.Address); + if (len != -1) { + struct aws_byte_cursor symbol = aws_byte_cursor_from_array(buf, len + 1); /* include null terminator */ + aws_byte_buf_append_dynamic(&symbols, &symbol); + continue; } } + + /* Need at least a null so the address changes between lines */ + aws_byte_buf_append_dynamic(&symbols, &null_term); } -done: - if (dbghelp) { - FreeLibrary(dbghelp); + return (char **)symbols.buffer; /* buffer must be freed by the caller */ +} + +char **aws_backtrace_addr2line(void *const *frames, size_t stack_depth) { + return aws_backtrace_symbols(frames, stack_depth); +} + +void aws_backtrace_print(FILE *fp, void *call_site_data) { + struct _EXCEPTION_POINTERS *exception_pointers = call_site_data; + if (exception_pointers) { + fprintf(fp, "** Exception 0x%x occured **\n", exception_pointers->ExceptionRecord->ExceptionCode); + } + + if (!s_init_dbghelp()) { + fprintf(fp, "Unable to initialize dbghelp.dll"); + return; + } + + void *stack[1024]; + size_t num_frames = aws_backtrace(stack, 1024); + char **symbols = aws_backtrace_symbols(stack, num_frames); + for (size_t line = 0; line < num_frames; ++line) { + const char *symbol = symbols[line]; + fprintf(fp, "%s\n", symbol); + } +} + +void aws_backtrace_log() { + if (!s_init_dbghelp()) { + AWS_LOGF_ERROR(AWS_LS_COMMON_GENERAL, "Unable to initialize dbghelp.dll for backtrace"); + return; + } + + void *stack[1024]; + size_t num_frames = aws_backtrace(stack, 1024); + char **symbols = aws_backtrace_symbols(stack, num_frames); + for (size_t line = 0; line < num_frames; ++line) { + const char *symbol = symbols[line]; + AWS_LOGF_TRACE(AWS_LS_COMMON_GENERAL, "%s", symbol); } + free(symbols); } diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 40ae9c35e..867679700 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -254,6 +254,11 @@ add_test_case(test_cf_allocator_wrapper) add_test_case(test_acquire_many) add_test_case(test_alloc_nothing) +add_test_case(test_memtrace_none) +add_test_case(test_memtrace_count) +add_test_case(test_memtrace_stacks) +add_test_case(test_memtrace_midstream) + add_test_case(test_calloc_override) add_test_case(test_calloc_fallback_from_default_allocator) add_test_case(test_calloc_fallback_from_given) diff --git a/tests/assert_test.c b/tests/assert_test.c index 0a7eab932..6d3d48774 100644 --- a/tests/assert_test.c +++ b/tests/assert_test.c @@ -209,8 +209,6 @@ int main(int argc, char **argv) { g_test_filename = argv[1]; // Suppress unused function warnings - (void)s_mem_acquire_malloc; - (void)s_mem_release_free; (void)s_aws_run_test_case; // Sanity checks for our own test macros diff --git a/tests/logging/logging_test_utilities.c b/tests/logging/logging_test_utilities.c index 64360d189..e07765072 100644 --- a/tests/logging/logging_test_utilities.c +++ b/tests/logging/logging_test_utilities.c @@ -17,6 +17,8 @@ #include "test_logger.h" +#define TEST_LOGGER_MAX_BUFFER_SIZE 4096 + int do_log_test( struct aws_allocator *allocator, enum aws_log_level level, @@ -25,7 +27,7 @@ int do_log_test( /* Create and attach a logger for testing*/ struct aws_logger test_logger; - test_logger_init(&test_logger, allocator, level); + test_logger_init(&test_logger, allocator, 
level, TEST_LOGGER_MAX_BUFFER_SIZE); aws_logger_set(&test_logger); /* Perform logging operations */ diff --git a/tests/logging/test_logger.c b/tests/logging/test_logger.c index 75a3081d7..9a358e112 100644 --- a/tests/logging/test_logger.c +++ b/tests/logging/test_logger.c @@ -23,7 +23,7 @@ /** * A real logger wouldn't have such size restrictions, but these are good enough for our tests */ -#define TEST_LOGGER_MAX_LOG_LINE_SIZE 256 +#define TEST_LOGGER_MAX_LOG_LINE_SIZE 2048 int s_test_logger_log( struct aws_logger *logger, @@ -53,8 +53,15 @@ int s_test_logger_log( struct test_logger_impl *impl = (struct test_logger_impl *)logger->p_impl; - if (aws_byte_buf_write(&impl->log_buffer, (uint8_t *)buffer, written)) { - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + struct aws_byte_cursor line = aws_byte_cursor_from_array(buffer, written); + if (impl->max_size) { + if (aws_byte_buf_write(&impl->log_buffer, line.ptr, line.len)) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + } else { + if (aws_byte_buf_append_dynamic(&impl->log_buffer, &line)) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } } return AWS_OP_SUCCESS; @@ -81,7 +88,11 @@ static struct aws_logger_vtable s_test_logger_vtable = {.get_log_level = s_test_ .log = s_test_logger_log, .clean_up = s_test_logger_clean_up}; -int test_logger_init(struct aws_logger *logger, struct aws_allocator *allocator, enum aws_log_level level) { +int test_logger_init( + struct aws_logger *logger, + struct aws_allocator *allocator, + enum aws_log_level level, + size_t max_size) { struct test_logger_impl *impl = (struct test_logger_impl *)aws_mem_acquire(allocator, sizeof(struct test_logger_impl)); @@ -89,12 +100,13 @@ int test_logger_init(struct aws_logger *logger, struct aws_allocator *allocator, return AWS_OP_ERR; } - if (aws_byte_buf_init(&impl->log_buffer, allocator, TEST_LOGGER_MAX_BUFFER_SIZE)) { + if (aws_byte_buf_init(&impl->log_buffer, allocator, max_size ? max_size : 4096)) { aws_mem_release(allocator, impl); return AWS_OP_ERR; } impl->level = level; + impl->max_size = max_size; logger->vtable = &s_test_logger_vtable; logger->allocator = allocator; diff --git a/tests/logging/test_logger.h b/tests/logging/test_logger.h index e98bf921a..f8f0d0e0c 100644 --- a/tests/logging/test_logger.h +++ b/tests/logging/test_logger.h @@ -21,8 +21,6 @@ #include -#define TEST_LOGGER_MAX_BUFFER_SIZE 4096 - /** * The test logger is a simple forwarding logger that just records what was passed to it. * We provide an extraction function for easy test validation. @@ -30,12 +28,18 @@ struct test_logger_impl { enum aws_log_level level; struct aws_byte_buf log_buffer; + size_t max_size; }; /** * Given a pointer to a logger, initializes it as a test logger using the supplied log level. + * max_size of 0 is unlimited */ -int test_logger_init(struct aws_logger *logger, struct aws_allocator *allocator, enum aws_log_level level); +int test_logger_init( + struct aws_logger *logger, + struct aws_allocator *allocator, + enum aws_log_level level, + size_t max_size); /** * Extracts logged content from a test logger. diff --git a/tests/memtrace_test.c b/tests/memtrace_test.c new file mode 100644 index 000000000..3a1dd5d2c --- /dev/null +++ b/tests/memtrace_test.c @@ -0,0 +1,250 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +#include + +#include +#include + +#include "logging/test_logger.h" + +#define NUM_ALLOCS 100 +static int s_test_memtrace_count(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + struct aws_allocator *tracer = aws_mem_tracer_new(allocator, NULL, AWS_MEMTRACE_BYTES, 0); + + void *allocs[NUM_ALLOCS] = {0}; + size_t sizes[NUM_ALLOCS] = {0}; + size_t total = 0; + + for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) { + uint32_t size = 0; + aws_device_random_u32(&size); + size = (size % 1024) + 1; /* not necessary to allocate a gajillion bytes */ + allocs[idx] = aws_mem_acquire(tracer, size); + sizes[idx] = size; + total += size; + } + + ASSERT_UINT_EQUALS(total, aws_mem_tracer_bytes(tracer)); + ASSERT_UINT_EQUALS(NUM_ALLOCS, aws_mem_tracer_count(tracer)); + + size_t freed = 0; + for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) { + uint32_t roll = 0; + aws_device_random_u32(&roll); + if (roll % 3 == 0) { + aws_mem_release(tracer, allocs[idx]); + allocs[idx] = NULL; + total -= sizes[idx]; + ++freed; + } + } + + ASSERT_UINT_EQUALS(total, aws_mem_tracer_bytes(tracer)); + ASSERT_UINT_EQUALS(NUM_ALLOCS - freed, aws_mem_tracer_count(tracer)); + + for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) { + if (allocs[idx]) { + aws_mem_release(tracer, allocs[idx]); + } + } + + ASSERT_UINT_EQUALS(0, aws_mem_tracer_bytes(tracer)); + ASSERT_UINT_EQUALS(0, aws_mem_tracer_count(tracer)); + + struct aws_allocator *original = aws_mem_tracer_destroy(tracer); + ASSERT_PTR_EQUALS(allocator, original); + + return 0; +} +AWS_TEST_CASE(test_memtrace_count, s_test_memtrace_count) + +static void *s_alloc_1(struct aws_allocator *allocator, size_t size) { + return aws_mem_acquire(allocator, size); +} + +static void *s_alloc_2(struct aws_allocator *allocator, size_t size) { + return aws_mem_acquire(allocator, size); +} + +static void *s_alloc_3(struct aws_allocator *allocator, size_t size) { + return aws_mem_acquire(allocator, size); +} + +static void *s_alloc_4(struct aws_allocator *allocator, size_t size) { + return aws_mem_acquire(allocator, size); +} + +static struct aws_logger s_test_logger; + +static int s_test_memtrace_stacks(struct aws_allocator *allocator, void *ctx) { + (void)ctx; +#if !defined(AWS_MEMTRACE_STACKS_AVAILABLE) + return 0; +#endif + + test_logger_init(&s_test_logger, allocator, AWS_LL_TRACE, 0); + aws_logger_set(&s_test_logger); + + struct aws_allocator *tracer = aws_mem_tracer_new(allocator, NULL, AWS_MEMTRACE_STACKS, 8); + + void *allocs[NUM_ALLOCS] = {0}; + size_t total = 0; + + for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) { + uint32_t size = 0; + aws_device_random_u32(&size); + size = (size % 1024) + 1; /* not necessary to allocate a gajillion bytes */ + + void *(*allocate)(struct aws_allocator *, size_t) = NULL; + switch (idx % 4) { + case 0: + allocate = s_alloc_1; + break; + case 1: + allocate = s_alloc_2; + break; + case 2: + allocate = s_alloc_3; + break; + case 3: + allocate = s_alloc_4; + break; + } + + allocs[idx] = allocate(tracer, size); + total += size; + } + + ASSERT_UINT_EQUALS(total, aws_mem_tracer_bytes(tracer)); + ASSERT_UINT_EQUALS(NUM_ALLOCS, 
aws_mem_tracer_count(tracer)); + aws_mem_tracer_dump(tracer); + + /* make sure all of the functions that allocated are found */ + struct test_logger_impl *test_logger = s_test_logger.p_impl; + /* if this is not a debug build, there may not be symbols, so the test cannot + * verify if a best effort was made */ +#if defined(DEBUG_BUILD) + ASSERT_TRUE(strstr((const char *)test_logger->log_buffer.buffer, "s_alloc_1")); + ASSERT_TRUE(strstr((const char *)test_logger->log_buffer.buffer, "s_alloc_2")); + ASSERT_TRUE(strstr((const char *)test_logger->log_buffer.buffer, "s_alloc_3")); + ASSERT_TRUE(strstr((const char *)test_logger->log_buffer.buffer, "s_alloc_4")); + ASSERT_TRUE(strstr((const char *)test_logger->log_buffer.buffer, __FUNCTION__)); +#endif + + /* reset log */ + aws_byte_buf_reset(&test_logger->log_buffer, true); + + for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) { + if (allocs[idx]) { + aws_mem_release(tracer, allocs[idx]); + } + } + + ASSERT_UINT_EQUALS(0, aws_mem_tracer_bytes(tracer)); + ASSERT_UINT_EQUALS(0, aws_mem_tracer_count(tracer)); + aws_mem_tracer_dump(tracer); + + /* Make sure no known allocs are left */ + ASSERT_UINT_EQUALS(0, test_logger->log_buffer.len); + + struct aws_allocator *original = aws_mem_tracer_destroy(tracer); + ASSERT_PTR_EQUALS(allocator, original); + + aws_logger_clean_up(&s_test_logger); + + return 0; +} +AWS_TEST_CASE(test_memtrace_stacks, s_test_memtrace_stacks) + +static int s_test_memtrace_none(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + struct aws_allocator *tracer = aws_mem_tracer_new(allocator, NULL, AWS_MEMTRACE_NONE, 0); + + void *allocs[NUM_ALLOCS] = {0}; + size_t total = 0; + + for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) { + uint32_t size = 0; + aws_device_random_u32(&size); + size = (size % 1024) + 1; /* not necessary to allocate a gajillion bytes */ + allocs[idx] = aws_mem_acquire(tracer, size); + total += size; + } + + ASSERT_UINT_EQUALS(0, aws_mem_tracer_bytes(tracer)); + + for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) { + if (allocs[idx]) { + aws_mem_release(tracer, allocs[idx]); + } + } + + ASSERT_UINT_EQUALS(0, aws_mem_tracer_bytes(tracer)); + + struct aws_allocator *original = aws_mem_tracer_destroy(tracer); + ASSERT_PTR_EQUALS(allocator, original); + + return 0; +} +AWS_TEST_CASE(test_memtrace_none, s_test_memtrace_none) + +static int s_test_memtrace_midstream(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + void *allocs[NUM_ALLOCS] = {0}; + + /* allocate some from the base allocator first */ + for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs) / 4; ++idx) { + uint32_t size = 0; + aws_device_random_u32(&size); + size = (size % 1024) + 1; /* not necessary to allocate a gajillion bytes */ + allocs[idx] = aws_mem_acquire(allocator, size); + } + + struct aws_allocator *tracer = aws_mem_tracer_new(allocator, NULL, AWS_MEMTRACE_BYTES, 0); + + /* Now allocate from the tracer, and make sure everything still works */ + size_t total = 0; + size_t tracked_allocs = 0; + for (size_t idx = AWS_ARRAY_SIZE(allocs) / 4 + 1; idx < AWS_ARRAY_SIZE(allocs); ++idx) { + uint32_t size = 0; + aws_device_random_u32(&size); + size = (size % 1024) + 1; /* not necessary to allocate a gajillion bytes */ + allocs[idx] = aws_mem_acquire(tracer, size); + total += size; + ++tracked_allocs; + } + + ASSERT_UINT_EQUALS(total, aws_mem_tracer_bytes(tracer)); + ASSERT_UINT_EQUALS(tracked_allocs, aws_mem_tracer_count(tracer)); + + for (size_t idx = 0; idx < AWS_ARRAY_SIZE(allocs); ++idx) { + if (allocs[idx]) 
{ + aws_mem_release(tracer, allocs[idx]); + } + } + + ASSERT_UINT_EQUALS(0, aws_mem_tracer_bytes(tracer)); + ASSERT_UINT_EQUALS(0, aws_mem_tracer_count(tracer)); + + struct aws_allocator *original = aws_mem_tracer_destroy(tracer); + ASSERT_PTR_EQUALS(allocator, original); + + return 0; +} +AWS_TEST_CASE(test_memtrace_midstream, s_test_memtrace_midstream) diff --git a/tests/system_info_tests.c b/tests/system_info_tests.c index 8a3d7b8ab..cee5246ad 100644 --- a/tests/system_info_tests.c +++ b/tests/system_info_tests.c @@ -15,6 +15,7 @@ #include +#include "logging/test_logger.h" #include static int s_test_cpu_count_at_least_works_superficially_fn(struct aws_allocator *allocator, void *ctx) { @@ -34,75 +35,68 @@ AWS_TEST_CASE(test_cpu_count_at_least_works_superficially, s_test_cpu_count_at_l #if defined(_WIN32) # include +# define DIRSEP "\\" +#else +# define DIRSEP "/" #endif static int s_test_stack_trace_decoding(struct aws_allocator *allocator, void *ctx) { (void)ctx; - char tmp_filename[] = "backtraceXXXXXX"; - FILE *tmp_file = NULL; -#if defined(_WIN32) - errno_t tmp_err = _mktemp_s(tmp_filename, sizeof(tmp_filename)); - ASSERT_INT_EQUALS(0, tmp_err); - errno_t open = fopen_s(&tmp_file, tmp_filename, "w+"); - ASSERT_INT_EQUALS(0, open); -#else - int tmp_fileno = mkstemp(tmp_filename); - ASSERT_TRUE(tmp_fileno > -1); - tmp_file = fdopen(tmp_fileno, "r+"); -#endif - ASSERT_NOT_NULL(tmp_file); - - int line = 0; /* captured on line of aws_backtrace_print call to match call site */ - (void)line; /* may not be used if debug info is unavailable */ - aws_backtrace_print(tmp_file, (line = __LINE__, NULL)); /* NOLINT */ - fflush(tmp_file); - - fseek(tmp_file, 0L, SEEK_END); - long file_size = ftell(tmp_file); - fseek(tmp_file, 0L, SEEK_SET); - char *buffer = aws_mem_acquire(allocator, file_size + 1); - ASSERT_NOT_NULL(buffer); - ASSERT_INT_EQUALS(file_size, fread(buffer, 1, file_size, tmp_file)); - fclose(tmp_file); - buffer[file_size] = 0; - -#if defined(AWS_HAVE_EXECINFO) + struct aws_logger test_log; + test_logger_init(&test_log, allocator, AWS_LL_TRACE, 0); + aws_logger_set(&test_log); + + int line = 0; /* captured on line of aws_backtrace_log call to match call site */ + (void)line; /* may not be used if debug info is unavailable */ + aws_backtrace_log(), (line = __LINE__); /* NOLINT */ + + struct test_logger_impl *log = test_log.p_impl; + ASSERT_NOT_NULL(log); + + struct aws_byte_buf *buffer = &log->log_buffer; + (void)buffer; + +#if defined(AWS_BACKTRACE_STACKS_AVAILABLE) && defined(DEBUG_BUILD) /* ensure that this file/function is found */ char *file = __FILE__; - char *next = NULL; + char *next = strstr(file, DIRSEP); /* strip path info, just filename will be found */ - while ((next = strstr(file, "/"))) { + while (next) { file = next + 1; + next = strstr(file, DIRSEP); } - ASSERT_NOT_NULL(strstr(buffer, __func__)); + struct aws_byte_cursor null_term = aws_byte_cursor_from_array("", 1); + aws_byte_buf_append_dynamic(buffer, &null_term); + ASSERT_NOT_NULL(strstr((const char *)buffer->buffer, __func__)); # if !defined(__APPLE__) /* apple doesn't always find file info */ /* if this is not a debug build, there may not be symbols, so the test cannot * verify if a best effort was made */ -# if defined(DEBUG_BUILD) - ASSERT_NOT_NULL(strstr(buffer, file)); + ASSERT_NOT_NULL(strstr((const char *)buffer->buffer, file)); /* check for the call site of aws_backtrace_print. 
Note that line numbers are off by one * in both directions depending on compiler, so we check a range around the call site __LINE__ * The line number can also be ? on old compilers */ char fileline[4096]; - bool found_file_line = false; + uint32_t found_file_line = 0; for (int lineno = line - 1; lineno <= line + 1; ++lineno) { snprintf(fileline, sizeof(fileline), "%s:%d", file, lineno); - found_file_line |= strstr(buffer, fileline) != NULL; + found_file_line |= strstr((const char *)buffer->buffer, fileline) != NULL; + if (found_file_line) { + break; + } } if (!found_file_line) { snprintf(fileline, sizeof(fileline), "%s:?", file); - found_file_line = strstr(buffer, fileline) != NULL; + found_file_line = strstr((const char *)buffer->buffer, fileline) != NULL; } ASSERT_TRUE(found_file_line); -# endif # endif /* __APPLE__ */ #endif - aws_mem_release(allocator, buffer); + aws_logger_clean_up(&test_log); return 0; }
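
For reference, the end-to-end pattern the new APIs support, shown as a minimal sketch
(names come from the headers patched above; error handling is omitted, and the leak is
deliberate so the dump has something to report):

    #include <aws/common/allocator.h>
    #include <aws/common/logging.h>

    int main(void) {
        /* route TRACE-level output somewhere visible; the dump logs at TRACE */
        struct aws_logger logger;
        struct aws_logger_standard_options options = {
            .file = stderr,
            .level = AWS_LL_TRACE,
            .filename = NULL,
        };
        aws_logger_init_standard(&logger, aws_default_allocator(), &options);
        aws_logger_set(&logger);

        struct aws_allocator *tracer =
            aws_mem_tracer_new(aws_default_allocator(), NULL, AWS_MEMTRACE_STACKS, 8);

        void *leak = aws_mem_acquire(tracer, 64); /* tracked, never released */
        (void)leak;

        aws_mem_tracer_dump(tracer);    /* reports the leak and its callstack via the logger */
        aws_mem_tracer_destroy(tracer); /* returns the wrapped allocator */

        aws_logger_set(NULL);
        aws_logger_clean_up(&logger);
        return 0;
    }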