From d6e03c9b4dd772edc35e52dd9043b8f8283c1110 Mon Sep 17 00:00:00 2001 From: Alexandre Fonseca Date: Fri, 15 Dec 2023 18:09:27 +0000 Subject: [PATCH 1/5] [PROF-8667] Heap Profiling - Part 5 - Size --- .../heap_recorder.c | 17 +++++--- .../heap_recorder.h | 3 ++ .../ruby_helpers.c | 36 +++++++++++++++++ .../ruby_helpers.h | 5 +++ .../stack_recorder.c | 14 +++++-- .../cpu_and_wall_time_worker_spec.rb | 2 + spec/datadog/profiling/stack_recorder_spec.rb | 40 ++++++++++++++++--- 7 files changed, 101 insertions(+), 16 deletions(-) diff --git a/ext/ddtrace_profiling_native_extension/heap_recorder.c b/ext/ddtrace_profiling_native_extension/heap_recorder.c index 608d16b070e..c6a28570646 100644 --- a/ext/ddtrace_profiling_native_extension/heap_recorder.c +++ b/ext/ddtrace_profiling_native_extension/heap_recorder.c @@ -126,7 +126,7 @@ static heap_record* get_or_create_heap_record(heap_recorder*, ddog_prof_Slice_Lo static void cleanup_heap_record_if_unused(heap_recorder*, heap_record*); static int st_heap_record_entry_free(st_data_t, st_data_t, st_data_t); static int st_object_record_entry_free(st_data_t, st_data_t, st_data_t); -static int st_object_record_entry_free_if_invalid(st_data_t, st_data_t, st_data_t); +static int st_object_record_update(st_data_t, st_data_t, st_data_t); static int st_object_records_iterate(st_data_t, st_data_t, st_data_t); static int st_object_records_debug(st_data_t key, st_data_t value, st_data_t extra); static int update_object_record_entry(st_data_t*, st_data_t*, st_data_t, int); @@ -260,7 +260,7 @@ void heap_recorder_prepare_iteration(heap_recorder *heap_recorder) { rb_raise(rb_eRuntimeError, "New heap recorder iteration prepared without the previous one having been finished."); } - st_foreach(heap_recorder->object_records, st_object_record_entry_free_if_invalid, (st_data_t) heap_recorder); + st_foreach(heap_recorder->object_records, st_object_record_update, (st_data_t) heap_recorder); heap_recorder->object_records_snapshot = 
st_copy(heap_recorder->object_records); if (heap_recorder->object_records_snapshot == NULL) { @@ -361,13 +361,15 @@ static int st_object_record_entry_free(DDTRACE_UNUSED st_data_t key, st_data_t v return ST_DELETE; } -static int st_object_record_entry_free_if_invalid(st_data_t key, st_data_t value, st_data_t extra_arg) { +static int st_object_record_update(st_data_t key, st_data_t value, st_data_t extra_arg) { long obj_id = (long) key; object_record *record = (object_record*) value; heap_recorder *recorder = (heap_recorder*) extra_arg; - if (!ruby_ref_from_id(LONG2NUM(obj_id), NULL)) { - // Id no longer associated with a valid ref. Need to clean things up! + VALUE ref; + + if (!ruby_ref_from_id(LONG2NUM(obj_id), &ref)) { + // Id no longer associated with a valid ref. Need to delete this object record! // Starting with the associated heap record. There will now be one less tracked object pointing to it heap_record *heap_record = record->heap_record; @@ -380,6 +382,9 @@ static int st_object_record_entry_free_if_invalid(st_data_t key, st_data_t value return ST_DELETE; } + // If we got this far, entry is still valid so lets update its size + record->object_data.size = ruby_obj_memsize_of(ref); + return ST_CONTINUE; } @@ -418,7 +423,7 @@ static int st_object_records_debug(DDTRACE_UNUSED st_data_t key, st_data_t value object_record *record = (object_record*) value; heap_frame top_frame = record->heap_record->stack->frames[0]; - rb_str_catf(debug_str, "obj_id=%ld weight=%d location=%s:%d alloc_gen=%zu ", record->obj_id, record->object_data.weight, top_frame.filename, (int) top_frame.line, record->object_data.alloc_gen); + rb_str_catf(debug_str, "obj_id=%ld weight=%d size=%zu location=%s:%d alloc_gen=%zu ", record->obj_id, record->object_data.weight, record->object_data.size, top_frame.filename, (int) top_frame.line, record->object_data.alloc_gen); const char *class = record->object_data.class; if (class != NULL) { diff --git 
a/ext/ddtrace_profiling_native_extension/heap_recorder.h b/ext/ddtrace_profiling_native_extension/heap_recorder.h index 768b24522fd..787c668e2dd 100644 --- a/ext/ddtrace_profiling_native_extension/heap_recorder.h +++ b/ext/ddtrace_profiling_native_extension/heap_recorder.h @@ -27,6 +27,9 @@ typedef struct live_object_data { // could be seen as being representative of 50 objects. unsigned int weight; + // Size of this object on last flush/update. + size_t size; + // The class of the object that we're tracking. // NOTE: This is optional and will be set to NULL if not set. char* class; diff --git a/ext/ddtrace_profiling_native_extension/ruby_helpers.c b/ext/ddtrace_profiling_native_extension/ruby_helpers.c index 99dc6097d62..883297472c1 100644 --- a/ext/ddtrace_profiling_native_extension/ruby_helpers.c +++ b/ext/ddtrace_profiling_native_extension/ruby_helpers.c @@ -166,3 +166,39 @@ bool ruby_ref_from_id(VALUE obj_id, VALUE *value) { return true; } + +// Not part of public headers but is externed from Ruby +size_t rb_obj_memsize_of(VALUE obj); + +// Wrapper around rb_obj_memsize_of to avoid hitting crashing paths. 
+size_t ruby_obj_memsize_of(VALUE obj) { + switch (BUILTIN_TYPE(obj)) { + case T_OBJECT: + case T_MODULE: + case T_CLASS: + case T_ICLASS: + case T_STRING: + case T_ARRAY: + case T_HASH: + case T_REGEXP: + case T_DATA: + case T_MATCH: + case T_FILE: + case T_RATIONAL: + case T_COMPLEX: + case T_IMEMO: + case T_FLOAT: + case T_SYMBOL: + case T_BIGNUM: + // case T_NODE: -> Throws exception in rb_obj_memsize_of + case T_STRUCT: + case T_ZOMBIE: + #ifndef NO_T_MOVED + case T_MOVED: + #endif + return rb_obj_memsize_of(obj); + default: + // Unsupported, return 0 instead of erroring like rb_obj_memsize_of likes doing + return 0; + } +} diff --git a/ext/ddtrace_profiling_native_extension/ruby_helpers.h b/ext/ddtrace_profiling_native_extension/ruby_helpers.h index d0bf3cfcb0e..1ca01652b4a 100644 --- a/ext/ddtrace_profiling_native_extension/ruby_helpers.h +++ b/ext/ddtrace_profiling_native_extension/ruby_helpers.h @@ -106,3 +106,8 @@ char* ruby_strndup(const char *str, size_t size); // writes the ref to the value pointer parameter if !NULL. False if id doesn't // reference a valid object (in which case value is not changed). bool ruby_ref_from_id(size_t id, VALUE *value); + +// Native wrapper to get the approximate/estimated current size of the passed +// object. 
+size_t ruby_obj_memsize_of(VALUE obj); + diff --git a/ext/ddtrace_profiling_native_extension/stack_recorder.c b/ext/ddtrace_profiling_native_extension/stack_recorder.c index 041cf4fc880..60b0fb6a20c 100644 --- a/ext/ddtrace_profiling_native_extension/stack_recorder.c +++ b/ext/ddtrace_profiling_native_extension/stack_recorder.c @@ -153,17 +153,19 @@ static VALUE error_symbol = Qnil; // :error in Ruby #define ALLOC_SAMPLES_VALUE_ID 3 #define HEAP_SAMPLES_VALUE {.type_ = VALUE_STRING("heap-live-samples"), .unit = VALUE_STRING("count")} #define HEAP_SAMPLES_VALUE_ID 4 +#define HEAP_SIZE_VALUE {.type_ = VALUE_STRING("heap-live-size"), .unit = VALUE_STRING("bytes")} +#define HEAP_SIZE_VALUE_ID 5 #define TIMELINE_VALUE {.type_ = VALUE_STRING("timeline"), .unit = VALUE_STRING("nanoseconds")} -#define TIMELINE_VALUE_ID 5 +#define TIMELINE_VALUE_ID 6 static const ddog_prof_ValueType all_value_types[] = - {CPU_TIME_VALUE, CPU_SAMPLES_VALUE, WALL_TIME_VALUE, ALLOC_SAMPLES_VALUE, HEAP_SAMPLES_VALUE, TIMELINE_VALUE}; + {CPU_TIME_VALUE, CPU_SAMPLES_VALUE, WALL_TIME_VALUE, ALLOC_SAMPLES_VALUE, HEAP_SAMPLES_VALUE, HEAP_SIZE_VALUE, TIMELINE_VALUE}; // This array MUST be kept in sync with all_value_types above and is intended to act as a "hashmap" between VALUE_ID and the position it // occupies on the all_value_types array. // E.g. all_value_types_positions[CPU_TIME_VALUE_ID] => 0, means that CPU_TIME_VALUE was declared at position 0 of all_value_types. 
static const uint8_t all_value_types_positions[] = - {CPU_TIME_VALUE_ID, CPU_SAMPLES_VALUE_ID, WALL_TIME_VALUE_ID, ALLOC_SAMPLES_VALUE_ID, HEAP_SAMPLES_VALUE_ID, TIMELINE_VALUE_ID}; + {CPU_TIME_VALUE_ID, CPU_SAMPLES_VALUE_ID, WALL_TIME_VALUE_ID, ALLOC_SAMPLES_VALUE_ID, HEAP_SAMPLES_VALUE_ID, HEAP_SIZE_VALUE_ID, TIMELINE_VALUE_ID}; #define ALL_VALUE_TYPES_COUNT (sizeof(all_value_types) / sizeof(ddog_prof_ValueType)) @@ -380,7 +382,7 @@ static VALUE _native_initialize( uint8_t requested_values_count = ALL_VALUE_TYPES_COUNT - (cpu_time_enabled == Qtrue ? 0 : 1) - (alloc_samples_enabled == Qtrue? 0 : 1) - - (heap_samples_enabled == Qtrue ? 0 : 1) - + (heap_samples_enabled == Qtrue ? 0 : 2) - (timeline_enabled == Qtrue ? 0 : 1); if (requested_values_count == ALL_VALUE_TYPES_COUNT) return Qtrue; // Nothing to do, this is the default @@ -420,8 +422,11 @@ static VALUE _native_initialize( if (heap_samples_enabled == Qtrue) { enabled_value_types[next_enabled_pos] = (ddog_prof_ValueType) HEAP_SAMPLES_VALUE; state->position_for[HEAP_SAMPLES_VALUE_ID] = next_enabled_pos++; + enabled_value_types[next_enabled_pos] = (ddog_prof_ValueType) HEAP_SIZE_VALUE; + state->position_for[HEAP_SIZE_VALUE_ID] = next_enabled_pos++; } else { state->position_for[HEAP_SAMPLES_VALUE_ID] = next_disabled_pos++; + state->position_for[HEAP_SIZE_VALUE_ID] = next_disabled_pos++; // Turns out heap sampling is disabled but we initialized everything in _native_new // assuming all samples were enabled. We need to deinitialize the heap recorder. 
@@ -599,6 +604,7 @@ static bool add_heap_sample_to_active_profile_without_gvl(heap_recorder_iteratio uint8_t *position_for = context->state->position_for; metric_values[position_for[HEAP_SAMPLES_VALUE_ID]] = object_data->weight; + metric_values[position_for[HEAP_SIZE_VALUE_ID]] = object_data->size * object_data->weight; ddog_prof_Label labels[2]; size_t label_offset = 0; diff --git a/spec/datadog/profiling/collectors/cpu_and_wall_time_worker_spec.rb b/spec/datadog/profiling/collectors/cpu_and_wall_time_worker_spec.rb index b4e18d9fa98..3e303a97d5b 100644 --- a/spec/datadog/profiling/collectors/cpu_and_wall_time_worker_spec.rb +++ b/spec/datadog/profiling/collectors/cpu_and_wall_time_worker_spec.rb @@ -594,6 +594,8 @@ .find(&test_struct_heap_sample) expect(relevant_sample.values[:'heap-live-samples']).to eq test_num_allocated_object + # 40 is the size of a basic object and we have test_num_allocated_object of them + expect(relevant_sample.values[:'heap-live-size']).to eq test_num_allocated_object * 40 end end diff --git a/spec/datadog/profiling/stack_recorder_spec.rb b/spec/datadog/profiling/stack_recorder_spec.rb index 0a2289ae563..ea828a6cc0c 100644 --- a/spec/datadog/profiling/stack_recorder_spec.rb +++ b/spec/datadog/profiling/stack_recorder_spec.rb @@ -132,12 +132,13 @@ def slot_two_mutex_locked? 
'wall-time' => 'nanoseconds', 'alloc-samples' => 'count', 'heap-live-samples' => 'count', + 'heap-live-size' => 'bytes', 'timeline' => 'nanoseconds', } end - def profile_types_without(type) - all_profile_types.dup.tap { |it| it.delete(type) { raise 'Missing key' } } + def profile_types_without(*types) + all_profile_types.dup.tap { |it| it.delete_if { |k| types.include?(k) } } end context 'when all profile types are enabled' do @@ -166,7 +167,7 @@ def profile_types_without(type) let(:heap_samples_enabled) { false } it 'returns a pprof without the heap-live-samples type' do - expect(sample_types_from(decoded_profile)).to eq(profile_types_without('heap-live-samples')) + expect(sample_types_from(decoded_profile)).to eq(profile_types_without('heap-live-samples', 'heap-live-size')) end end @@ -348,8 +349,8 @@ def sample_types_from(decoded_profile) let(:labels) { { 'label_a' => 'value_a', 'label_b' => 'value_b', 'state' => 'unknown' }.to_a } let(:a_string) { 'a beautiful string' } - let(:an_array) { (1..10).to_a } - let(:a_hash) { { 'a' => 1, 'b' => '2', 'c' => true } } + let(:an_array) { (1..100).to_a.compact } + let(:a_hash) { { 'a' => 1, 'b' => '2', 'c' => true, 'd' => Object.new } } let(:samples) { samples_from_pprof(encoded_pprof) } @@ -409,6 +410,7 @@ def sample_allocation(obj) # We sample from 2 distinct locations expect(samples.size).to eq(2) expect(samples.select { |s| s.values.key?('heap-live-samples') }).to be_empty + expect(samples.select { |s| s.values.key?('heap-live-size') }).to be_empty end end @@ -431,6 +433,32 @@ def sample_allocation(obj) expect(heap_samples.map(&:labels)).to all(match(hash_including(:'gc gen age' => be_a(Integer).and(be >= 0)))) end + it 'include accurate object sizes' do + string_sample = heap_samples.find { |s| s.labels[:'allocation class'] == 'String' } + expect(string_sample.values[:'heap-live-size']).to be_between( + # 18 UTF-8 characters at minimum. 
+ (18 * 2) * sample_rate, + # Add some extra padding (per char and object-wide) for extra data. + (18 * 4 + 40) * sample_rate + ) + + array_sample = heap_samples.find { |s| s.labels[:'allocation class'] == 'Array' } + expect(array_sample.values[:'heap-live-size']).to be_between( + # Basic object + 100 FIXNUMs (32 bits) + (40 + 100 * 4) * sample_rate, + # Basic object + 128 FIXNUMs (64 bits and round to nearest power of 2) and eventual extra data + (40 + 128 * 8 + 10) * sample_rate, + ) + + hash_sample = heap_samples.find { |s| s.labels[:'allocation class'] == 'Hash' } + expect(hash_sample.values[:'heap-live-size']).to be_between( + # Basic object + 4 table entries + no bins + (40 + 4 * 16) * sample_rate, + # Add extra padding to hash itself as well as each entry and 8 bins + (80 + 4 * 32 + 8 * 16) * sample_rate, + ) + end + it 'include accurate object ages' do string_sample = heap_samples.find { |s| s.labels[:'allocation class'] == 'String' } string_age = string_sample.labels[:'gc gen age'] @@ -469,7 +497,7 @@ def sample_allocation(obj) # We use the same metric_values in all sample calls in before. So we'd expect # the summed values to match `@num_allocations * metric_values[profile-type]` # for each profile-type there in. 
- expected_summed_values = { :'heap-live-samples' => 0 } + expected_summed_values = { :'heap-live-samples' => 0, :'heap-live-size' => 0, } metric_values.each_pair do |k, v| expected_summed_values[k.to_sym] = v * @num_allocations end From c6c480513806900a144c01ec83517969d714a517 Mon Sep 17 00:00:00 2001 From: Alexandre Fonseca Date: Thu, 21 Dec 2023 16:04:08 +0000 Subject: [PATCH 2/5] [PROF-8667] Make size calculations optional --- benchmarks/profiler_sample_loop_v2.rb | 1 + .../heap_recorder.c | 28 +++++++++++++++---- .../heap_recorder.h | 6 +++- .../stack_recorder.c | 24 ++++++++++++---- lib/datadog/profiling/component.rb | 1 + lib/datadog/profiling/stack_recorder.rb | 6 +++- .../cpu_and_wall_time_worker_spec.rb | 4 ++- spec/datadog/profiling/spec_helper.rb | 3 +- spec/datadog/profiling/stack_recorder_spec.rb | 22 ++++++++++++--- 9 files changed, 76 insertions(+), 19 deletions(-) diff --git a/benchmarks/profiler_sample_loop_v2.rb b/benchmarks/profiler_sample_loop_v2.rb index 43a9e85e148..bfd66c7d596 100644 --- a/benchmarks/profiler_sample_loop_v2.rb +++ b/benchmarks/profiler_sample_loop_v2.rb @@ -20,6 +20,7 @@ def create_profiler cpu_time_enabled: true, alloc_samples_enabled: false, heap_samples_enabled: false, + heap_size_enabled: false, timeline_enabled: false, ) @collector = Datadog::Profiling::Collectors::ThreadContext.new( diff --git a/ext/ddtrace_profiling_native_extension/heap_recorder.c b/ext/ddtrace_profiling_native_extension/heap_recorder.c index c6a28570646..6ce6313d373 100644 --- a/ext/ddtrace_profiling_native_extension/heap_recorder.c +++ b/ext/ddtrace_profiling_native_extension/heap_recorder.c @@ -250,7 +250,15 @@ void end_heap_allocation_recording(struct heap_recorder *heap_recorder, ddog_pro commit_allocation(heap_recorder, heap_record, partial_object_record); } -void heap_recorder_prepare_iteration(heap_recorder *heap_recorder) { +typedef struct { + // A reference to the heap recorder so we can access extra stuff to cleanup unused records. 
+ heap_recorder *heap_recorder; + + // Whether we should update object sizes as part of the update iteration or not. + bool update_sizes; +} prepare_iteration_context; + +void heap_recorder_prepare_iteration(heap_recorder *heap_recorder, bool update_sizes) { if (heap_recorder == NULL) { return; } @@ -260,7 +268,11 @@ void heap_recorder_prepare_iteration(heap_recorder *heap_recorder) { rb_raise(rb_eRuntimeError, "New heap recorder iteration prepared without the previous one having been finished."); } - st_foreach(heap_recorder->object_records, st_object_record_update, (st_data_t) heap_recorder); + prepare_iteration_context context = (prepare_iteration_context) { + .heap_recorder = heap_recorder, + .update_sizes = update_sizes, + }; + st_foreach(heap_recorder->object_records, st_object_record_update, (st_data_t) &context); heap_recorder->object_records_snapshot = st_copy(heap_recorder->object_records); if (heap_recorder->object_records_snapshot == NULL) { @@ -364,7 +376,7 @@ static int st_object_record_entry_free(DDTRACE_UNUSED st_data_t key, st_data_t v static int st_object_record_update(st_data_t key, st_data_t value, st_data_t extra_arg) { long obj_id = (long) key; object_record *record = (object_record*) value; - heap_recorder *recorder = (heap_recorder*) extra_arg; + prepare_iteration_context *context = (prepare_iteration_context*) extra_arg; VALUE ref; @@ -376,14 +388,18 @@ static int st_object_record_update(st_data_t key, st_data_t value, st_data_t ext heap_record->num_tracked_objects--; // One less object using this heap record, it may have become unused... 
- cleanup_heap_record_if_unused(recorder, heap_record); + cleanup_heap_record_if_unused(context->heap_recorder, heap_record); object_record_free(record); return ST_DELETE; } - // If we got this far, entry is still valid so lets update its size - record->object_data.size = ruby_obj_memsize_of(ref); + // If we got this far, entry is still valid + + if (context->update_sizes) { + // if we were asked to update sizes, do so... + record->object_data.size = ruby_obj_memsize_of(ref); + } return ST_CONTINUE; } diff --git a/ext/ddtrace_profiling_native_extension/heap_recorder.h b/ext/ddtrace_profiling_native_extension/heap_recorder.h index 787c668e2dd..abff3bc6bbe 100644 --- a/ext/ddtrace_profiling_native_extension/heap_recorder.h +++ b/ext/ddtrace_profiling_native_extension/heap_recorder.h @@ -82,8 +82,12 @@ void end_heap_allocation_recording(heap_recorder *heap_recorder, ddog_prof_Slice // Update the heap recorder to reflect the latest state of the VM and prepare internal structures // for efficient iteration. // +// @param update_sizes +// True if we should re-calculate live object sizes ahead of the next iteration. If false, +// the previous sizes will be used (or 0 if we never updated them before). +// // WARN: This must be called strictly before iteration. Failing to do so will result in exceptions. -void heap_recorder_prepare_iteration(heap_recorder *heap_recorder); +void heap_recorder_prepare_iteration(heap_recorder *heap_recorder, bool update_sizes); // Optimize the heap recorder by cleaning up any data that might have been prepared specifically // for the purpose of iterating over the heap recorder data. 
diff --git a/ext/ddtrace_profiling_native_extension/stack_recorder.c b/ext/ddtrace_profiling_native_extension/stack_recorder.c index 60b0fb6a20c..f1a44715d72 100644 --- a/ext/ddtrace_profiling_native_extension/stack_recorder.c +++ b/ext/ddtrace_profiling_native_extension/stack_recorder.c @@ -184,6 +184,7 @@ struct stack_recorder_state { uint8_t position_for[ALL_VALUE_TYPES_COUNT]; uint8_t enabled_values_count; + bool heap_size_enabled; }; // Used to return a pair of values from sampler_lock_active_profile() @@ -216,6 +217,7 @@ static VALUE _native_initialize( VALUE cpu_time_enabled, VALUE alloc_samples_enabled, VALUE heap_samples_enabled, + VALUE heap_sizes_enabled, VALUE timeline_enabled ); static VALUE _native_serialize(VALUE self, VALUE recorder_instance); @@ -255,7 +257,7 @@ void stack_recorder_init(VALUE profiling_module) { // https://bugs.ruby-lang.org/issues/18007 for a discussion around this. rb_define_alloc_func(stack_recorder_class, _native_new); - rb_define_singleton_method(stack_recorder_class, "_native_initialize", _native_initialize, 5); + rb_define_singleton_method(stack_recorder_class, "_native_initialize", _native_initialize, 6); rb_define_singleton_method(stack_recorder_class, "_native_serialize", _native_serialize, 1); rb_define_singleton_method(stack_recorder_class, "_native_reset_after_fork", _native_reset_after_fork, 1); rb_define_singleton_method(testing_module, "_native_active_slot", _native_active_slot, 1); @@ -309,6 +311,7 @@ static VALUE _native_new(VALUE klass) { // heap samples, we will free and reset heap_recorder to NULL, effectively disabling all behaviour specific // to heap profiling (all calls to heap_recorder_* with a NULL heap recorder are noops). state->heap_recorder = heap_recorder_new(); + state->heap_size_enabled = true; // Note: Don't raise exceptions after this point, since it'll lead to libdatadog memory leaking! 
@@ -369,11 +372,13 @@ static VALUE _native_initialize( VALUE cpu_time_enabled, VALUE alloc_samples_enabled, VALUE heap_samples_enabled, + VALUE heap_size_enabled, VALUE timeline_enabled ) { ENFORCE_BOOLEAN(cpu_time_enabled); ENFORCE_BOOLEAN(alloc_samples_enabled); ENFORCE_BOOLEAN(heap_samples_enabled); + ENFORCE_BOOLEAN(heap_size_enabled); ENFORCE_BOOLEAN(timeline_enabled); struct stack_recorder_state *state; @@ -382,7 +387,8 @@ static VALUE _native_initialize( uint8_t requested_values_count = ALL_VALUE_TYPES_COUNT - (cpu_time_enabled == Qtrue ? 0 : 1) - (alloc_samples_enabled == Qtrue? 0 : 1) - - (heap_samples_enabled == Qtrue ? 0 : 2) - + (heap_samples_enabled == Qtrue ? 0 : 1) - + (heap_size_enabled == Qtrue ? 0 : 1) - (timeline_enabled == Qtrue ? 0 : 1); if (requested_values_count == ALL_VALUE_TYPES_COUNT) return Qtrue; // Nothing to do, this is the default @@ -422,12 +428,20 @@ static VALUE _native_initialize( if (heap_samples_enabled == Qtrue) { enabled_value_types[next_enabled_pos] = (ddog_prof_ValueType) HEAP_SAMPLES_VALUE; state->position_for[HEAP_SAMPLES_VALUE_ID] = next_enabled_pos++; + } else { + state->position_for[HEAP_SAMPLES_VALUE_ID] = next_disabled_pos++; + } + + if (heap_size_enabled == Qtrue) { enabled_value_types[next_enabled_pos] = (ddog_prof_ValueType) HEAP_SIZE_VALUE; state->position_for[HEAP_SIZE_VALUE_ID] = next_enabled_pos++; + state->heap_size_enabled = true; } else { - state->position_for[HEAP_SAMPLES_VALUE_ID] = next_disabled_pos++; state->position_for[HEAP_SIZE_VALUE_ID] = next_disabled_pos++; + state->heap_size_enabled = false; + } + if (heap_samples_enabled == Qfalse && heap_size_enabled == Qfalse) { // Turns out heap sampling is disabled but we initialized everything in _native_new // assuming all samples were enabled. We need to deinitialize the heap recorder. 
heap_recorder_free(state->heap_recorder); @@ -460,7 +474,7 @@ static VALUE _native_serialize(DDTRACE_UNUSED VALUE _self, VALUE recorder_instan // Prepare the iteration on heap recorder we'll be doing outside the GVL. The preparation needs to // happen while holding on to the GVL. - heap_recorder_prepare_iteration(state->heap_recorder); + heap_recorder_prepare_iteration(state->heap_recorder, state->heap_size_enabled); // We'll release the Global VM Lock while we're calling serialize, so that the Ruby VM can continue to work while this // is pending @@ -868,7 +882,7 @@ static VALUE _native_start_fake_slow_heap_serialization(DDTRACE_UNUSED VALUE _se struct stack_recorder_state *state; TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state); - heap_recorder_prepare_iteration(state->heap_recorder); + heap_recorder_prepare_iteration(state->heap_recorder, false); return Qnil; } diff --git a/lib/datadog/profiling/component.rb b/lib/datadog/profiling/component.rb index 5c59ea83b71..d60e4247aaa 100644 --- a/lib/datadog/profiling/component.rb +++ b/lib/datadog/profiling/component.rb @@ -85,6 +85,7 @@ def self.build_profiler_component(settings:, agent_settings:, optional_tracer:) cpu_time_enabled: RUBY_PLATFORM.include?('linux'), # Only supported on Linux currently alloc_samples_enabled: allocation_profiling_enabled, heap_samples_enabled: heap_profiling_enabled, + heap_size_enabled: heap_profiling_enabled, timeline_enabled: timeline_enabled, ) end diff --git a/lib/datadog/profiling/stack_recorder.rb b/lib/datadog/profiling/stack_recorder.rb index 6337d3ec588..faa917c46d0 100644 --- a/lib/datadog/profiling/stack_recorder.rb +++ b/lib/datadog/profiling/stack_recorder.rb @@ -4,7 +4,10 @@ module Profiling # Note that `record_sample` is only accessible from native code. 
# Methods prefixed with _native_ are implemented in `stack_recorder.c` class StackRecorder - def initialize(cpu_time_enabled:, alloc_samples_enabled:, heap_samples_enabled:, timeline_enabled:) + def initialize( + cpu_time_enabled:, alloc_samples_enabled:, heap_samples_enabled:, heap_size_enabled:, + timeline_enabled: + ) # This mutex works in addition to the fancy C-level mutexes we have in the native side (see the docs there). # It prevents multiple Ruby threads calling serialize at the same time -- something like # `10.times { Thread.new { stack_recorder.serialize } }`. @@ -18,6 +21,7 @@ def initialize(cpu_time_enabled:, alloc_samples_enabled:, heap_samples_enabled:, cpu_time_enabled, alloc_samples_enabled, heap_samples_enabled, + heap_size_enabled, timeline_enabled, ) end diff --git a/spec/datadog/profiling/collectors/cpu_and_wall_time_worker_spec.rb b/spec/datadog/profiling/collectors/cpu_and_wall_time_worker_spec.rb index 3e303a97d5b..2abc7ecd837 100644 --- a/spec/datadog/profiling/collectors/cpu_and_wall_time_worker_spec.rb +++ b/spec/datadog/profiling/collectors/cpu_and_wall_time_worker_spec.rb @@ -12,7 +12,9 @@ let(:allocation_sample_every) { 50 } let(:allocation_profiling_enabled) { false } let(:heap_profiling_enabled) { false } - let(:recorder) { build_stack_recorder(heap_samples_enabled: heap_profiling_enabled) } + let(:recorder) do + build_stack_recorder(heap_samples_enabled: heap_profiling_enabled, heap_size_enabled: heap_profiling_enabled) + end let(:no_signals_workaround_enabled) { false } let(:timeline_enabled) { false } let(:options) { {} } diff --git a/spec/datadog/profiling/spec_helper.rb b/spec/datadog/profiling/spec_helper.rb index d35e4fa088d..a663055406c 100644 --- a/spec/datadog/profiling/spec_helper.rb +++ b/spec/datadog/profiling/spec_helper.rb @@ -81,11 +81,12 @@ def samples_for_thread(samples, thread) # We disable heap_sample collection by default in tests since it requires some extra mocking/ # setup for it to properly work. 
- def build_stack_recorder(heap_samples_enabled: false, timeline_enabled: false) + def build_stack_recorder(heap_samples_enabled: false, heap_size_enabled: false, timeline_enabled: false) Datadog::Profiling::StackRecorder.new( cpu_time_enabled: true, alloc_samples_enabled: true, heap_samples_enabled: heap_samples_enabled, + heap_size_enabled: heap_size_enabled, timeline_enabled: timeline_enabled, ) end diff --git a/spec/datadog/profiling/stack_recorder_spec.rb b/spec/datadog/profiling/stack_recorder_spec.rb index ea828a6cc0c..c4c314866dd 100644 --- a/spec/datadog/profiling/stack_recorder_spec.rb +++ b/spec/datadog/profiling/stack_recorder_spec.rb @@ -10,6 +10,7 @@ # Disabling these by default since they require some extra setup and produce separate samples. # Enabling this is tested in a particular context below. let(:heap_samples_enabled) { false } + let(:heap_size_enabled) { false } let(:timeline_enabled) { true } subject(:stack_recorder) do @@ -17,6 +18,7 @@ cpu_time_enabled: cpu_time_enabled, alloc_samples_enabled: alloc_samples_enabled, heap_samples_enabled: heap_samples_enabled, + heap_size_enabled: heap_size_enabled, timeline_enabled: timeline_enabled, ) end @@ -124,6 +126,7 @@ def slot_two_mutex_locked? let(:cpu_time_enabled) { true } let(:alloc_samples_enabled) { true } let(:heap_samples_enabled) { true } + let(:heap_size_enabled) { true } let(:timeline_enabled) { true } let(:all_profile_types) do { @@ -137,8 +140,8 @@ def slot_two_mutex_locked? 
} end - def profile_types_without(*types) - all_profile_types.dup.tap { |it| it.delete_if { |k| types.include?(k) } } + def profile_types_without(type) + all_profile_types.dup.tap { |it| it.delete(type) { raise 'Missing key' } } end context 'when all profile types are enabled' do @@ -167,7 +170,15 @@ def profile_types_without(*types) let(:heap_samples_enabled) { false } it 'returns a pprof without the heap-live-samples type' do - expect(sample_types_from(decoded_profile)).to eq(profile_types_without('heap-live-samples', 'heap-live-size')) + expect(sample_types_from(decoded_profile)).to eq(profile_types_without('heap-live-samples')) + end + end + + context 'when heap-live-size is disabled' do + let(:heap_size_enabled) { false } + + it 'returns a pprof without the heap-live-size type' do + expect(sample_types_from(decoded_profile)).to eq(profile_types_without('heap-live-size')) end end @@ -183,6 +194,7 @@ def profile_types_without(*types) let(:cpu_time_enabled) { false } let(:alloc_samples_enabled) { false } let(:heap_samples_enabled) { false } + let(:heap_size_enabled) { false } let(:timeline_enabled) { false } it 'returns a pprof without the optional types' do @@ -341,7 +353,7 @@ def sample_types_from(decoded_profile) end end - describe 'heap samples' do + describe 'heap samples and sizes' do let(:sample_rate) { 50 } let(:metric_values) do { 'cpu-time' => 101, 'cpu-samples' => 1, 'wall-time' => 789, 'alloc-samples' => sample_rate, 'timeline' => 42 } @@ -405,6 +417,7 @@ def sample_allocation(obj) context 'when disabled' do let(:heap_samples_enabled) { false } + let(:heap_size_enabled) { false } it 'are ommitted from the profile' do # We sample from 2 distinct locations @@ -416,6 +429,7 @@ def sample_allocation(obj) context 'when enabled' do let(:heap_samples_enabled) { true } + let(:heap_size_enabled) { true } let(:heap_samples) do samples.select { |s| s.values[:'heap-live-samples'] > 0 } From bd4993811faf9075ee5cb90ce889c36d56643070 Mon Sep 17 00:00:00 2001 From: 
Alexandre Fonseca Date: Fri, 22 Dec 2023 12:15:17 +0000 Subject: [PATCH 3/5] [PROF-8667] Support special consts in ruby_obj_memsize_of --- ext/ddtrace_profiling_native_extension/ruby_helpers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ext/ddtrace_profiling_native_extension/ruby_helpers.c b/ext/ddtrace_profiling_native_extension/ruby_helpers.c index 883297472c1..4144326e899 100644 --- a/ext/ddtrace_profiling_native_extension/ruby_helpers.c +++ b/ext/ddtrace_profiling_native_extension/ruby_helpers.c @@ -172,7 +172,7 @@ size_t rb_obj_memsize_of(VALUE obj); // Wrapper around rb_obj_memsize_of to avoid hitting crashing paths. size_t ruby_obj_memsize_of(VALUE obj) { - switch (BUILTIN_TYPE(obj)) { + switch (rb_type(obj)) { case T_OBJECT: case T_MODULE: case T_CLASS: From 4e2909427fc3265ea159dd0b2bfe1e027171065c Mon Sep 17 00:00:00 2001 From: Alexandre Fonseca Date: Wed, 3 Jan 2024 18:04:23 +0000 Subject: [PATCH 4/5] [PROF-8667] Fix types --- sig/datadog/profiling/stack_recorder.rbs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/sig/datadog/profiling/stack_recorder.rbs b/sig/datadog/profiling/stack_recorder.rbs index f0681354e95..cc4787bad95 100644 --- a/sig/datadog/profiling/stack_recorder.rbs +++ b/sig/datadog/profiling/stack_recorder.rbs @@ -3,13 +3,20 @@ module Datadog class StackRecorder @no_concurrent_synchronize_mutex: ::Thread::Mutex - def initialize: (cpu_time_enabled: bool, alloc_samples_enabled: bool, heap_samples_enabled: bool, timeline_enabled: bool) -> void + def initialize: ( + cpu_time_enabled: bool, + alloc_samples_enabled: bool, + heap_samples_enabled: bool, + heap_size_enabled: bool, + timeline_enabled: bool, + ) -> void def self._native_initialize: ( Datadog::Profiling::StackRecorder recorder_instance, bool cpu_time_enabled, bool alloc_samples_enabled, bool heap_samples_enabled, + bool heap_size_enabled, bool timeline_enabled, ) -> true From 63c286e69ffae9ac86b661f405e8ad666f8fe54a Mon Sep 17 00:00:00 
2001 From: Alexandre Fonseca Date: Fri, 5 Jan 2024 12:22:20 +0000 Subject: [PATCH 5/5] [PROF-8667] Address comments --- .../heap_recorder.c | 36 +++++++++---------- .../heap_recorder.h | 22 +++++++++--- .../ruby_helpers.c | 10 +++++- .../stack_recorder.c | 11 +++--- spec/datadog/profiling/stack_recorder_spec.rb | 21 ++--------- 5 files changed, 51 insertions(+), 49 deletions(-) diff --git a/ext/ddtrace_profiling_native_extension/heap_recorder.c b/ext/ddtrace_profiling_native_extension/heap_recorder.c index 6ce6313d373..69fbed81442 100644 --- a/ext/ddtrace_profiling_native_extension/heap_recorder.c +++ b/ext/ddtrace_profiling_native_extension/heap_recorder.c @@ -94,6 +94,10 @@ static object_record* object_record_new(long, heap_record*, live_object_data); static void object_record_free(object_record*); struct heap_recorder { + // Config + // Whether the recorder should try to determine approximate sizes for tracked objects. + bool size_enabled; + // Map[key: heap_record_key*, record: heap_record*] // NOTE: We always use heap_record_key.type == HEAP_STACK for storage but support lookups // via heap_record_key.type == LOCATION_SLICE to allow for allocation-free fast-paths. 
@@ -149,6 +153,7 @@ heap_recorder* heap_recorder_new(void) { recorder->object_records_snapshot = NULL; recorder->reusable_locations = ruby_xcalloc(MAX_FRAMES_LIMIT, sizeof(ddog_prof_Location)); recorder->partial_object_record = NULL; + recorder->size_enabled = true; return recorder; } @@ -182,6 +187,10 @@ void heap_recorder_free(heap_recorder *heap_recorder) { ruby_xfree(heap_recorder); } +void heap_recorder_set_size_enabled(heap_recorder *heap_recorder, bool size_enabled) { + heap_recorder->size_enabled = size_enabled; +} + // WARN: Assumes this gets called before profiler is reinitialized on the fork void heap_recorder_after_fork(heap_recorder *heap_recorder) { if (heap_recorder == NULL) { @@ -250,15 +259,7 @@ void end_heap_allocation_recording(struct heap_recorder *heap_recorder, ddog_pro commit_allocation(heap_recorder, heap_record, partial_object_record); } -typedef struct { - // A reference to the heap recorder so we can access extra stuff to cleanup unused records. - heap_recorder *heap_recorder; - - // Whether we should update object sizes as part of the update iteration or not. 
- bool update_sizes; -} prepare_iteration_context; - -void heap_recorder_prepare_iteration(heap_recorder *heap_recorder, bool update_sizes) { +void heap_recorder_prepare_iteration(heap_recorder *heap_recorder) { if (heap_recorder == NULL) { return; } @@ -268,11 +269,7 @@ void heap_recorder_prepare_iteration(heap_recorder *heap_recorder, bool update_s rb_raise(rb_eRuntimeError, "New heap recorder iteration prepared without the previous one having been finished."); } - prepare_iteration_context context = (prepare_iteration_context) { - .heap_recorder = heap_recorder, - .update_sizes = update_sizes, - }; - st_foreach(heap_recorder->object_records, st_object_record_update, (st_data_t) &context); + st_foreach(heap_recorder->object_records, st_object_record_update, (st_data_t) heap_recorder); heap_recorder->object_records_snapshot = st_copy(heap_recorder->object_records); if (heap_recorder->object_records_snapshot == NULL) { @@ -376,7 +373,7 @@ static int st_object_record_entry_free(DDTRACE_UNUSED st_data_t key, st_data_t v static int st_object_record_update(st_data_t key, st_data_t value, st_data_t extra_arg) { long obj_id = (long) key; object_record *record = (object_record*) value; - prepare_iteration_context *context = (prepare_iteration_context*) extra_arg; + heap_recorder *recorder = (heap_recorder*) extra_arg; VALUE ref; @@ -388,7 +385,7 @@ static int st_object_record_update(st_data_t key, st_data_t value, st_data_t ext heap_record->num_tracked_objects--; // One less object using this heap record, it may have become unused... - cleanup_heap_record_if_unused(context->heap_recorder, heap_record); + cleanup_heap_record_if_unused(recorder, heap_record); object_record_free(record); return ST_DELETE; @@ -396,9 +393,12 @@ static int st_object_record_update(st_data_t key, st_data_t value, st_data_t ext // If we got this far, entry is still valid - if (context->update_sizes) { - // if we were asked to update sizes, do so... 
+ if (recorder->size_enabled && !record->object_data.is_frozen) { + // if we were asked to update sizes and this object was not already seen as being frozen, + // update size again. record->object_data.size = ruby_obj_memsize_of(ref); + // Check if it's now frozen so we skip a size update next time + record->object_data.is_frozen = RB_OBJ_FROZEN(ref); } return ST_CONTINUE; diff --git a/ext/ddtrace_profiling_native_extension/heap_recorder.h b/ext/ddtrace_profiling_native_extension/heap_recorder.h index abff3bc6bbe..1c5ae2b3890 100644 --- a/ext/ddtrace_profiling_native_extension/heap_recorder.h +++ b/ext/ddtrace_profiling_native_extension/heap_recorder.h @@ -38,6 +38,11 @@ typedef struct live_object_data { // // This enables us to calculate the age of this object in terms of GC executions. size_t alloc_gen; + + // Whether this object was previously seen as being frozen. If this is the case, + // we'll skip any further size updates since frozen objects are supposed to be + // immutable. + bool is_frozen; } live_object_data; // Data that is made available to iterators of heap recorder data for each live object @@ -53,6 +58,17 @@ heap_recorder* heap_recorder_new(void); // Free a previously initialized heap recorder. void heap_recorder_free(heap_recorder *heap_recorder); +// Sets whether this heap recorder should keep track of sizes or not. +// +// If set to true, the heap recorder will attempt to determine the approximate sizes of +// tracked objects and report them during iteration. +// If set to false, sizes returned during iteration should not be used/relied on (they +// may be 0 or the last determined size before disabling the tracking of sizes). +// +// NOTE: Default is true, i.e., it will attempt to determine approximate sizes of tracked +// objects. +void heap_recorder_set_size_enabled(heap_recorder *heap_recorder, bool size_enabled); + // Do any cleanup needed after forking. 
// WARN: Assumes this gets called before profiler is reinitialized on the fork void heap_recorder_after_fork(heap_recorder *heap_recorder); @@ -82,12 +98,8 @@ void end_heap_allocation_recording(heap_recorder *heap_recorder, ddog_prof_Slice // Update the heap recorder to reflect the latest state of the VM and prepare internal structures // for efficient iteration. // -// @param update_sizes -// True if we should re-calculate live object sizes ahead of the next iteration. If false, -// the previous sizes will be used (or 0 if we never updated them before). -// // WARN: This must be called strictly before iteration. Failing to do so will result in exceptions. -void heap_recorder_prepare_iteration(heap_recorder *heap_recorder, bool update_sizes); +void heap_recorder_prepare_iteration(heap_recorder *heap_recorder); // Optimize the heap recorder by cleaning up any data that might have been prepared specifically // for the purpose of iterating over the heap recorder data. diff --git a/ext/ddtrace_profiling_native_extension/ruby_helpers.c b/ext/ddtrace_profiling_native_extension/ruby_helpers.c index 4144326e899..fd1901068b0 100644 --- a/ext/ddtrace_profiling_native_extension/ruby_helpers.c +++ b/ext/ddtrace_profiling_native_extension/ruby_helpers.c @@ -171,6 +171,14 @@ bool ruby_ref_from_id(VALUE obj_id, VALUE *value) { size_t rb_obj_memsize_of(VALUE obj); // Wrapper around rb_obj_memsize_of to avoid hitting crashing paths. +// +// The crashing paths are due to calls to rb_bug so should hopefully +// be situations that can't happen. But given that rb_obj_memsize_of +// isn't fully public (it's externed but not part of public headers) +// there is a possibility that it is just assumed that whoever calls +// it will do proper checking for those cases. We want to be cautious +// so we'll assume that's the case and will skip over known crashing +// paths in this wrapper. 
size_t ruby_obj_memsize_of(VALUE obj) { switch (rb_type(obj)) { case T_OBJECT: @@ -190,7 +198,7 @@ size_t ruby_obj_memsize_of(VALUE obj) { case T_FLOAT: case T_SYMBOL: case T_BIGNUM: - // case T_NODE: -> Throws exception in rb_obj_memsize_of + // case T_NODE: -> Crashes the vm in rb_obj_memsize_of case T_STRUCT: case T_ZOMBIE: #ifndef NO_T_MOVED diff --git a/ext/ddtrace_profiling_native_extension/stack_recorder.c b/ext/ddtrace_profiling_native_extension/stack_recorder.c index f1a44715d72..e3df3d76cc8 100644 --- a/ext/ddtrace_profiling_native_extension/stack_recorder.c +++ b/ext/ddtrace_profiling_native_extension/stack_recorder.c @@ -184,7 +184,6 @@ struct stack_recorder_state { uint8_t position_for[ALL_VALUE_TYPES_COUNT]; uint8_t enabled_values_count; - bool heap_size_enabled; }; // Used to return a pair of values from sampler_lock_active_profile() @@ -217,7 +216,7 @@ static VALUE _native_initialize( VALUE cpu_time_enabled, VALUE alloc_samples_enabled, VALUE heap_samples_enabled, - VALUE heap_sizes_enabled, + VALUE heap_size_enabled, VALUE timeline_enabled ); static VALUE _native_serialize(VALUE self, VALUE recorder_instance); @@ -311,7 +310,6 @@ static VALUE _native_new(VALUE klass) { // heap samples, we will free and reset heap_recorder to NULL, effectively disabling all behaviour specific // to heap profiling (all calls to heap_recorder_* with a NULL heap recorder are noops). state->heap_recorder = heap_recorder_new(); - state->heap_size_enabled = true; // Note: Don't raise exceptions after this point, since it'll lead to libdatadog memory leaking! 
@@ -435,11 +433,10 @@ static VALUE _native_initialize( if (heap_size_enabled == Qtrue) { enabled_value_types[next_enabled_pos] = (ddog_prof_ValueType) HEAP_SIZE_VALUE; state->position_for[HEAP_SIZE_VALUE_ID] = next_enabled_pos++; - state->heap_size_enabled = true; } else { state->position_for[HEAP_SIZE_VALUE_ID] = next_disabled_pos++; - state->heap_size_enabled = false; } + heap_recorder_set_size_enabled(state->heap_recorder, heap_size_enabled); if (heap_samples_enabled == Qfalse && heap_size_enabled == Qfalse) { // Turns out heap sampling is disabled but we initialized everything in _native_new @@ -474,7 +471,7 @@ static VALUE _native_serialize(DDTRACE_UNUSED VALUE _self, VALUE recorder_instan // Prepare the iteration on heap recorder we'll be doing outside the GVL. The preparation needs to // happen while holding on to the GVL. - heap_recorder_prepare_iteration(state->heap_recorder, state->heap_size_enabled); + heap_recorder_prepare_iteration(state->heap_recorder); // We'll release the Global VM Lock while we're calling serialize, so that the Ruby VM can continue to work while this // is pending @@ -882,7 +879,7 @@ static VALUE _native_start_fake_slow_heap_serialization(DDTRACE_UNUSED VALUE _se struct stack_recorder_state *state; TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state); - heap_recorder_prepare_iteration(state->heap_recorder, false); + heap_recorder_prepare_iteration(state->heap_recorder); return Qnil; } diff --git a/spec/datadog/profiling/stack_recorder_spec.rb b/spec/datadog/profiling/stack_recorder_spec.rb index c4c314866dd..f330c79d1a8 100644 --- a/spec/datadog/profiling/stack_recorder_spec.rb +++ b/spec/datadog/profiling/stack_recorder_spec.rb @@ -449,28 +449,13 @@ def sample_allocation(obj) it 'include accurate object sizes' do string_sample = heap_samples.find { |s| s.labels[:'allocation class'] == 'String' } - expect(string_sample.values[:'heap-live-size']).to be_between( - # 18 UTF-8 
characters at minimum. - (18 * 2) * sample_rate, - # Add some extra padding (per char and object-wide) for extra data. - (18 * 4 + 40) * sample_rate - ) + expect(string_sample.values[:'heap-live-size']).to eq(ObjectSpace.memsize_of(a_string) * sample_rate) array_sample = heap_samples.find { |s| s.labels[:'allocation class'] == 'Array' } - expect(array_sample.values[:'heap-live-size']).to be_between( - # Basic object + 100 FIXNUMs (32 bits) - (40 + 100 * 4) * sample_rate, - # Basic object + 128 FIXNUMs (64 bits and round to nearest power of 2) and eventual extra data - (40 + 128 * 8 + 10) * sample_rate, - ) + expect(array_sample.values[:'heap-live-size']).to eq(ObjectSpace.memsize_of(an_array) * sample_rate) hash_sample = heap_samples.find { |s| s.labels[:'allocation class'] == 'Hash' } - expect(hash_sample.values[:'heap-live-size']).to be_between( - # Basic object + 4 table entries + no bins - (40 + 4 * 16) * sample_rate, - # Add extra padding to hash itself as well as each entry and 8 bins - (80 + 4 * 32 + 8 * 16) * sample_rate, - ) + expect(hash_sample.values[:'heap-live-size']).to eq(ObjectSpace.memsize_of(a_hash) * sample_rate) end it 'include accurate object ages' do