From 51a3d4763d2513bca7e76a45965fead8b4ecba48 Mon Sep 17 00:00:00 2001
From: Diogo Netto <61364108+d-netto@users.noreply.github.com>
Date: Sat, 4 Nov 2023 10:23:15 -0300
Subject: [PATCH] make pool_live_bytes metric more accurate (#52015)

`pool_live_bytes` was previously updated lazily during the GC, so it was only
accurate right after a collection. Track it eagerly in a per-thread counter
that is bumped at allocation time, so the metric is also accurate when
gathered well after a GC has happened.
---
 src/gc.c            | 15 ++++++++++++---
 src/julia_threads.h |  1 +
 2 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/src/gc.c b/src/gc.c
index 982c2e3e736332..5b833d911d21d7 100644
--- a/src/gc.c
+++ b/src/gc.c
@@ -753,7 +753,6 @@ int current_sweep_full = 0;
 int under_pressure = 0;
 
 // Full collection heuristics
-static int64_t pool_live_bytes = 0;
 static int64_t live_bytes = 0;
 static int64_t promoted_bytes = 0;
 static int64_t last_full_live = 0;  // live_bytes after last full collection
@@ -1306,6 +1305,8 @@ STATIC_INLINE jl_value_t *jl_gc_pool_alloc_inner(jl_ptls_t ptls, int pool_offset
     maybe_collect(ptls);
     jl_atomic_store_relaxed(&ptls->gc_num.allocd,
         jl_atomic_load_relaxed(&ptls->gc_num.allocd) + osize);
+    jl_atomic_store_relaxed(&ptls->gc_num.pool_live_bytes,
+        jl_atomic_load_relaxed(&ptls->gc_num.pool_live_bytes) + osize);
     jl_atomic_store_relaxed(&ptls->gc_num.poolalloc,
         jl_atomic_load_relaxed(&ptls->gc_num.poolalloc) + 1);
     // first try to use the freelist
@@ -1492,7 +1493,8 @@ static void gc_sweep_page(jl_gc_pool_t *p, jl_gc_page_stack_t *allocd, jl_gc_pag
         }
     }
     gc_time_count_page(freedall, pg_skpd);
-    jl_atomic_fetch_add((_Atomic(int64_t) *)&pool_live_bytes, GC_PAGE_SZ - GC_PAGE_OFFSET - nfree * osize);
+    jl_ptls_t ptls = gc_all_tls_states[pg->thread_n];
+    jl_atomic_fetch_add(&ptls->gc_num.pool_live_bytes, GC_PAGE_SZ - GC_PAGE_OFFSET - nfree * osize);
     jl_atomic_fetch_add((_Atomic(int64_t) *)&gc_num.freed, (nfree - old_nfree) * osize);
 }
 
@@ -1614,6 +1616,7 @@ static void gc_sweep_pool(void)
             }
             continue;
         }
+        jl_atomic_store_relaxed(&ptls2->gc_num.pool_live_bytes, 0);
        for (int i = 0; i < JL_GC_N_POOLS; i++) {
            jl_gc_pool_t *p = &ptls2->heap.norm_pools[i];
            jl_taggedvalue_t *last = p->freelist;
@@ -3265,6 +3268,13 @@ JL_DLLEXPORT int64_t jl_gc_sync_total_bytes(int64_t offset) JL_NOTSAFEPOINT
 
 JL_DLLEXPORT int64_t jl_gc_pool_live_bytes(void)
 {
+    int64_t pool_live_bytes = 0;
+    for (int i = 0; i < gc_n_threads; i++) {
+        jl_ptls_t ptls2 = gc_all_tls_states[i];
+        if (ptls2 != NULL) {
+            pool_live_bytes += jl_atomic_load_relaxed(&ptls2->gc_num.pool_live_bytes);
+        }
+    }
     return pool_live_bytes;
 }
 
@@ -3470,7 +3480,6 @@ static int _jl_gc_collect(jl_ptls_t ptls, jl_gc_collection_t collection)
         promoted_bytes = 0;
     }
     scanned_bytes = 0;
-    pool_live_bytes = 0;
     // 6. start sweeping
     uint64_t start_sweep_time = jl_hrtime();
     JL_PROBE_GC_SWEEP_BEGIN(sweep_full);
diff --git a/src/julia_threads.h b/src/julia_threads.h
index 025c5707e5507b..98aa4497abfd21 100644
--- a/src/julia_threads.h
+++ b/src/julia_threads.h
@@ -130,6 +130,7 @@ typedef struct {
 
 typedef struct {
     _Atomic(int64_t) allocd;
+    _Atomic(int64_t) pool_live_bytes;
    _Atomic(int64_t) freed;
    _Atomic(uint64_t) malloc;
    _Atomic(uint64_t) realloc;
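
For reviewers, below is a minimal, self-contained sketch of the counting
scheme this patch adopts. It is illustrative only, not Julia's code; the names
NUM_THREADS, on_pool_alloc, on_sweep_start, on_page_swept and
pool_live_bytes_total are hypothetical. The idea it demonstrates: each thread
owns a relaxed atomic counter bumped on its own allocation fast path, the
sweep zeroes the counters and re-accumulates per-page live bytes with an
atomic RMW (another thread may own the page), and the metric is computed only
on read by summing all counters.

/*
 * Illustrative sketch only -- not Julia's code.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_THREADS 4   /* hypothetical fixed thread count for the sketch */

/* One counter per thread, analogous to ptls->gc_num.pool_live_bytes. */
static _Atomic(int64_t) pool_live_bytes_per_thread[NUM_THREADS];

/* Allocation fast path: only thread `tid` writes its own counter here, so a
 * relaxed load + store pair is enough (mirrors the jl_atomic_load_relaxed /
 * jl_atomic_store_relaxed pair added to jl_gc_pool_alloc_inner). */
static void on_pool_alloc(int tid, int64_t osize)
{
    int64_t cur = atomic_load_explicit(&pool_live_bytes_per_thread[tid],
                                       memory_order_relaxed);
    atomic_store_explicit(&pool_live_bytes_per_thread[tid], cur + osize,
                          memory_order_relaxed);
}

/* Sweep start: the counter is zeroed before live bytes are re-accumulated
 * (mirrors the jl_atomic_store_relaxed(..., 0) added to gc_sweep_pool). */
static void on_sweep_start(int tid)
{
    atomic_store_explicit(&pool_live_bytes_per_thread[tid], 0,
                          memory_order_relaxed);
}

/* Per-page sweep: the sweeping thread may be updating a counter owned by
 * another thread (the page's owner), so an atomic fetch-add is used
 * (mirrors the jl_atomic_fetch_add call in gc_sweep_page). */
static void on_page_swept(int page_owner_tid, int64_t page_live_bytes)
{
    atomic_fetch_add_explicit(&pool_live_bytes_per_thread[page_owner_tid],
                              page_live_bytes, memory_order_relaxed);
}

/* Metric read: sum every thread's counter (mirrors jl_gc_pool_live_bytes). */
static int64_t pool_live_bytes_total(void)
{
    int64_t total = 0;
    for (int i = 0; i < NUM_THREADS; i++)
        total += atomic_load_explicit(&pool_live_bytes_per_thread[i],
                                      memory_order_relaxed);
    return total;
}

int main(void)
{
    on_pool_alloc(0, 64);   /* fast-path allocations on two threads        */
    on_pool_alloc(1, 32);
    on_sweep_start(1);      /* a sweep resets thread 1's counter...        */
    on_page_swept(1, 32);   /* ...and re-adds the bytes found still live   */
    printf("pool_live_bytes = %lld\n", (long long)pool_live_bytes_total());
    return 0;
}

The design trade-off mirrored here is the same one the patch makes: no
contended global atomic on the allocation fast path, at the cost of an
O(n_threads) summation whenever jl_gc_pool_live_bytes() is called.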