From 129314273beeed5b45ea1914241081999c2ec7f9 Mon Sep 17 00:00:00 2001
From: Minsoo Choo
Date: Tue, 9 Apr 2024 18:12:33 -0400
Subject: [PATCH] Update jemalloc to version 5.3.0

Summary: jemalloc 5.3.0 was released in 2022 but has not yet been imported into the FreeBSD base system. The new version includes new features and bug fixes, which are listed in the [[ https://github.com/jemalloc/jemalloc/releases/tag/5.3.0 | release notes ]].

Test Plan:
```
cd lib/libc
make
cd /usr/src/
make buildworld && make installworld
```
Then reboot and test several programs and applications.

Reviewers: #contributor_reviews_src, vangyzen

Subscribers: jasone, vangyzen, instructionset_gmail.com, jrtc27, brooks, gbe, emaste, lwhsu, #contributor_reviews_src, imp

Tags: #contributor_reviews_src

Differential Revision: https://reviews.freebsd.org/D41421
---
 contrib/jemalloc/.clang-format | 122 +
 contrib/jemalloc/ChangeLog | 100 +
 contrib/jemalloc/FREEBSD-Xlist | 2 +
 contrib/jemalloc/FREEBSD-diffs | 161 +-
 contrib/jemalloc/VERSION | 2 +-
 contrib/jemalloc/doc/jemalloc.3 | 198 +-
 .../jemalloc/internal/activity_callback.h | 23 +
 .../include/jemalloc/internal/arena_externs.h | 77 +-
 .../jemalloc/internal/arena_inlines_a.h | 35 +-
 .../jemalloc/internal/arena_inlines_b.h | 489 ++-
 .../include/jemalloc/internal/arena_stats.h | 227 +-
 .../include/jemalloc/internal/arena_structs.h | 101 +
 .../jemalloc/internal/arena_structs_a.h | 11 -
 .../jemalloc/internal/arena_structs_b.h | 232 --
 .../include/jemalloc/internal/arena_types.h | 23 +-
 .../include/jemalloc/internal/atomic.h | 33 +-
 .../internal/background_thread_externs.h | 7 +-
 .../internal/background_thread_inlines.h | 14 -
 .../internal/background_thread_structs.h | 12 +
 .../jemalloc/include/jemalloc/internal/base.h | 110 +
 .../include/jemalloc/internal/base_externs.h | 22 -
 .../include/jemalloc/internal/base_inlines.h | 13 -
 .../include/jemalloc/internal/base_structs.h | 59 -
 .../include/jemalloc/internal/base_types.h | 33 -
 .../jemalloc/include/jemalloc/internal/bin.h | 85 +-
 .../include/jemalloc/internal/bin_info.h | 50 +
 .../include/jemalloc/internal/bin_stats.h | 5 +-
 .../include/jemalloc/internal/bin_types.h | 2 +-
 .../include/jemalloc/internal/bit_util.h | 457 ++-
 .../include/jemalloc/internal/bitmap.h | 21 +-
 .../include/jemalloc/internal/buf_writer.h | 32 +
 .../include/jemalloc/internal/cache_bin.h | 625 +++-
 .../include/jemalloc/internal/counter.h | 34 +
 .../jemalloc/include/jemalloc/internal/ctl.h | 31 +-
 .../include/jemalloc/internal/decay.h | 186 +
 .../include/jemalloc/internal/ecache.h | 55 +
 .../include/jemalloc/internal/edata.h | 698 ++++
 .../include/jemalloc/internal/edata_cache.h | 49 +
 .../include/jemalloc/internal/ehooks.h | 412 +++
 .../jemalloc/include/jemalloc/internal/emap.h | 357 ++
 .../include/jemalloc/internal/emitter.h | 74 +-
 .../jemalloc/include/jemalloc/internal/eset.h | 77 +
 .../include/jemalloc/internal/exp_grow.h | 50 +
 .../include/jemalloc/internal/extent.h | 137 +
 .../jemalloc/internal/extent_externs.h | 83 -
 .../jemalloc/internal/extent_inlines.h | 501 ---
 .../jemalloc/internal/extent_structs.h | 256 --
 .../include/jemalloc/internal/extent_types.h | 23 -
 .../jemalloc/include/jemalloc/internal/fb.h | 373 ++
 .../jemalloc/include/jemalloc/internal/fxp.h | 126 +
 .../jemalloc/include/jemalloc/internal/hash.h | 63 +-
 .../jemalloc/include/jemalloc/internal/hpa.h | 182 +
 .../include/jemalloc/internal/hpa_hooks.h | 17 +
 .../include/jemalloc/internal/hpa_opts.h | 74 +
 .../include/jemalloc/internal/hpdata.h | 413 +++
.../include/jemalloc/internal/inspect.h | 40 + .../internal/jemalloc_internal_decls.h | 16 +- .../internal/jemalloc_internal_defs.h | 91 +- .../internal/jemalloc_internal_externs.h | 28 +- .../internal/jemalloc_internal_includes.h | 16 +- .../internal/jemalloc_internal_inlines_a.h | 82 +- .../internal/jemalloc_internal_inlines_b.h | 52 +- .../internal/jemalloc_internal_inlines_c.h | 124 +- .../internal/jemalloc_internal_macros.h | 13 +- .../internal/jemalloc_internal_types.h | 28 +- .../jemalloc/internal/jemalloc_preamble.h | 52 +- .../include/jemalloc/internal/large_externs.h | 26 +- .../include/jemalloc/internal/lockedint.h | 204 ++ .../include/jemalloc/internal/malloc_io.h | 11 +- .../include/jemalloc/internal/mpsc_queue.h | 134 + .../include/jemalloc/internal/mutex.h | 63 +- .../include/jemalloc/internal/mutex_pool.h | 94 - .../include/jemalloc/internal/mutex_prof.h | 13 +- .../include/jemalloc/internal/nstime.h | 43 +- .../jemalloc/include/jemalloc/internal/pa.h | 243 ++ .../jemalloc/include/jemalloc/internal/pac.h | 178 + .../include/jemalloc/internal/pages.h | 31 + .../jemalloc/include/jemalloc/internal/pai.h | 95 + .../jemalloc/include/jemalloc/internal/peak.h | 37 + .../include/jemalloc/internal/peak_event.h | 24 + .../jemalloc/include/jemalloc/internal/ph.h | 817 +++-- .../jemalloc/internal/private_namespace.h | 479 ++- .../jemalloc/include/jemalloc/internal/prng.h | 93 +- .../include/jemalloc/internal/prof_data.h | 37 + .../include/jemalloc/internal/prof_externs.h | 116 +- .../include/jemalloc/internal/prof_hook.h | 21 + .../include/jemalloc/internal/prof_inlines.h | 261 ++ .../jemalloc/internal/prof_inlines_a.h | 85 - .../jemalloc/internal/prof_inlines_b.h | 250 -- .../include/jemalloc/internal/prof_log.h | 22 + .../include/jemalloc/internal/prof_recent.h | 23 + .../include/jemalloc/internal/prof_stats.h | 17 + .../include/jemalloc/internal/prof_structs.h | 47 +- .../include/jemalloc/internal/prof_sys.h | 30 + .../include/jemalloc/internal/prof_types.h | 37 +- .../include/jemalloc/internal/psset.h | 131 + .../jemalloc/internal/public_namespace.h | 4 +- .../jemalloc/include/jemalloc/internal/ql.h | 129 +- .../jemalloc/include/jemalloc/internal/qr.h | 130 +- .../include/jemalloc/internal/quantum.h | 12 +- .../jemalloc/include/jemalloc/internal/rb.h | 920 ++++- .../include/jemalloc/internal/rtree.h | 520 +-- .../include/jemalloc/internal/rtree_tsd.h | 24 +- .../include/jemalloc/internal/safety_check.h | 7 +- .../jemalloc/include/jemalloc/internal/san.h | 191 + .../include/jemalloc/internal/san_bump.h | 51 + .../jemalloc/include/jemalloc/internal/sc.h | 78 +- .../jemalloc/include/jemalloc/internal/sec.h | 120 + .../include/jemalloc/internal/sec_opts.h | 59 + .../include/jemalloc/internal/slab_data.h | 12 + .../include/jemalloc/internal/stats.h | 29 +- .../jemalloc/include/jemalloc/internal/sz.h | 97 +- .../jemalloc/internal/tcache_externs.h | 60 +- .../jemalloc/internal/tcache_inlines.h | 156 +- .../jemalloc/internal/tcache_structs.h | 62 +- .../include/jemalloc/internal/tcache_types.h | 40 +- .../include/jemalloc/internal/test_hooks.h | 16 +- .../include/jemalloc/internal/thread_event.h | 301 ++ .../include/jemalloc/internal/ticker.h | 92 +- .../jemalloc/include/jemalloc/internal/tsd.h | 243 +- .../include/jemalloc/internal/tsd_generic.h | 23 +- .../internal/tsd_malloc_thread_cleanup.h | 2 +- .../include/jemalloc/internal/tsd_types.h | 2 +- .../include/jemalloc/internal/typed_list.h | 55 + .../jemalloc/include/jemalloc/internal/util.h | 56 + .../include/jemalloc/internal/witness.h 
| 183 +- contrib/jemalloc/include/jemalloc/jemalloc.h | 73 +- .../include/jemalloc/jemalloc_FreeBSD.h | 1 + contrib/jemalloc/src/arena.c | 2063 +++++------ contrib/jemalloc/src/background_thread.c | 335 +- contrib/jemalloc/src/base.c | 193 +- contrib/jemalloc/src/bin.c | 30 +- contrib/jemalloc/src/bin_info.c | 30 + contrib/jemalloc/src/bitmap.c | 1 - contrib/jemalloc/src/buf_writer.c | 144 + contrib/jemalloc/src/cache_bin.c | 99 + contrib/jemalloc/src/ckh.c | 7 +- contrib/jemalloc/src/counter.c | 30 + contrib/jemalloc/src/ctl.c | 1691 +++++++-- contrib/jemalloc/src/decay.c | 295 ++ contrib/jemalloc/src/ecache.c | 35 + contrib/jemalloc/src/edata.c | 6 + contrib/jemalloc/src/edata_cache.c | 154 + contrib/jemalloc/src/ehooks.c | 275 ++ contrib/jemalloc/src/emap.c | 386 ++ contrib/jemalloc/src/eset.c | 282 ++ contrib/jemalloc/src/exp_grow.c | 8 + contrib/jemalloc/src/extent.c | 2488 ++++--------- contrib/jemalloc/src/extent_dss.c | 42 +- contrib/jemalloc/src/extent_mmap.c | 1 - contrib/jemalloc/src/fxp.c | 124 + contrib/jemalloc/src/hash.c | 3 - contrib/jemalloc/src/hook.c | 6 +- contrib/jemalloc/src/hpa.c | 1044 ++++++ contrib/jemalloc/src/hpa_hooks.c | 63 + contrib/jemalloc/src/hpdata.c | 325 ++ contrib/jemalloc/src/inspect.c | 77 + contrib/jemalloc/src/jemalloc.c | 2155 +++++++---- contrib/jemalloc/src/large.c | 299 +- contrib/jemalloc/src/malloc_io.c | 46 +- contrib/jemalloc/src/mutex.c | 19 +- contrib/jemalloc/src/mutex_pool.c | 18 - contrib/jemalloc/src/nstime.c | 127 +- contrib/jemalloc/src/pa.c | 277 ++ contrib/jemalloc/src/pa_extra.c | 191 + contrib/jemalloc/src/pac.c | 587 +++ contrib/jemalloc/src/pages.c | 209 +- contrib/jemalloc/src/pai.c | 31 + contrib/jemalloc/src/peak_event.c | 82 + contrib/jemalloc/src/prng.c | 3 - contrib/jemalloc/src/prof.c | 3189 +++-------------- contrib/jemalloc/src/prof_data.c | 1447 ++++++++ contrib/jemalloc/src/prof_log.c | 717 ++++ contrib/jemalloc/src/prof_recent.c | 600 ++++ contrib/jemalloc/src/prof_stats.c | 57 + contrib/jemalloc/src/prof_sys.c | 669 ++++ contrib/jemalloc/src/psset.c | 385 ++ contrib/jemalloc/src/rtree.c | 75 +- contrib/jemalloc/src/safety_check.c | 16 +- contrib/jemalloc/src/san.c | 208 ++ contrib/jemalloc/src/san_bump.c | 104 + contrib/jemalloc/src/sc.c | 17 +- contrib/jemalloc/src/sec.c | 422 +++ contrib/jemalloc/src/stats.c | 794 +++- contrib/jemalloc/src/sz.c | 52 +- contrib/jemalloc/src/tcache.c | 1137 +++--- contrib/jemalloc/src/thread_event.c | 343 ++ contrib/jemalloc/src/ticker.c | 31 +- contrib/jemalloc/src/tsd.c | 75 +- contrib/jemalloc/src/witness.c | 44 +- lib/libc/stdlib/malloc/jemalloc/Makefile.inc | 14 +- 191 files changed, 28010 insertions(+), 12273 deletions(-) create mode 100644 contrib/jemalloc/.clang-format create mode 100644 contrib/jemalloc/include/jemalloc/internal/activity_callback.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/arena_structs.h delete mode 100644 contrib/jemalloc/include/jemalloc/internal/arena_structs_a.h delete mode 100644 contrib/jemalloc/include/jemalloc/internal/arena_structs_b.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/base.h delete mode 100644 contrib/jemalloc/include/jemalloc/internal/base_externs.h delete mode 100644 contrib/jemalloc/include/jemalloc/internal/base_inlines.h delete mode 100644 contrib/jemalloc/include/jemalloc/internal/base_structs.h delete mode 100644 contrib/jemalloc/include/jemalloc/internal/base_types.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/bin_info.h create mode 100644 
contrib/jemalloc/include/jemalloc/internal/buf_writer.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/counter.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/decay.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/ecache.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/edata.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/edata_cache.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/ehooks.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/emap.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/eset.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/exp_grow.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/extent.h delete mode 100644 contrib/jemalloc/include/jemalloc/internal/extent_externs.h delete mode 100644 contrib/jemalloc/include/jemalloc/internal/extent_inlines.h delete mode 100644 contrib/jemalloc/include/jemalloc/internal/extent_structs.h delete mode 100644 contrib/jemalloc/include/jemalloc/internal/extent_types.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/fb.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/fxp.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/hpa.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/hpa_hooks.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/hpa_opts.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/hpdata.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/inspect.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/lockedint.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/mpsc_queue.h delete mode 100644 contrib/jemalloc/include/jemalloc/internal/mutex_pool.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/pa.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/pac.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/pai.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/peak.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/peak_event.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/prof_data.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/prof_hook.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/prof_inlines.h delete mode 100644 contrib/jemalloc/include/jemalloc/internal/prof_inlines_a.h delete mode 100644 contrib/jemalloc/include/jemalloc/internal/prof_inlines_b.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/prof_log.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/prof_recent.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/prof_stats.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/prof_sys.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/psset.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/san.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/san_bump.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/sec.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/sec_opts.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/slab_data.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/thread_event.h create mode 100644 contrib/jemalloc/include/jemalloc/internal/typed_list.h create mode 100644 contrib/jemalloc/src/bin_info.c create mode 100644 contrib/jemalloc/src/buf_writer.c create mode 100644 
contrib/jemalloc/src/cache_bin.c create mode 100644 contrib/jemalloc/src/counter.c create mode 100644 contrib/jemalloc/src/decay.c create mode 100644 contrib/jemalloc/src/ecache.c create mode 100644 contrib/jemalloc/src/edata.c create mode 100644 contrib/jemalloc/src/edata_cache.c create mode 100644 contrib/jemalloc/src/ehooks.c create mode 100644 contrib/jemalloc/src/emap.c create mode 100644 contrib/jemalloc/src/eset.c create mode 100644 contrib/jemalloc/src/exp_grow.c create mode 100644 contrib/jemalloc/src/fxp.c delete mode 100644 contrib/jemalloc/src/hash.c create mode 100644 contrib/jemalloc/src/hpa.c create mode 100644 contrib/jemalloc/src/hpa_hooks.c create mode 100644 contrib/jemalloc/src/hpdata.c create mode 100644 contrib/jemalloc/src/inspect.c delete mode 100644 contrib/jemalloc/src/mutex_pool.c create mode 100644 contrib/jemalloc/src/pa.c create mode 100644 contrib/jemalloc/src/pa_extra.c create mode 100644 contrib/jemalloc/src/pac.c create mode 100644 contrib/jemalloc/src/pai.c create mode 100644 contrib/jemalloc/src/peak_event.c delete mode 100644 contrib/jemalloc/src/prng.c create mode 100644 contrib/jemalloc/src/prof_data.c create mode 100644 contrib/jemalloc/src/prof_log.c create mode 100644 contrib/jemalloc/src/prof_recent.c create mode 100644 contrib/jemalloc/src/prof_stats.c create mode 100644 contrib/jemalloc/src/prof_sys.c create mode 100644 contrib/jemalloc/src/psset.c create mode 100644 contrib/jemalloc/src/san.c create mode 100644 contrib/jemalloc/src/san_bump.c create mode 100644 contrib/jemalloc/src/sec.c create mode 100644 contrib/jemalloc/src/thread_event.c diff --git a/contrib/jemalloc/.clang-format b/contrib/jemalloc/.clang-format new file mode 100644 index 00000000000000..719c03c59c2a01 --- /dev/null +++ b/contrib/jemalloc/.clang-format @@ -0,0 +1,122 @@ +# jemalloc targets clang-format version 8. We include every option it supports +# here, but comment out the ones that aren't relevant for us. 
+--- +# AccessModifierOffset: -2 +AlignAfterOpenBracket: DontAlign +AlignConsecutiveAssignments: false +AlignConsecutiveDeclarations: false +AlignEscapedNewlines: Right +AlignOperands: false +AlignTrailingComments: false +AllowAllParametersOfDeclarationOnNextLine: true +AllowShortBlocksOnASingleLine: false +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: Empty +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterReturnType: AllDefinitions +AlwaysBreakBeforeMultilineStrings: true +# AlwaysBreakTemplateDeclarations: Yes +BinPackArguments: true +BinPackParameters: true +BraceWrapping: + AfterClass: false + AfterControlStatement: false + AfterEnum: false + AfterFunction: false + AfterNamespace: false + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + BeforeCatch: false + BeforeElse: false + IndentBraces: false +# BreakAfterJavaFieldAnnotations: true +BreakBeforeBinaryOperators: NonAssignment +BreakBeforeBraces: Attach +BreakBeforeTernaryOperators: true +# BreakConstructorInitializers: BeforeColon +# BreakInheritanceList: BeforeColon +BreakStringLiterals: false +ColumnLimit: 80 +# CommentPragmas: '' +# CompactNamespaces: true +# ConstructorInitializerAllOnOneLineOrOnePerLine: true +# ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 2 +Cpp11BracedListStyle: true +DerivePointerAlignment: false +DisableFormat: false +ExperimentalAutoDetectBinPacking: false +FixNamespaceComments: true +ForEachMacros: [ ql_foreach, qr_foreach, ] +# IncludeBlocks: Preserve +# IncludeCategories: +# - Regex: '^<.*\.h(pp)?>' +# Priority: 1 +# IncludeIsMainRegex: '' +IndentCaseLabels: false +IndentPPDirectives: AfterHash +IndentWidth: 4 +IndentWrappedFunctionNames: false +# JavaImportGroups: [] +# JavaScriptQuotes: Leave +# JavaScriptWrapImports: True +KeepEmptyLinesAtTheStartOfBlocks: false +Language: Cpp +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 1 +# NamespaceIndentation: None +# ObjCBinPackProtocolList: Auto +# ObjCBlockIndentWidth: 2 +# ObjCSpaceAfterProperty: false +# ObjCSpaceBeforeProtocolList: false + +PenaltyBreakAssignment: 2 +PenaltyBreakBeforeFirstCallParameter: 1 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +# PenaltyBreakTemplateDeclaration: 10 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 60 +PointerAlignment: Right +# RawStringFormats: +# - Language: TextProto +# Delimiters: +# - 'pb' +# - 'proto' +# EnclosingFunctions: +# - 'PARSE_TEXT_PROTO' +# BasedOnStyle: google +# - Language: Cpp +# Delimiters: +# - 'cc' +# - 'cpp' +# BasedOnStyle: llvm +# CanonicalDelimiter: 'cc' +ReflowComments: true +SortIncludes: false +SpaceAfterCStyleCast: false +# SpaceAfterTemplateKeyword: true +SpaceBeforeAssignmentOperators: true +# SpaceBeforeCpp11BracedList: false +# SpaceBeforeCtorInitializerColon: true +# SpaceBeforeInheritanceColon: true +SpaceBeforeParens: ControlStatements +# SpaceBeforeRangeBasedForLoopColon: true +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 2 +SpacesInAngles: false +SpacesInCStyleCastParentheses: false +# SpacesInContainerLiterals: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +# Standard: Cpp11 +# This is nominally supported in clang-format version 8, but not in the build +# used by some of the core jemalloc developers. +# StatementMacros: [] +TabWidth: 8 +UseTab: Never +... 
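The new .clang-format above states that it targets clang-format version 8. As a minimal sketch (not part of the patch), one way to apply that style locally, assuming a clang-format 8 binary named `clang-format` is on PATH and that `src/tcache.c` is used purely as an example file:
```
# Format one jemalloc source file in place using the bundled .clang-format,
# then inspect what the formatter changed.
cd contrib/jemalloc
clang-format --style=file -i src/tcache.c
git diff --stat src/tcache.c
```
Newer clang-format releases may produce slightly different output than version 8, so results should be treated as advisory rather than authoritative.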
diff --git a/contrib/jemalloc/ChangeLog b/contrib/jemalloc/ChangeLog index e55813b7beccb9..32fde56247f6de 100644 --- a/contrib/jemalloc/ChangeLog +++ b/contrib/jemalloc/ChangeLog @@ -4,6 +4,106 @@ brevity. Much more detail can be found in the git revision history: https://github.com/jemalloc/jemalloc +* 5.3.0 (May 6, 2022) + + This release contains many speed and space optimizations, from micro + optimizations on common paths to rework of internal data structures and + locking schemes, and many more too detailed to list below. Multiple percent + of system level metric improvements were measured in tested production + workloads. The release has gone through large-scale production testing. + + New features: + - Add the thread.idle mallctl which hints that the calling thread will be + idle for a nontrivial period of time. (@davidtgoldblatt) + - Allow small size classes to be the maximum size class to cache in the + thread-specific cache, through the opt.[lg_]tcache_max option. (@interwq, + @jordalgo) + - Make the behavior of realloc(ptr, 0) configurable with opt.zero_realloc. + (@davidtgoldblatt) + - Add 'make uninstall' support. (@sangshuduo, @Lapenkov) + - Support C++17 over-aligned allocation. (@marksantaniello) + - Add the thread.peak mallctl for approximate per-thread peak memory tracking. + (@davidtgoldblatt) + - Add interval-based stats output opt.stats_interval. (@interwq) + - Add prof.prefix to override filename prefixes for dumps. (@zhxchen17) + - Add high resolution timestamp support for profiling. (@tyroguru) + - Add the --collapsed flag to jeprof for flamegraph generation. + (@igorwwwwwwwwwwwwwwwwwwww) + - Add the --debug-syms-by-id option to jeprof for debug symbols discovery. + (@DeannaGelbart) + - Add the opt.prof_leak_error option to exit with error code when leak is + detected using opt.prof_final. (@yunxuo) + - Add opt.cache_oblivious as an runtime alternative to config.cache_oblivious. + (@interwq) + - Add mallctl interfaces: + + opt.zero_realloc (@davidtgoldblatt) + + opt.cache_oblivious (@interwq) + + opt.prof_leak_error (@yunxuo) + + opt.stats_interval (@interwq) + + opt.stats_interval_opts (@interwq) + + opt.tcache_max (@interwq) + + opt.trust_madvise (@azat) + + prof.prefix (@zhxchen17) + + stats.zero_reallocs (@davidtgoldblatt) + + thread.idle (@davidtgoldblatt) + + thread.peak.{read,reset} (@davidtgoldblatt) + + Bug fixes: + - Fix the synchronization around explicit tcache creation which could cause + invalid tcache identifiers. This regression was first released in 5.0.0. + (@yoshinorim, @davidtgoldblatt) + - Fix a profiling biasing issue which could cause incorrect heap usage and + object counts. This issue existed in all previous releases with the heap + profiling feature. (@davidtgoldblatt) + - Fix the order of stats counter updating on large realloc which could cause + failed assertions. This regression was first released in 5.0.0. (@azat) + - Fix the locking on the arena destroy mallctl, which could cause concurrent + arena creations to fail. This functionality was first introduced in 5.0.0. + (@interwq) + + Portability improvements: + - Remove nothrow from system function declarations on macOS and FreeBSD. + (@davidtgoldblatt, @fredemmott, @leres) + - Improve overcommit and page alignment settings on NetBSD. (@zoulasc) + - Improve CPU affinity support on BSD platforms. (@devnexen) + - Improve utrace detection and support. (@devnexen) + - Improve QEMU support with MADV_DONTNEED zeroed pages detection. (@azat) + - Add memcntl support on Solaris / illumos. 
(@devnexen) + - Improve CPU_SPINWAIT on ARM. (@AWSjswinney) + - Improve TSD cleanup on FreeBSD. (@Lapenkov) + - Disable percpu_arena if the CPU count cannot be reliably detected. (@azat) + - Add malloc_size(3) override support. (@devnexen) + - Add mmap VM_MAKE_TAG support. (@devnexen) + - Add support for MADV_[NO]CORE. (@devnexen) + - Add support for DragonFlyBSD. (@devnexen) + - Fix the QUANTUM setting on MIPS64. (@brooksdavis) + - Add the QUANTUM setting for ARC. (@vineetgarc) + - Add the QUANTUM setting for LoongArch. (@wangjl-uos) + - Add QNX support. (@jqian-aurora) + - Avoid atexit(3) calls unless the relevant profiling features are enabled. + (@BusyJay, @laiwei-rice, @interwq) + - Fix unknown option detection when using Clang. (@Lapenkov) + - Fix symbol conflict with musl libc. (@georgthegreat) + - Add -Wimplicit-fallthrough checks. (@nickdesaulniers) + - Add __forceinline support on MSVC. (@santagada) + - Improve FreeBSD and Windows CI support. (@Lapenkov) + - Add CI support for PPC64LE architecture. (@ezeeyahoo) + + Incompatible changes: + - Maximum size class allowed in tcache (opt.[lg_]tcache_max) now has an upper + bound of 8MiB. (@interwq) + + Optimizations and refactors (@davidtgoldblatt, @Lapenkov, @interwq): + - Optimize the common cases of the thread cache operations. + - Optimize internal data structures, including RB tree and pairing heap. + - Optimize the internal locking on extent management. + - Extract and refactor the internal page allocator and interface modules. + + Documentation: + - Fix doc build with --with-install-suffix. (@lawmurray, @interwq) + - Add PROFILING_INTERNALS.md. (@davidtgoldblatt) + - Ensure the proper order of doc building and installation. (@Mingli-Yu) + * 5.2.1 (August 5, 2019) This release is primarily about Windows. A critical virtual memory leak is diff --git a/contrib/jemalloc/FREEBSD-Xlist b/contrib/jemalloc/FREEBSD-Xlist index 41ca4712173e2e..16dd15f5f60d6d 100644 --- a/contrib/jemalloc/FREEBSD-Xlist +++ b/contrib/jemalloc/FREEBSD-Xlist @@ -11,6 +11,7 @@ configure* doc/*.in doc/*.xml doc/*.xsl +doc_internal/ include/jemalloc/internal/atomic_msvc.h include/jemalloc/internal/jemalloc_internal_defs.h.in include/jemalloc/internal/jemalloc_preamble.h.in @@ -45,5 +46,6 @@ msvc/ run_tests.sh scripts/ src/jemalloc_cpp.cpp +src/ticker.py src/zone.c test/ diff --git a/contrib/jemalloc/FREEBSD-diffs b/contrib/jemalloc/FREEBSD-diffs index 5da01508650126..e24e25a1c1d44f 100644 --- a/contrib/jemalloc/FREEBSD-diffs +++ b/contrib/jemalloc/FREEBSD-diffs @@ -1,5 +1,5 @@ diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in -index 7fecda7c..d5ca5e86 100644 +index e28e8f38..4f5d2799 100644 --- a/doc/jemalloc.xml.in +++ b/doc/jemalloc.xml.in @@ -53,11 +53,22 @@ @@ -26,7 +26,7 @@ index 7fecda7c..d5ca5e86 100644 Standard API -@@ -3510,4 +3521,18 @@ malloc_conf = "narenas:1";]]> +@@ -3760,4 +3771,18 @@ malloc_conf = "narenas:1";]]> The posix_memalign() function conforms to IEEE Std 1003.1-2001 (POSIX.1). 
@@ -46,7 +46,7 @@ index 7fecda7c..d5ca5e86 100644 + diff --git a/include/jemalloc/internal/jemalloc_internal_decls.h b/include/jemalloc/internal/jemalloc_internal_decls.h -index 7d6053e2..a0e4f5af 100644 +index 983027c8..3cbed72b 100644 --- a/include/jemalloc/internal/jemalloc_internal_decls.h +++ b/include/jemalloc/internal/jemalloc_internal_decls.h @@ -1,6 +1,9 @@ @@ -75,11 +75,11 @@ index 00000000..0dab1296 +# undef JEMALLOC_GCC_U8_SYNC_ATOMICS +#endif diff --git a/include/jemalloc/internal/jemalloc_preamble.h.in b/include/jemalloc/internal/jemalloc_preamble.h.in -index 3418cbfa..53e30dc4 100644 +index 5ce77d96..d970370c 100644 --- a/include/jemalloc/internal/jemalloc_preamble.h.in +++ b/include/jemalloc/internal/jemalloc_preamble.h.in -@@ -8,6 +8,9 @@ - #include +@@ -14,6 +14,9 @@ + # endif #endif +#include "un-namespace.h" @@ -88,7 +88,7 @@ index 3418cbfa..53e30dc4 100644 #define JEMALLOC_NO_DEMANGLE #ifdef JEMALLOC_JET # undef JEMALLOC_IS_MALLOC -@@ -79,13 +82,7 @@ static const bool config_fill = +@@ -85,13 +88,7 @@ static const bool config_fill = false #endif ; @@ -104,10 +104,10 @@ index 3418cbfa..53e30dc4 100644 static const bool config_prof = #ifdef JEMALLOC_PROF diff --git a/include/jemalloc/internal/mutex.h b/include/jemalloc/internal/mutex.h -index 7c24f072..94af1618 100644 +index 63a0b1b3..8468e7bc 100644 --- a/include/jemalloc/internal/mutex.h +++ b/include/jemalloc/internal/mutex.h -@@ -135,9 +135,6 @@ struct malloc_mutex_s { +@@ -131,9 +131,6 @@ struct malloc_mutex_s { #ifdef JEMALLOC_LAZY_LOCK extern bool isthreaded; @@ -117,7 +117,7 @@ index 7c24f072..94af1618 100644 #endif bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, -@@ -145,6 +142,7 @@ bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, +@@ -141,6 +138,7 @@ bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex); void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex); void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex); @@ -125,31 +125,37 @@ index 7c24f072..94af1618 100644 bool malloc_mutex_boot(void); void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex); -diff --git a/include/jemalloc/internal/test_hooks.h b/include/jemalloc/internal/test_hooks.h -index a6351e59..0780c52f 100644 ---- a/include/jemalloc/internal/test_hooks.h -+++ b/include/jemalloc/internal/test_hooks.h -@@ -6,13 +6,6 @@ extern JEMALLOC_EXPORT void (*test_hooks_libc_hook)(); +diff --git a/include/jemalloc/internal/pac.h b/include/jemalloc/internal/pac.h +index 01c4e6af..9122aa0f 100644 +--- a/include/jemalloc/internal/pac.h ++++ b/include/jemalloc/internal/pac.h +@@ -74,7 +74,6 @@ struct pac_stats_s { + atomic_zu_t abandoned_vm; + }; - #define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn) +-typedef struct pac_s pac_t; + struct pac_s { + /* + * Must be the first member (we convert it to a PAC given only a +diff --git a/include/jemalloc/internal/san_bump.h b/include/jemalloc/internal/san_bump.h +index 8ec4a710..d5902ba7 100644 +--- a/include/jemalloc/internal/san_bump.h ++++ b/include/jemalloc/internal/san_bump.h +@@ -9,7 +9,6 @@ --#define open JEMALLOC_HOOK(open, test_hooks_libc_hook) --#define read JEMALLOC_HOOK(read, test_hooks_libc_hook) --#define write JEMALLOC_HOOK(write, test_hooks_libc_hook) --#define readlink JEMALLOC_HOOK(readlink, test_hooks_libc_hook) --#define close JEMALLOC_HOOK(close, test_hooks_libc_hook) --#define creat JEMALLOC_HOOK(creat, test_hooks_libc_hook) 
--#define secure_getenv JEMALLOC_HOOK(secure_getenv, test_hooks_libc_hook) - /* Note that this is undef'd and re-define'd in src/prof.c. */ - #define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook) + extern bool opt_retain; +-typedef struct ehooks_s ehooks_t; + typedef struct pac_s pac_t; + + typedef struct san_bump_alloc_s san_bump_alloc_t; diff --git a/include/jemalloc/internal/tsd.h b/include/jemalloc/internal/tsd.h -index 9ba26004..ecfda5d6 100644 +index 66d68822..6cd52aee 100644 --- a/include/jemalloc/internal/tsd.h +++ b/include/jemalloc/internal/tsd.h -@@ -198,7 +198,8 @@ struct tsd_s { - t TSD_MANGLE(n); - MALLOC_TSD +@@ -256,7 +256,8 @@ struct tsd_s { + TSD_DATA_FAST + TSD_DATA_SLOWER #undef O -}; +/* AddressSanitizer requires TLS data to be aligned to at least 8 bytes. */ @@ -159,10 +165,10 @@ index 9ba26004..ecfda5d6 100644 tsd_state_get(tsd_t *tsd) { diff --git a/include/jemalloc/jemalloc_FreeBSD.h b/include/jemalloc/jemalloc_FreeBSD.h new file mode 100644 -index 00000000..b752b0e7 +index 00000000..b23bdba0 --- /dev/null +++ b/include/jemalloc/jemalloc_FreeBSD.h -@@ -0,0 +1,185 @@ +@@ -0,0 +1,192 @@ +/* + * Override settings that were generated in jemalloc_defs.h as necessary. + */ @@ -228,15 +234,21 @@ index 00000000..b752b0e7 +#ifdef __powerpc64__ +# define LG_VADDR 64 +# define LG_SIZEOF_PTR 3 ++# define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) +#elif defined(__powerpc__) +# define LG_VADDR 32 +# define LG_SIZEOF_PTR 2 ++# define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) +#endif +#ifdef __riscv +# define LG_VADDR 48 +# define LG_SIZEOF_PTR 3 +#endif + ++#if LG_VADDR > 32 ++# define JEMALLOC_RETAIN ++#endif ++ +#ifndef JEMALLOC_TLS_MODEL +# define JEMALLOC_TLS_MODEL /* Default. */ +#endif @@ -318,6 +330,7 @@ index 00000000..b752b0e7 +#define pthread_cond_wait _pthread_cond_wait +#define pthread_cond_timedwait _pthread_cond_timedwait +#define pthread_cond_signal _pthread_cond_signal ++#define pthread_getname_np _pthread_getname_np + +#ifdef JEMALLOC_C_ +/* @@ -360,10 +373,10 @@ index f9438912..47d032c1 100755 +#include "jemalloc_FreeBSD.h" EOF diff --git a/src/jemalloc.c b/src/jemalloc.c -index ed13718d..fefb719a 100644 +index 7655de4e..e4b183d1 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c -@@ -23,6 +23,10 @@ +@@ -29,6 +29,10 @@ /******************************************************************************/ /* Data. */ @@ -374,33 +387,7 @@ index ed13718d..fefb719a 100644 /* Runtime configuration options. */ const char *je_malloc_conf #ifndef _WIN32 -@@ -2660,25 +2664,6 @@ je_realloc(void *ptr, size_t arg_size) { - LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); - - if (unlikely(size == 0)) { -- if (ptr != NULL) { -- /* realloc(ptr, 0) is equivalent to free(ptr). */ -- UTRACE(ptr, 0, 0); -- tcache_t *tcache; -- tsd_t *tsd = tsd_fetch(); -- if (tsd_reentrancy_level_get(tsd) == 0) { -- tcache = tcache_get(tsd); -- } else { -- tcache = NULL; -- } -- -- uintptr_t args[3] = {(uintptr_t)ptr, size}; -- hook_invoke_dalloc(hook_dalloc_realloc, ptr, args); -- -- ifree(tsd, ptr, tcache, true); -- -- LOG("core.realloc.exit", "result: %p", NULL); -- return NULL; -- } - size = 1; - } - -@@ -3750,6 +3735,103 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { +@@ -4297,6 +4301,103 @@ label_done: * End non-standard functions. 
*/ /******************************************************************************/ @@ -504,7 +491,7 @@ index ed13718d..fefb719a 100644 /* * The following functions are used by threading libraries for protection of * malloc during fork(). -@@ -3919,4 +4001,11 @@ jemalloc_postfork_child(void) { +@@ -4473,4 +4574,11 @@ jemalloc_postfork_child(void) { ctl_postfork_child(tsd_tsdn(tsd)); } @@ -517,10 +504,10 @@ index ed13718d..fefb719a 100644 + /******************************************************************************/ diff --git a/src/malloc_io.c b/src/malloc_io.c -index d7cb0f52..cda589c4 100644 +index b76885cb..93a30497 100644 --- a/src/malloc_io.c +++ b/src/malloc_io.c -@@ -75,6 +75,20 @@ wrtmessage(void *cbopaque, const char *s) { +@@ -73,6 +73,20 @@ wrtmessage(void *cbopaque, const char *s) { JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s); @@ -542,10 +529,10 @@ index d7cb0f52..cda589c4 100644 * Wrapper around malloc_message() that avoids the need for * je_malloc_message(...) throughout the code. diff --git a/src/mutex.c b/src/mutex.c -index 3f920f5b..88a7730c 100644 +index 0b3547a8..1eedd626 100644 --- a/src/mutex.c +++ b/src/mutex.c -@@ -41,6 +41,17 @@ pthread_create(pthread_t *__restrict thread, +@@ -46,6 +46,17 @@ pthread_create(pthread_t *__restrict thread, #ifdef JEMALLOC_MUTEX_INIT_CB JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, void *(calloc_cb)(size_t, size_t)); @@ -563,7 +550,7 @@ index 3f920f5b..88a7730c 100644 #endif void -@@ -131,6 +142,16 @@ mutex_addr_comp(const witness_t *witness1, void *mutex1, +@@ -136,6 +147,16 @@ mutex_addr_comp(const witness_t *witness1, void *mutex1, } } @@ -580,3 +567,45 @@ index 3f920f5b..88a7730c 100644 bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank, malloc_mutex_lock_order_t lock_order) { +diff --git a/src/pages.c b/src/pages.c +index 8c83a7de..9ccff113 100644 +--- a/src/pages.c ++++ b/src/pages.c +@@ -10,7 +10,9 @@ + #ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT + #include + #ifdef __FreeBSD__ ++#include + #include ++#include + #endif + #endif + #ifdef __NetBSD__ +@@ -592,6 +594,13 @@ os_overcommits_sysctl(void) { + int vm_overcommit; + size_t sz; + ++#ifdef ELF_BSDF_VMNOOVERCOMMIT ++ int bsdflags; ++ ++ if (_elf_aux_info(AT_BSDFLAGS, &bsdflags, sizeof(bsdflags)) == 0) ++ return ((bsdflags & ELF_BSDF_VMNOOVERCOMMIT) == 0); ++#endif ++ + sz = sizeof(vm_overcommit); + #if defined(__FreeBSD__) && defined(VM_OVERCOMMIT) + int mib[2]; +@@ -607,7 +616,12 @@ os_overcommits_sysctl(void) { + } + #endif + +- return ((vm_overcommit & 0x3) == 0); ++#ifndef SWAP_RESERVE_FORCE_ON ++#define SWAP_RESERVE_FORCE_ON (1 << 0) ++#define SWAP_RESERVE_RLIMIT_ON (1 << 1) ++#endif ++ return ((vm_overcommit & (SWAP_RESERVE_FORCE_ON | ++ SWAP_RESERVE_RLIMIT_ON)) == 0); + } + #endif + diff --git a/contrib/jemalloc/VERSION b/contrib/jemalloc/VERSION index 428ded8c071fb5..1dcfea03fc1a36 100644 --- a/contrib/jemalloc/VERSION +++ b/contrib/jemalloc/VERSION @@ -1 +1 @@ -5.2.1-0-gea6b3e973b477b8061e0076bb257dbd7f3faa756 +5.3.0-0-g54eaed1d8b56b1aa528be3bdd1877e59c56fa90c diff --git a/contrib/jemalloc/doc/jemalloc.3 b/contrib/jemalloc/doc/jemalloc.3 index a4ea3e1f54a984..4ab60eaa4a704d 100644 --- a/contrib/jemalloc/doc/jemalloc.3 +++ b/contrib/jemalloc/doc/jemalloc.3 @@ -2,12 +2,12 @@ .\" Title: JEMALLOC .\" Author: Jason Evans .\" Generator: DocBook XSL Stylesheets v1.79.1 -.\" Date: 11/10/2019 +.\" Date: 08/15/2023 .\" Manual: User Manual -.\" Source: jemalloc 
5.2.1-0-gea6b3e973b477b8061e0076bb257dbd7f3faa756 +.\" Source: jemalloc 5.3.0-0-g54eaed1d8b56b1aa528be3bdd1877e59c56fa90c .\" Language: English .\" -.TH "JEMALLOC" "3" "11/10/2019" "jemalloc 5.2.1-0-gea6b3e973b47" "User Manual" +.TH "JEMALLOC" "3" "08/15/2023" "jemalloc 5.3.0-0-g54eaed1d8b56" "User Manual" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -31,7 +31,7 @@ jemalloc \- general purpose memory allocation functions .SH "LIBRARY" .PP -This manual describes jemalloc 5\&.2\&.1\-0\-gea6b3e973b477b8061e0076bb257dbd7f3faa756\&. More information can be found at the +This manual describes jemalloc 5\&.3\&.0\-0\-g54eaed1d8b56b1aa528be3bdd1877e59c56fa90c\&. More information can be found at the \m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&. .PP The following configuration options are enabled in libc\*(Aqs built\-in jemalloc: @@ -603,7 +603,7 @@ T} :T{ 8 KiB T}:T{ -[40 KiB, 48 KiB, 54 KiB, 64 KiB] +[40 KiB, 48 KiB, 56 KiB, 64 KiB] T} :T{ 16 KiB @@ -848,6 +848,11 @@ in these cases\&. This option is disabled by default unless is specified during configuration, in which case it is enabled by default\&. .RE .PP +opt\&.cache_oblivious (\fBbool\fR) r\- +.RS 4 +Enable / Disable cache\-oblivious large allocation alignment, for large requests with no alignment constraints\&. If this feature is disabled, all large allocations are page\-aligned as an implementation artifact, which can severely harm CPU cache utilization\&. However, the cache\-oblivious layout comes at the cost of one extra page per large allocation, which in the most extreme case increases physical memory usage for the 16 KiB size class to 20 KiB\&. This option is enabled by default\&. +.RE +.PP opt\&.metadata_thp (\fBconst char *\fR) r\- .RS 4 Controls whether to allow jemalloc to use transparent huge page (THP) for internal metadata (see @@ -859,6 +864,11 @@ uses no THP initially, but may begin to do so when metadata usage reaches certai \(lqdisabled\(rq\&. .RE .PP +opt\&.trust_madvise (\fBbool\fR) r\- +.RS 4 +If true, do not perform runtime check for MADV_DONTNEED, to check that it actually zeros pages\&. The default is disabled on Linux and enabled elsewhere\&. +.RE +.PP opt\&.retain (\fBbool\fR) r\- .RS 4 If true, retain unused virtual memory for later reuse rather than discarding it by calling @@ -990,6 +1000,28 @@ is enabled\&. The default is \(lq\(rq\&. .RE .PP +opt\&.stats_interval (\fBint64_t\fR) r\- +.RS 4 +Average interval between statistics outputs, as measured in bytes of allocation activity\&. The actual interval may be sporadic because decentralized event counters are used to avoid synchronization bottlenecks\&. The output may be triggered on any thread, which then calls +malloc_stats_print()\&. +opt\&.stats_interval_opts +can be combined to specify output options\&. By default, interval\-triggered stats output is disabled (encoded as \-1)\&. +.RE +.PP +opt\&.stats_interval_opts (\fBconst char *\fR) r\- +.RS 4 +Options (the +\fIopts\fR +string) to pass to the +malloc_stats_print() +for interval based statistics printing (enabled through +opt\&.stats_interval)\&. See available options in +malloc_stats_print()\&. Has no effect unless +opt\&.stats_interval +is enabled\&. The default is +\(lq\(rq\&. +.RE +.PP opt\&.junk (\fBconst char *\fR) r\- [\fB\-\-enable\-fill\fR] .RS 4 Junk filling\&. If set to @@ -1046,13 +1078,13 @@ This option is disabled by default\&. 
opt\&.tcache (\fBbool\fR) r\- .RS 4 Thread\-specific caching (tcache) enabled/disabled\&. When there are multiple threads, each thread uses a tcache for objects up to a certain size\&. Thread\-specific caching allows many allocations to be satisfied without performing any thread synchronization, at the cost of increased memory use\&. See the -opt\&.lg_tcache_max +opt\&.tcache_max option for related tuning information\&. This option is enabled by default\&. .RE .PP -opt\&.lg_tcache_max (\fBsize_t\fR) r\- +opt\&.tcache_max (\fBsize_t\fR) r\- .RS 4 -Maximum size class (log base 2) to cache in the thread\-specific cache (tcache)\&. At a minimum, all small size classes are cached, and at a maximum all large size classes are cached\&. The default maximum is 32 KiB (2^15)\&. +Maximum size class to cache in the thread\-specific cache (tcache)\&. At a minimum, the first size class is cached; and at a maximum, size classes up to 8 MiB can be cached\&. The default maximum is 32 KiB (2^15)\&. As a convenience, this may also be set by specifying lg_tcache_max, which will be taken to be the base\-2 logarithm of the setting of tcache_max\&. .RE .PP opt\&.thp (\fBconst char *\fR) r\- @@ -1091,7 +1123,8 @@ for heap profile format documentation\&. opt\&.prof_prefix (\fBconst char *\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Filename prefix for profile dumps\&. If the prefix is set to the empty string, no automatic dumps will occur; this is primarily useful for disabling the automatic final heap dump (which also disables leak reporting, if enabled)\&. The default prefix is -jeprof\&. +jeprof\&. This prefix value can be overridden by +prof\&.prefix\&. .RE .PP opt\&.prof_active (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] @@ -1129,7 +1162,9 @@ Average interval (log base 2) between memory profile dumps, as measured in bytes is controlled by the opt\&.prof_prefix -option\&. By default, interval\-triggered profile dumping is disabled (encoded as \-1)\&. +and +prof\&.prefix +options\&. By default, interval\-triggered profile dumping is disabled (encoded as \-1)\&. .RE .PP opt\&.prof_gdump (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] @@ -1147,7 +1182,9 @@ function to dump final memory usage to a file named according to the pattern is controlled by the opt\&.prof_prefix -option\&. Note that +and +prof\&.prefix +options\&. Note that atexit() may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls atexit(), so this option is not universally usable (though the application can register its own @@ -1161,7 +1198,46 @@ Leak reporting enabled/disabled\&. If enabled, use an \fBatexit\fR(3) function to report memory leaks detected by allocation sampling\&. See the opt\&.prof -option for information on analyzing heap profile output\&. This option is disabled by default\&. +option for information on analyzing heap profile output\&. Works only when combined with +opt\&.prof_final, otherwise does nothing\&. This option is disabled by default\&. +.RE +.PP +opt\&.prof_leak_error (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] +.RS 4 +Similar to +opt\&.prof_leak, but makes the process exit with error code 1 if a memory leak is detected\&. This option supersedes +opt\&.prof_leak, meaning that if both are specified, this option takes precedence\&. When enabled, also enables +opt\&.prof_leak\&. Works only when combined with +opt\&.prof_final, otherwise does nothing\&. This option is disabled by default\&. 
+.RE +.PP +opt\&.zero_realloc (\fBconst char *\fR) r\- +.RS 4 +Determines the behavior of +realloc() +when passed a value of zero for the new size\&. +\(lqalloc\(rq +treats this as an allocation of size zero (and returns a non\-null result except in case of resource exhaustion)\&. +\(lqfree\(rq +treats this as a deallocation of the pointer, and returns +\fBNULL\fR +without setting +\fIerrno\fR\&. +\(lqabort\(rq +aborts the process if zero is passed\&. The default is +\(lqfree\(rq +on Linux and Windows, and +\(lqalloc\(rq +elsewhere\&. +.sp +There is considerable divergence of behaviors across implementations in handling this case\&. Many have the behavior of +\(lqfree\(rq\&. This can introduce security vulnerabilities, since a +\fBNULL\fR +return value indicates failure, and the continued validity of the passed\-in pointer (per POSIX and C11)\&. +\(lqalloc\(rq +is safe, but can cause leaks in programs that expect the common behavior\&. Programs intended to be portable and leak\-free cannot assume either behavior, and must therefore never call realloc with a size of 0\&. The +\(lqabort\(rq +option enables these testing this behavior\&. .RE .PP thread\&.arena (\fBunsigned\fR) rw @@ -1182,7 +1258,7 @@ Get a pointer to the the value that is returned by the thread\&.allocated mallctl\&. This is useful for avoiding the overhead of repeated mallctl*() -calls\&. +calls\&. Note that the underlying counter should not be modified by the application\&. .RE .PP thread\&.deallocated (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] @@ -1196,7 +1272,23 @@ Get a pointer to the the value that is returned by the thread\&.deallocated mallctl\&. This is useful for avoiding the overhead of repeated mallctl*() -calls\&. +calls\&. Note that the underlying counter should not be modified by the application\&. +.RE +.PP +thread\&.peak\&.read (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Get an approximation of the maximum value of the difference between the number of bytes allocated and the number of bytes deallocated by the calling thread since the last call to +thread\&.peak\&.reset, or since the thread\*(Aqs creation if it has not called +thread\&.peak\&.reset\&. No guarantees are made about the quality of the approximation, but jemalloc currently endeavors to maintain accuracy to within one hundred kilobytes\&. +.RE +.PP +thread\&.peak\&.reset (\fBvoid\fR) \-\- [\fB\-\-enable\-stats\fR] +.RS 4 +Resets the counter for net bytes allocated in the calling thread to zero\&. This affects subsequent calls to +thread\&.peak\&.read, but not the values returned by +thread\&.allocated +or +thread\&.deallocated\&. .RE .PP thread\&.tcache\&.enabled (\fBbool\fR) rw @@ -1224,11 +1316,27 @@ Control whether sampling is currently active for the calling thread\&. This is a prof\&.active; both must be active for the calling thread to sample\&. This flag is enabled by default\&. .RE .PP +thread\&.idle (\fBvoid\fR) \-\- +.RS 4 +Hints to jemalloc that the calling thread will be idle for some nontrivial period of time (say, on the order of seconds), and that doing some cleanup operations may be beneficial\&. There are no guarantees as to what specific operations will be performed; currently this flushes the caller\*(Aqs tcache and may (according to some heuristic) purge its associated arena\&. +.sp +This is not intended to be a general\-purpose background activity mechanism, and threads should not wake up multiple times solely to call it\&. 
Rather, a thread waiting for a task should do a timed wait first, call +thread\&.idle +if no task appears in the timeout interval, and then do an untimed wait\&. For such a background activity mechanism, see +background_thread\&. +.RE +.PP tcache\&.create (\fBunsigned\fR) r\- .RS 4 Create an explicit thread\-specific cache (tcache) and return an identifier that can be passed to the \fBMALLOCX_TCACHE(\fR\fB\fItc\fR\fR\fB)\fR macro to explicitly use the specified cache rather than the automatically managed one that is used by default\&. Each explicit cache can be used by only one thread at a time; the application must assure that this constraint holds\&. +.sp +If the amount of space supplied for storing the thread\-specific cache identifier does not equal +sizeof(\fBunsigned\fR), no thread\-specific cache will be created, no data will be written to the space pointed by +\fIoldp\fR, and +\fI*oldlenp\fR +will be set to 0\&. .RE .PP tcache\&.flush (\fBunsigned\fR) \-w @@ -1634,6 +1742,12 @@ Maximum size supported by this large size class\&. arenas\&.create (\fBunsigned\fR, \fBextent_hooks_t *\fR) rw .RS 4 Explicitly create a new arena outside the range of automatically managed arenas, with optionally specified extent hooks, and return the new arena index\&. +.sp +If the amount of space supplied for storing the arena index does not equal +sizeof(\fBunsigned\fR), no arena will be created, no data will be written to the space pointed by +\fIoldp\fR, and +\fI*oldlenp\fR +will be set to 0\&. .RE .PP arenas\&.lookup (\fBunsigned\fR, \fBvoid*\fR) rw @@ -1666,7 +1780,16 @@ Dump a memory profile to the specified file, or if NULL is specified, to a file is controlled by the opt\&.prof_prefix -option\&. +and +prof\&.prefix +options\&. +.RE +.PP +prof\&.prefix (\fBconst char *\fR) \-w [\fB\-\-enable\-prof\fR] +.RS 4 +Set the filename prefix for profile dumps\&. See +opt\&.prof_prefix +for the default setting\&. This can be useful to differentiate profile dumps such as from forked processes\&. .RE .PP prof\&.gdump (\fBbool\fR) rw [\fB\-\-enable\-prof\fR] @@ -1676,7 +1799,9 @@ When enabled, trigger a memory profile dump every time the total virtual memory is controlled by the opt\&.prof_prefix -option\&. +and +prof\&.prefix +options\&. .RE .PP prof\&.reset (\fBsize_t\fR) \-w [\fB\-\-enable\-prof\fR] @@ -1752,6 +1877,18 @@ for details)\&. Retained memory is excluded from mapped memory statistics, e\&.g stats\&.mapped\&. .RE .PP +stats\&.zero_reallocs (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Number of times that the +realloc() +was called with a non\-\fBNULL\fR +pointer argument and a +\fB0\fR +size argument\&. This is a fundamentally unsafe pattern in portable programs; see +opt\&.zero_realloc +for details\&. +.RE +.PP stats\&.background_thread\&.num_threads (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Number of @@ -1825,6 +1962,26 @@ is one of the counters in mutex profiling counters\&. .RE .PP +stats\&.mutexes\&.prof_thds_data\&.{counter} (\fBcounter specific type\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Statistics on +\fIprof\fR +threads data mutex (global scope; profiling related)\&. +{counter} +is one of the counters in +mutex profiling counters\&. +.RE +.PP +stats\&.mutexes\&.prof_dump\&.{counter} (\fBcounter specific type\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Statistics on +\fIprof\fR +dumping mutex (global scope; profiling related)\&. +{counter} +is one of the counters in +mutex profiling counters\&. 
+.RE +.PP stats\&.mutexes\&.reset (\fBvoid\fR) \-\- [\fB\-\-enable\-stats\fR] .RS 4 Reset all mutex profile statistics, including global mutexes, arena mutexes and bin mutexes\&. @@ -2242,7 +2399,7 @@ heap_v2/524288 [\&.\&.\&.] @ 0x5f86da8 0x5f5a1dc [\&.\&.\&.] 0x29e4d4e 0xa200316 0xabb2988 [\&.\&.\&.] t*: 13: 6688 [0: 0] - t3: 12: 6496 [0: ] + t3: 12: 6496 [0: 0] t99: 1: 192 [0: 0] [\&.\&.\&.] @@ -2264,9 +2421,9 @@ to indicate descriptions of the corresponding fields\&. / : : [: ] [\&.\&.\&.] - : : [: ] + : : [: ] [\&.\&.\&.] - : : [: ] + : : [: ] [\&.\&.\&.] @ [\&.\&.\&.] [\&.\&.\&.] : : [: ] @@ -2432,7 +2589,8 @@ is not \fInewlen\fR is too large or too small\&. Alternatively, \fI*oldlenp\fR -is too large or too small; in this case as much data as possible are read despite the error\&. +is too large or too small; when it happens, except for a very few cases explicitly documented otherwise, as much data as possible are read despite the error, with the amount of data read being recorded in +\fI*oldlenp\fR\&. .RE .PP ENOENT diff --git a/contrib/jemalloc/include/jemalloc/internal/activity_callback.h b/contrib/jemalloc/include/jemalloc/internal/activity_callback.h new file mode 100644 index 00000000000000..6c2e84e3180dd6 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/activity_callback.h @@ -0,0 +1,23 @@ +#ifndef JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H +#define JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H + +/* + * The callback to be executed "periodically", in response to some amount of + * allocator activity. + * + * This callback need not be computing any sort of peak (although that's the + * intended first use case), but we drive it from the peak counter, so it's + * keeps things tidy to keep it here. + * + * The calls to this thunk get driven by the peak_event module. + */ +#define ACTIVITY_CALLBACK_THUNK_INITIALIZER {NULL, NULL} +typedef void (*activity_callback_t)(void *uctx, uint64_t allocated, + uint64_t deallocated); +typedef struct activity_callback_thunk_s activity_callback_thunk_t; +struct activity_callback_thunk_s { + activity_callback_t callback; + void *uctx; +}; + +#endif /* JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_externs.h b/contrib/jemalloc/include/jemalloc/internal/arena_externs.h index a4523ae0c49458..e6fceaafea5022 100644 --- a/contrib/jemalloc/include/jemalloc/internal/arena_externs.h +++ b/contrib/jemalloc/include/jemalloc/internal/arena_externs.h @@ -2,59 +2,67 @@ #define JEMALLOC_INTERNAL_ARENA_EXTERNS_H #include "jemalloc/internal/bin.h" +#include "jemalloc/internal/div.h" #include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/hook.h" #include "jemalloc/internal/pages.h" #include "jemalloc/internal/stats.h" +/* + * When the amount of pages to be purged exceeds this amount, deferred purge + * should happen. + */ +#define ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD UINT64_C(1024) + extern ssize_t opt_dirty_decay_ms; extern ssize_t opt_muzzy_decay_ms; extern percpu_arena_mode_t opt_percpu_arena; extern const char *percpu_arena_mode_names[]; -extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS]; +extern div_info_t arena_binind_div_info[SC_NBINS]; + extern malloc_mutex_t arenas_lock; +extern emap_t arena_emap_global; extern size_t opt_oversize_threshold; extern size_t oversize_threshold; +/* + * arena_bin_offsets[binind] is the offset of the first bin shard for size class + * binind. 
+ */ +extern uint32_t arena_bin_offsets[SC_NBINS]; + void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy); void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, - bin_stats_t *bstats, arena_stats_large_t *lstats, - arena_stats_extents_t *estats); -void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent); -#ifdef JEMALLOC_JET -size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr); -#endif -extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, - size_t usize, size_t alignment, bool *zero); + bin_stats_data_t *bstats, arena_stats_large_t *lstats, + pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats); +void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena); +edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, + size_t usize, size_t alignment, bool zero); void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, - extent_t *extent); + edata_t *edata); void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, - extent_t *extent, size_t oldsize); + edata_t *edata, size_t oldsize); void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, - extent_t *extent, size_t oldsize); -ssize_t arena_dirty_decay_ms_get(arena_t *arena); -bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms); -ssize_t arena_muzzy_decay_ms_get(arena_t *arena); -bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms); + edata_t *edata, size_t oldsize); +bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state, + ssize_t decay_ms); +ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state); void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all); +uint64_t arena_time_until_deferred(tsdn_t *tsdn, arena_t *arena); +void arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena); void arena_reset(tsd_t *tsd, arena_t *arena); void arena_destroy(tsd_t *tsd, arena_t *arena); -void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, - cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes); -void arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, - bool zero); - -typedef void (arena_dalloc_junk_small_t)(void *, const bin_info_t *); -extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small; +void arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena, + cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind, + const unsigned nfill); void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero); @@ -63,8 +71,12 @@ void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize); void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path); -void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin, - szind_t binind, extent_t *extent, void *ptr); +void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab); + +void arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena, + edata_t *slab, bin_t *bin); +void arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t 
*arena, + edata_t *slab, bin_t *bin); void arena_dalloc_small(tsdn_t *tsdn, void *ptr); bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, bool zero, size_t *newsize); @@ -72,6 +84,9 @@ void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache, hook_ralloc_args_t *hook_args); dss_prec_t arena_dss_prec_get(arena_t *arena); +ehooks_t *arena_get_ehooks(arena_t *arena); +extent_hooks_t *arena_set_extent_hooks(tsd_t *tsd, arena_t *arena, + extent_hooks_t *extent_hooks); bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec); ssize_t arena_dirty_decay_ms_default_get(void); bool arena_dirty_decay_ms_default_set(ssize_t decay_ms); @@ -82,14 +97,15 @@ bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, unsigned arena_nthreads_get(arena_t *arena, bool internal); void arena_nthreads_inc(arena_t *arena, bool internal); void arena_nthreads_dec(arena_t *arena, bool internal); -size_t arena_extent_sn_next(arena_t *arena); -arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks); +arena_t *arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config); bool arena_init_huge(void); bool arena_is_huge(unsigned arena_ind); arena_t *arena_choose_huge(tsd_t *tsd); -bin_t *arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind, +bin_t *arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned *binshard); -void arena_boot(sc_data_t *sc_data); +size_t arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind, + void **ptrs, size_t nfill, bool zero); +bool arena_boot(sc_data_t *sc_data, base_t *base, bool hpa); void arena_prefork0(tsdn_t *tsdn, arena_t *arena); void arena_prefork1(tsdn_t *tsdn, arena_t *arena); void arena_prefork2(tsdn_t *tsdn, arena_t *arena); @@ -98,6 +114,7 @@ void arena_prefork4(tsdn_t *tsdn, arena_t *arena); void arena_prefork5(tsdn_t *tsdn, arena_t *arena); void arena_prefork6(tsdn_t *tsdn, arena_t *arena); void arena_prefork7(tsdn_t *tsdn, arena_t *arena); +void arena_prefork8(tsdn_t *tsdn, arena_t *arena); void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena); void arena_postfork_child(tsdn_t *tsdn, arena_t *arena); diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_inlines_a.h b/contrib/jemalloc/include/jemalloc/internal/arena_inlines_a.h index 9abf7f6ac702cf..8568358c7fb635 100644 --- a/contrib/jemalloc/include/jemalloc/internal/arena_inlines_a.h +++ b/contrib/jemalloc/include/jemalloc/internal/arena_inlines_a.h @@ -3,7 +3,7 @@ static inline unsigned arena_ind_get(const arena_t *arena) { - return base_ind_get(arena->base); + return arena->ind; } static inline void @@ -21,37 +21,4 @@ arena_internal_get(arena_t *arena) { return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED); } -static inline bool -arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) { - cassert(config_prof); - - if (likely(prof_interval == 0 || !prof_active_get_unlocked())) { - return false; - } - - return prof_accum_add(tsdn, &arena->prof_accum, accumbytes); -} - -static inline void -percpu_arena_update(tsd_t *tsd, unsigned cpu) { - assert(have_percpu_arena); - arena_t *oldarena = tsd_arena_get(tsd); - assert(oldarena != NULL); - unsigned oldind = arena_ind_get(oldarena); - - if (oldind != cpu) { - unsigned newind = cpu; - arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true); - assert(newarena != NULL); - - /* Set new arena/tcache associations. 
*/ - arena_migrate(tsd, oldind, newind); - tcache_t *tcache = tcache_get(tsd); - if (tcache != NULL) { - tcache_arena_reassociate(tsd_tsdn(tsd), tcache, - newarena); - } - } -} - #endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_inlines_b.h b/contrib/jemalloc/include/jemalloc/internal/arena_inlines_b.h index dd926575fc8a4b..fa81537c469dbe 100644 --- a/contrib/jemalloc/include/jemalloc/internal/arena_inlines_b.h +++ b/contrib/jemalloc/include/jemalloc/internal/arena_inlines_b.h @@ -1,16 +1,20 @@ #ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H #define JEMALLOC_INTERNAL_ARENA_INLINES_B_H +#include "jemalloc/internal/div.h" +#include "jemalloc/internal/emap.h" #include "jemalloc/internal/jemalloc_internal_types.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/safety_check.h" #include "jemalloc/internal/sc.h" #include "jemalloc/internal/sz.h" #include "jemalloc/internal/ticker.h" -JEMALLOC_ALWAYS_INLINE bool -arena_has_default_hooks(arena_t *arena) { - return (extent_hooks_get(arena) == &extent_hooks_default); +static inline arena_t * +arena_get_from_edata(edata_t *edata) { + return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(edata)], + ATOMIC_RELAXED); } JEMALLOC_ALWAYS_INLINE arena_t * @@ -34,127 +38,109 @@ arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) { return arena_choose(tsd, NULL); } -JEMALLOC_ALWAYS_INLINE prof_tctx_t * -arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) { +JEMALLOC_ALWAYS_INLINE void +arena_prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx, + prof_info_t *prof_info, bool reset_recent) { cassert(config_prof); assert(ptr != NULL); + assert(prof_info != NULL); + + edata_t *edata = NULL; + bool is_slab; /* Static check. */ if (alloc_ctx == NULL) { - const extent_t *extent = iealloc(tsdn, ptr); - if (unlikely(!extent_slab_get(extent))) { - return large_prof_tctx_get(tsdn, extent); - } + edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, + ptr); + is_slab = edata_slab_get(edata); + } else if (unlikely(!(is_slab = alloc_ctx->slab))) { + edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, + ptr); + } + + if (unlikely(!is_slab)) { + /* edata must have been initialized at this point. */ + assert(edata != NULL); + large_prof_info_get(tsd, edata, prof_info, reset_recent); } else { - if (unlikely(!alloc_ctx->slab)) { - return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr)); - } + prof_info->alloc_tctx = (prof_tctx_t *)(uintptr_t)1U; + /* + * No need to set other fields in prof_info; they will never be + * accessed if (uintptr_t)alloc_tctx == (uintptr_t)1U. + */ } - return (prof_tctx_t *)(uintptr_t)1U; } JEMALLOC_ALWAYS_INLINE void -arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, - alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) { +arena_prof_tctx_reset(tsd_t *tsd, const void *ptr, + emap_alloc_ctx_t *alloc_ctx) { cassert(config_prof); assert(ptr != NULL); /* Static check. 
*/ if (alloc_ctx == NULL) { - extent_t *extent = iealloc(tsdn, ptr); - if (unlikely(!extent_slab_get(extent))) { - large_prof_tctx_set(tsdn, extent, tctx); + edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), + &arena_emap_global, ptr); + if (unlikely(!edata_slab_get(edata))) { + large_prof_tctx_reset(edata); } } else { if (unlikely(!alloc_ctx->slab)) { - large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx); + edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), + &arena_emap_global, ptr); + large_prof_tctx_reset(edata); } } } -static inline void -arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) { +JEMALLOC_ALWAYS_INLINE void +arena_prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) { cassert(config_prof); assert(ptr != NULL); - extent_t *extent = iealloc(tsdn, ptr); - assert(!extent_slab_get(extent)); + edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, + ptr); + assert(!edata_slab_get(edata)); - large_prof_tctx_reset(tsdn, extent); -} - -JEMALLOC_ALWAYS_INLINE nstime_t -arena_prof_alloc_time_get(tsdn_t *tsdn, const void *ptr, - alloc_ctx_t *alloc_ctx) { - cassert(config_prof); - assert(ptr != NULL); - - extent_t *extent = iealloc(tsdn, ptr); - /* - * Unlike arena_prof_prof_tctx_{get, set}, we only call this once we're - * sure we have a sampled allocation. - */ - assert(!extent_slab_get(extent)); - return large_prof_alloc_time_get(extent); + large_prof_tctx_reset(edata); } JEMALLOC_ALWAYS_INLINE void -arena_prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx, - nstime_t t) { +arena_prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx, + size_t size) { cassert(config_prof); - assert(ptr != NULL); - extent_t *extent = iealloc(tsdn, ptr); - assert(!extent_slab_get(extent)); - large_prof_alloc_time_set(extent, t); + assert(!edata_slab_get(edata)); + large_prof_info_set(edata, tctx, size); } JEMALLOC_ALWAYS_INLINE void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) { - tsd_t *tsd; - ticker_t *decay_ticker; - if (unlikely(tsdn_null(tsdn))) { return; } - tsd = tsdn_tsd(tsdn); - decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena)); - if (unlikely(decay_ticker == NULL)) { - return; - } - if (unlikely(ticker_ticks(decay_ticker, nticks))) { + tsd_t *tsd = tsdn_tsd(tsdn); + /* + * We use the ticker_geom_t to avoid having per-arena state in the tsd. + * Instead of having a countdown-until-decay timer running for every + * arena in every thread, we flip a coin once per tick, whose + * probability of coming up heads is 1/nticks; this is effectively the + * operation of the ticker_geom_t. Each arena has the same chance of a + * coinflip coming up heads (1/ARENA_DECAY_NTICKS_PER_UPDATE), so we can + * use a single ticker for all of them. + */ + ticker_geom_t *decay_ticker = tsd_arena_decay_tickerp_get(tsd); + uint64_t *prng_state = tsd_prng_statep_get(tsd); + if (unlikely(ticker_geom_ticks(decay_ticker, prng_state, nticks))) { arena_decay(tsdn, arena, false, false); } } JEMALLOC_ALWAYS_INLINE void arena_decay_tick(tsdn_t *tsdn, arena_t *arena) { - malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx); - malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx); - arena_decay_ticks(tsdn, arena, 1); } -/* Purge a single extent to retained / unmapped directly. 
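
The comment in arena_decay_ticks() describes the ticker_geom_t trick: rather than keeping one countdown per (thread, arena) pair, fire with probability 1/nticks on every tick, so the expected period is unchanged. A self-contained sketch of that idea, using a throwaway xorshift PRNG instead of jemalloc's prng module and 1000 for nticks to match ARENA_DECAY_NTICKS_PER_UPDATE:

```
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
xorshift64(uint64_t *state) {
	uint64_t x = *state;
	x ^= x << 13;
	x ^= x >> 7;
	x ^= x << 17;
	return *state = x;
}

/* Fire with probability 1/nticks on each tick. */
static bool
geom_tick(uint64_t *prng_state, uint32_t nticks) {
	return xorshift64(prng_state) % nticks == 0;
}

int
main(void) {
	uint64_t state = 0x9e3779b97f4a7c15ULL;
	uint32_t fired = 0;
	for (int i = 0; i < 1000000; i++) {
		if (geom_tick(&state, 1000)) {
			fired++;
		}
	}
	/* Expect roughly 1000 firings over one million ticks. */
	printf("fired %u times\n", fired);
	return 0;
}
```

Over a million ticks the count lands near 1000, the same average rate a plain per-arena countdown ticker would give, but with a single piece of per-thread state.
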
*/ -JEMALLOC_ALWAYS_INLINE void -arena_decay_extent(tsdn_t *tsdn,arena_t *arena, extent_hooks_t **r_extent_hooks, - extent_t *extent) { - size_t extent_size = extent_size_get(extent); - extent_dalloc_wrapper(tsdn, arena, - r_extent_hooks, extent); - if (config_stats) { - /* Update stats accordingly. */ - arena_stats_lock(tsdn, &arena->stats); - arena_stats_add_u64(tsdn, &arena->stats, - &arena->decay_dirty.stats->nmadvise, 1); - arena_stats_add_u64(tsdn, &arena->stats, - &arena->decay_dirty.stats->purged, extent_size >> LG_PAGE); - arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped, - extent_size); - arena_stats_unlock(tsdn, &arena->stats); - } -} - JEMALLOC_ALWAYS_INLINE void * arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, tcache_t *tcache, bool slow_path) { @@ -178,21 +164,19 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, JEMALLOC_ALWAYS_INLINE arena_t * arena_aalloc(tsdn_t *tsdn, const void *ptr) { - return extent_arena_get(iealloc(tsdn, ptr)); + edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); + unsigned arena_ind = edata_arena_ind_get(edata); + return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_RELAXED); } JEMALLOC_ALWAYS_INLINE size_t arena_salloc(tsdn_t *tsdn, const void *ptr) { assert(ptr != NULL); + emap_alloc_ctx_t alloc_ctx; + emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx); + assert(alloc_ctx.szind != SC_NSIZES); - rtree_ctx_t rtree_ctx_fallback; - rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); - - szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx, - (uintptr_t)ptr, true); - assert(szind != SC_NSIZES); - - return sz_index2size(szind); + return sz_index2size(alloc_ctx.szind); } JEMALLOC_ALWAYS_INLINE size_t @@ -206,26 +190,53 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) { * failure. */ - rtree_ctx_t rtree_ctx_fallback; - rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); - - extent_t *extent; - szind_t szind; - if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx, - (uintptr_t)ptr, false, &extent, &szind)) { + emap_full_alloc_ctx_t full_alloc_ctx; + bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &arena_emap_global, + ptr, &full_alloc_ctx); + if (missing) { return 0; } - if (extent == NULL) { + if (full_alloc_ctx.edata == NULL) { return 0; } - assert(extent_state_get(extent) == extent_state_active); + assert(edata_state_get(full_alloc_ctx.edata) == extent_state_active); /* Only slab members should be looked up via interior pointers. */ - assert(extent_addr_get(extent) == ptr || extent_slab_get(extent)); + assert(edata_addr_get(full_alloc_ctx.edata) == ptr + || edata_slab_get(full_alloc_ctx.edata)); + + assert(full_alloc_ctx.szind != SC_NSIZES); + + return sz_index2size(full_alloc_ctx.szind); +} - assert(szind != SC_NSIZES); +JEMALLOC_ALWAYS_INLINE bool +large_dalloc_safety_checks(edata_t *edata, void *ptr, szind_t szind) { + if (!config_opt_safety_checks) { + return false; + } + + /* + * Eagerly detect double free and sized dealloc bugs for large sizes. + * The cost is low enough (as edata will be accessed anyway) to be + * enabled all the time. 
+ */ + if (unlikely(edata == NULL || + edata_state_get(edata) != extent_state_active)) { + safety_check_fail("Invalid deallocation detected: " + "pages being freed (%p) not currently active, " + "possibly caused by double free bugs.", + (uintptr_t)edata_addr_get(edata)); + return true; + } + size_t input_size = sz_index2size(szind); + if (unlikely(input_size != edata_usize_get(edata))) { + safety_check_fail_sized_dealloc(/* current_dealloc */ true, ptr, + /* true_size */ edata_usize_get(edata), input_size); + return true; + } - return sz_index2size(szind); + return false; } static inline void @@ -233,8 +244,13 @@ arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) { if (config_prof && unlikely(szind < SC_NBINS)) { arena_dalloc_promoted(tsdn, ptr, NULL, true); } else { - extent_t *extent = iealloc(tsdn, ptr); - large_dalloc(tsdn, extent); + edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, + ptr); + if (large_dalloc_safety_checks(edata, ptr, szind)) { + /* See the comment in isfree. */ + return; + } + large_dalloc(tsdn, edata); } } @@ -242,27 +258,22 @@ static inline void arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) { assert(ptr != NULL); - rtree_ctx_t rtree_ctx_fallback; - rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); - - szind_t szind; - bool slab; - rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, - true, &szind, &slab); + emap_alloc_ctx_t alloc_ctx; + emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx); if (config_debug) { - extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, - rtree_ctx, (uintptr_t)ptr, true); - assert(szind == extent_szind_get(extent)); - assert(szind < SC_NSIZES); - assert(slab == extent_slab_get(extent)); + edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, + ptr); + assert(alloc_ctx.szind == edata_szind_get(edata)); + assert(alloc_ctx.szind < SC_NSIZES); + assert(alloc_ctx.slab == edata_slab_get(edata)); } - if (likely(slab)) { + if (likely(alloc_ctx.slab)) { /* Small allocation. */ arena_dalloc_small(tsdn, ptr); } else { - arena_dalloc_large_no_tcache(tsdn, ptr, szind); + arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind); } } @@ -277,14 +288,19 @@ arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind, slow_path); } } else { - extent_t *extent = iealloc(tsdn, ptr); - large_dalloc(tsdn, extent); + edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, + ptr); + if (large_dalloc_safety_checks(edata, ptr, szind)) { + /* See the comment in isfree. 
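
From the application side, the new large_dalloc_safety_checks() path turns a mismatched sized deallocation into a loud failure instead of silent corruption. A sketch of the kind of misuse it is meant to catch, assuming a jemalloc build with safety checks enabled and the FreeBSD <malloc_np.h> declarations for the non-standard API:

```
#include <stddef.h>
#include <malloc_np.h>

int
main(void) {
	void *p = mallocx(2 << 20, 0);	/* 2 MiB: served as a large extent. */
	if (p == NULL) {
		return 1;
	}
	/*
	 * Bug: the size passed to sdallocx() does not match the allocation.
	 * On a checked build the allocator compares the caller's size class
	 * against the extent's recorded usable size and reports a
	 * sized-dealloc mismatch; without checks this is undefined behavior.
	 */
	sdallocx(p, 1 << 20, 0);
	return 0;
}
```
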
*/ + return; + } + large_dalloc(tsdn, edata); } } JEMALLOC_ALWAYS_INLINE void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, - alloc_ctx_t *alloc_ctx, bool slow_path) { + emap_alloc_ctx_t *caller_alloc_ctx, bool slow_path) { assert(!tsdn_null(tsdn) || tcache == NULL); assert(ptr != NULL); @@ -293,34 +309,30 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, return; } - szind_t szind; - bool slab; - rtree_ctx_t *rtree_ctx; - if (alloc_ctx != NULL) { - szind = alloc_ctx->szind; - slab = alloc_ctx->slab; - assert(szind != SC_NSIZES); + emap_alloc_ctx_t alloc_ctx; + if (caller_alloc_ctx != NULL) { + alloc_ctx = *caller_alloc_ctx; } else { - rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn)); - rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, - (uintptr_t)ptr, true, &szind, &slab); + util_assume(!tsdn_null(tsdn)); + emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, + &alloc_ctx); } if (config_debug) { - rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn)); - extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, - rtree_ctx, (uintptr_t)ptr, true); - assert(szind == extent_szind_get(extent)); - assert(szind < SC_NSIZES); - assert(slab == extent_slab_get(extent)); + edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, + ptr); + assert(alloc_ctx.szind == edata_szind_get(edata)); + assert(alloc_ctx.szind < SC_NSIZES); + assert(alloc_ctx.slab == edata_slab_get(edata)); } - if (likely(slab)) { + if (likely(alloc_ctx.slab)) { /* Small allocation. */ - tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind, - slow_path); + tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, + alloc_ctx.szind, slow_path); } else { - arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path); + arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind, + slow_path); } } @@ -329,47 +341,43 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) { assert(ptr != NULL); assert(size <= SC_LARGE_MAXCLASS); - szind_t szind; - bool slab; + emap_alloc_ctx_t alloc_ctx; if (!config_prof || !opt_prof) { /* * There is no risk of being confused by a promoted sampled * object, so base szind and slab on the given size. */ - szind = sz_size2index(size); - slab = (szind < SC_NBINS); + alloc_ctx.szind = sz_size2index(size); + alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS); } if ((config_prof && opt_prof) || config_debug) { - rtree_ctx_t rtree_ctx_fallback; - rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, - &rtree_ctx_fallback); - - rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, - (uintptr_t)ptr, true, &szind, &slab); + emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, + &alloc_ctx); - assert(szind == sz_size2index(size)); - assert((config_prof && opt_prof) || slab == (szind < SC_NBINS)); + assert(alloc_ctx.szind == sz_size2index(size)); + assert((config_prof && opt_prof) + || alloc_ctx.slab == (alloc_ctx.szind < SC_NBINS)); if (config_debug) { - extent_t *extent = rtree_extent_read(tsdn, - &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); - assert(szind == extent_szind_get(extent)); - assert(slab == extent_slab_get(extent)); + edata_t *edata = emap_edata_lookup(tsdn, + &arena_emap_global, ptr); + assert(alloc_ctx.szind == edata_szind_get(edata)); + assert(alloc_ctx.slab == edata_slab_get(edata)); } } - if (likely(slab)) { + if (likely(alloc_ctx.slab)) { /* Small allocation. 
*/ arena_dalloc_small(tsdn, ptr); } else { - arena_dalloc_large_no_tcache(tsdn, ptr, szind); + arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind); } } JEMALLOC_ALWAYS_INLINE void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, - alloc_ctx_t *alloc_ctx, bool slow_path) { + emap_alloc_ctx_t *caller_alloc_ctx, bool slow_path) { assert(!tsdn_null(tsdn) || tcache == NULL); assert(ptr != NULL); assert(size <= SC_LARGE_MAXCLASS); @@ -379,49 +387,164 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, return; } - szind_t szind; - bool slab; - alloc_ctx_t local_ctx; + emap_alloc_ctx_t alloc_ctx; if (config_prof && opt_prof) { - if (alloc_ctx == NULL) { + if (caller_alloc_ctx == NULL) { /* Uncommon case and should be a static check. */ - rtree_ctx_t rtree_ctx_fallback; - rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, - &rtree_ctx_fallback); - rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, - (uintptr_t)ptr, true, &local_ctx.szind, - &local_ctx.slab); - assert(local_ctx.szind == sz_size2index(size)); - alloc_ctx = &local_ctx; + emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, + &alloc_ctx); + assert(alloc_ctx.szind == sz_size2index(size)); + } else { + alloc_ctx = *caller_alloc_ctx; } - slab = alloc_ctx->slab; - szind = alloc_ctx->szind; } else { /* * There is no risk of being confused by a promoted sampled * object, so base szind and slab on the given size. */ - szind = sz_size2index(size); - slab = (szind < SC_NBINS); + alloc_ctx.szind = sz_size2index(size); + alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS); } if (config_debug) { - rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn)); - rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, - (uintptr_t)ptr, true, &szind, &slab); - extent_t *extent = rtree_extent_read(tsdn, - &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); - assert(szind == extent_szind_get(extent)); - assert(slab == extent_slab_get(extent)); + edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, + ptr); + assert(alloc_ctx.szind == edata_szind_get(edata)); + assert(alloc_ctx.slab == edata_slab_get(edata)); } - if (likely(slab)) { + if (likely(alloc_ctx.slab)) { /* Small allocation. */ - tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind, - slow_path); + tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, + alloc_ctx.szind, slow_path); } else { - arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path); + arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind, + slow_path); + } +} + +static inline void +arena_cache_oblivious_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata, + size_t alignment) { + assert(edata_base_get(edata) == edata_addr_get(edata)); + + if (alignment < PAGE) { + unsigned lg_range = LG_PAGE - + lg_floor(CACHELINE_CEILING(alignment)); + size_t r; + if (!tsdn_null(tsdn)) { + tsd_t *tsd = tsdn_tsd(tsdn); + r = (size_t)prng_lg_range_u64( + tsd_prng_statep_get(tsd), lg_range); + } else { + uint64_t stack_value = (uint64_t)(uintptr_t)&r; + r = (size_t)prng_lg_range_u64(&stack_value, lg_range); + } + uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE - + lg_range); + edata->e_addr = (void *)((uintptr_t)edata->e_addr + + random_offset); + assert(ALIGNMENT_ADDR2BASE(edata->e_addr, alignment) == + edata->e_addr); + } +} + +/* + * The dalloc bin info contains just the information that the common paths need + * during tcache flushes. 
By force-inlining these paths, and using local copies + * of data (so that the compiler knows it's constant), we avoid a whole bunch of + * redundant loads and stores by leaving this information in registers. + */ +typedef struct arena_dalloc_bin_locked_info_s arena_dalloc_bin_locked_info_t; +struct arena_dalloc_bin_locked_info_s { + div_info_t div_info; + uint32_t nregs; + uint64_t ndalloc; +}; + +JEMALLOC_ALWAYS_INLINE size_t +arena_slab_regind(arena_dalloc_bin_locked_info_t *info, szind_t binind, + edata_t *slab, const void *ptr) { + size_t diff, regind; + + /* Freeing a pointer outside the slab can cause assertion failure. */ + assert((uintptr_t)ptr >= (uintptr_t)edata_addr_get(slab)); + assert((uintptr_t)ptr < (uintptr_t)edata_past_get(slab)); + /* Freeing an interior pointer can cause assertion failure. */ + assert(((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab)) % + (uintptr_t)bin_infos[binind].reg_size == 0); + + diff = (size_t)((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab)); + + /* Avoid doing division with a variable divisor. */ + regind = div_compute(&info->div_info, diff); + + assert(regind < bin_infos[binind].nregs); + + return regind; +} + +JEMALLOC_ALWAYS_INLINE void +arena_dalloc_bin_locked_begin(arena_dalloc_bin_locked_info_t *info, + szind_t binind) { + info->div_info = arena_binind_div_info[binind]; + info->nregs = bin_infos[binind].nregs; + info->ndalloc = 0; +} + +/* + * Does the deallocation work associated with freeing a single pointer (a + * "step") in between a arena_dalloc_bin_locked begin and end call. + * + * Returns true if arena_slab_dalloc must be called on slab. Doesn't do + * stats updates, which happen during finish (this lets running counts get left + * in a register). + */ +JEMALLOC_ALWAYS_INLINE bool +arena_dalloc_bin_locked_step(tsdn_t *tsdn, arena_t *arena, bin_t *bin, + arena_dalloc_bin_locked_info_t *info, szind_t binind, edata_t *slab, + void *ptr) { + const bin_info_t *bin_info = &bin_infos[binind]; + size_t regind = arena_slab_regind(info, binind, slab, ptr); + slab_data_t *slab_data = edata_slab_data_get(slab); + + assert(edata_nfree_get(slab) < bin_info->nregs); + /* Freeing an unallocated pointer can cause assertion failure. 
*/ + assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind)); + + bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind); + edata_nfree_inc(slab); + + if (config_stats) { + info->ndalloc++; + } + + unsigned nfree = edata_nfree_get(slab); + if (nfree == bin_info->nregs) { + arena_dalloc_bin_locked_handle_newly_empty(tsdn, arena, slab, + bin); + return true; + } else if (nfree == 1 && slab != bin->slabcur) { + arena_dalloc_bin_locked_handle_newly_nonempty(tsdn, arena, slab, + bin); } + return false; +} + +JEMALLOC_ALWAYS_INLINE void +arena_dalloc_bin_locked_finish(tsdn_t *tsdn, arena_t *arena, bin_t *bin, + arena_dalloc_bin_locked_info_t *info) { + if (config_stats) { + bin->stats.ndalloc += info->ndalloc; + assert(bin->stats.curregs >= (size_t)info->ndalloc); + bin->stats.curregs -= (size_t)info->ndalloc; + } +} + +static inline bin_t * +arena_get_bin(arena_t *arena, szind_t binind, unsigned binshard) { + bin_t *shard0 = (bin_t *)((uintptr_t)arena + arena_bin_offsets[binind]); + return shard0 + binshard; } #endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_stats.h b/contrib/jemalloc/include/jemalloc/internal/arena_stats.h index 23949ed9261657..15f1d345f9e8c7 100644 --- a/contrib/jemalloc/include/jemalloc/internal/arena_stats.h +++ b/contrib/jemalloc/include/jemalloc/internal/arena_stats.h @@ -2,77 +2,41 @@ #define JEMALLOC_INTERNAL_ARENA_STATS_H #include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/lockedint.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex_prof.h" +#include "jemalloc/internal/pa.h" #include "jemalloc/internal/sc.h" JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS -/* - * In those architectures that support 64-bit atomics, we use atomic updates for - * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize - * externally. - */ -#ifdef JEMALLOC_ATOMIC_U64 -typedef atomic_u64_t arena_stats_u64_t; -#else -/* Must hold the arena stats mutex while reading atomically. */ -typedef uint64_t arena_stats_u64_t; -#endif - typedef struct arena_stats_large_s arena_stats_large_t; struct arena_stats_large_s { /* * Total number of allocation/deallocation requests served directly by * the arena. */ - arena_stats_u64_t nmalloc; - arena_stats_u64_t ndalloc; + locked_u64_t nmalloc; + locked_u64_t ndalloc; /* * Number of allocation requests that correspond to this size class. * This includes requests served by tcache, though tcache only * periodically merges into this counter. */ - arena_stats_u64_t nrequests; /* Partially derived. */ + locked_u64_t nrequests; /* Partially derived. */ /* * Number of tcache fills / flushes for large (similarly, periodically * merged). Note that there is no large tcache batch-fill currently * (i.e. only fill 1 at a time); however flush may be batched. */ - arena_stats_u64_t nfills; /* Partially derived. */ - arena_stats_u64_t nflushes; /* Partially derived. */ + locked_u64_t nfills; /* Partially derived. */ + locked_u64_t nflushes; /* Partially derived. */ /* Current number of allocations of this size class. */ size_t curlextents; /* Derived. */ }; -typedef struct arena_stats_decay_s arena_stats_decay_t; -struct arena_stats_decay_s { - /* Total number of purge sweeps. */ - arena_stats_u64_t npurge; - /* Total number of madvise calls made. */ - arena_stats_u64_t nmadvise; - /* Total number of pages purged. 
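
arena_get_bin() and the trailing bin_t bins[0] member rely on every size class's bin shards being laid out in one flexible array right after the arena struct, with a per-class byte offset (arena_bin_offsets[] in the real code) locating shard 0. A toy reconstruction of that layout; the types, shard counts, and names are invented for illustration:

```
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { uint64_t state[4]; } toy_bin_t;
typedef struct {
	unsigned ind;
	/* ... other arena fields ... */
	toy_bin_t bins[];	/* All shards of all size classes. */
} toy_arena_t;

enum { NCLASSES = 3 };
static const unsigned nshards[NCLASSES] = {2, 1, 4};
static uint32_t bin_offsets[NCLASSES];	/* Byte offset of shard 0, per class. */

static toy_bin_t *
toy_get_bin(toy_arena_t *arena, unsigned binind, unsigned binshard) {
	toy_bin_t *shard0 =
	    (toy_bin_t *)((uintptr_t)arena + bin_offsets[binind]);
	return shard0 + binshard;
}

int
main(void) {
	unsigned total = 0;
	for (unsigned i = 0; i < NCLASSES; i++) {
		bin_offsets[i] = (uint32_t)(offsetof(toy_arena_t, bins) +
		    total * sizeof(toy_bin_t));
		total += nshards[i];
	}
	toy_arena_t *arena = calloc(1,
	    offsetof(toy_arena_t, bins) + total * sizeof(toy_bin_t));
	if (arena == NULL) {
		return 1;
	}
	/* Size class 2, shard 3: the 7th bin overall (index 6). */
	toy_bin_t *b = toy_get_bin(arena, 2, 3);
	printf("array index: %td\n", b - arena->bins);
	free(arena);
	return 0;
}
```

This is why the new arena_s ends with bin_t bins[0]: the arena is allocated together with exactly the shards its binshard configuration calls for, and a lookup is one offset add rather than a two-level index.
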
*/ - arena_stats_u64_t purged; -}; - -typedef struct arena_stats_extents_s arena_stats_extents_t; -struct arena_stats_extents_s { - /* - * Stats for a given index in the range [0, SC_NPSIZES] in an extents_t. - * We track both bytes and # of extents: two extents in the same bucket - * may have different sizes if adjacent size classes differ by more than - * a page, so bytes cannot always be derived from # of extents. - */ - atomic_zu_t ndirty; - atomic_zu_t dirty_bytes; - atomic_zu_t nmuzzy; - atomic_zu_t muzzy_bytes; - atomic_zu_t nretained; - atomic_zu_t retained_bytes; -}; - /* * Arena stats. Note that fields marked "derived" are not directly maintained * within the arena code; rather their values are derived during stats merge @@ -80,43 +44,36 @@ struct arena_stats_extents_s { */ typedef struct arena_stats_s arena_stats_t; struct arena_stats_s { -#ifndef JEMALLOC_ATOMIC_U64 - malloc_mutex_t mtx; -#endif - - /* Number of bytes currently mapped, excluding retained memory. */ - atomic_zu_t mapped; /* Partially derived. */ + LOCKEDINT_MTX_DECLARE(mtx) /* - * Number of unused virtual memory bytes currently retained. Retained - * bytes are technically mapped (though always decommitted or purged), - * but they are excluded from the mapped statistic (above). + * resident includes the base stats -- that's why it lives here and not + * in pa_shard_stats_t. */ - atomic_zu_t retained; /* Derived. */ - - /* Number of extent_t structs allocated by base, but not being used. */ - atomic_zu_t extent_avail; - - arena_stats_decay_t decay_dirty; - arena_stats_decay_t decay_muzzy; + size_t base; /* Derived. */ + size_t resident; /* Derived. */ + size_t metadata_thp; /* Derived. */ + size_t mapped; /* Derived. */ - atomic_zu_t base; /* Derived. */ atomic_zu_t internal; - atomic_zu_t resident; /* Derived. */ - atomic_zu_t metadata_thp; - atomic_zu_t allocated_large; /* Derived. */ - arena_stats_u64_t nmalloc_large; /* Derived. */ - arena_stats_u64_t ndalloc_large; /* Derived. */ - arena_stats_u64_t nfills_large; /* Derived. */ - arena_stats_u64_t nflushes_large; /* Derived. */ - arena_stats_u64_t nrequests_large; /* Derived. */ + size_t allocated_large; /* Derived. */ + uint64_t nmalloc_large; /* Derived. */ + uint64_t ndalloc_large; /* Derived. */ + uint64_t nfills_large; /* Derived. */ + uint64_t nflushes_large; /* Derived. */ + uint64_t nrequests_large; /* Derived. */ - /* VM space had to be leaked (undocumented). Normally 0. */ - atomic_zu_t abandoned_vm; + /* + * The stats logically owned by the pa_shard in the same arena. This + * lives here only because it's convenient for the purposes of the ctl + * module -- it only knows about the single arena_stats. + */ + pa_shard_stats_t pa_shard_stats; /* Number of bytes cached in tcache associated with this arena. */ - atomic_zu_t tcache_bytes; /* Derived. */ + size_t tcache_bytes; /* Derived. */ + size_t tcache_stashed_bytes; /* Derived. */ mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]; @@ -134,138 +91,24 @@ arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) { assert(((char *)arena_stats)[i] == 0); } } -#ifndef JEMALLOC_ATOMIC_U64 - if (malloc_mutex_init(&arena_stats->mtx, "arena_stats", + if (LOCKEDINT_MTX_INIT(arena_stats->mtx, "arena_stats", WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) { return true; } -#endif /* Memory is zeroed, so there is no need to clear stats. 
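
The removed arena_stats_{add,sub,read}_u64 helpers and the new locked_u64_t encode the same contract: where 64-bit atomics exist the counter can be a relaxed atomic, otherwise it is a plain integer that may only be touched with the stats mutex held. A sketch of the mutex flavor of that contract; the toy_ names are not jemalloc's:

```
#include <pthread.h>
#include <stdint.h>

typedef struct {
	pthread_mutex_t mtx;
	uint64_t val;
} toy_locked_u64_t;

static void
toy_locked_inc_u64(toy_locked_u64_t *p, uint64_t x) {
	pthread_mutex_lock(&p->mtx);
	p->val += x;
	pthread_mutex_unlock(&p->mtx);
}

static uint64_t
toy_locked_read_u64(toy_locked_u64_t *p) {
	pthread_mutex_lock(&p->mtx);
	uint64_t v = p->val;
	pthread_mutex_unlock(&p->mtx);
	return v;
}

int
main(void) {
	toy_locked_u64_t nrequests = {PTHREAD_MUTEX_INITIALIZER, 0};
	toy_locked_inc_u64(&nrequests, 1);
	return toy_locked_read_u64(&nrequests) == 1 ? 0 : 1;
}
```

In the real code one LOCKEDINT_MTX guards a whole arena_stats_t worth of counters, so the lock is taken once around a batch of updates rather than per increment.
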
*/ return false; } -static inline void -arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) { -#ifndef JEMALLOC_ATOMIC_U64 - malloc_mutex_lock(tsdn, &arena_stats->mtx); -#endif -} - -static inline void -arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) { -#ifndef JEMALLOC_ATOMIC_U64 - malloc_mutex_unlock(tsdn, &arena_stats->mtx); -#endif -} - -static inline uint64_t -arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, - arena_stats_u64_t *p) { -#ifdef JEMALLOC_ATOMIC_U64 - return atomic_load_u64(p, ATOMIC_RELAXED); -#else - malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); - return *p; -#endif -} - -static inline void -arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, - arena_stats_u64_t *p, uint64_t x) { -#ifdef JEMALLOC_ATOMIC_U64 - atomic_fetch_add_u64(p, x, ATOMIC_RELAXED); -#else - malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); - *p += x; -#endif -} - -static inline void -arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, - arena_stats_u64_t *p, uint64_t x) { -#ifdef JEMALLOC_ATOMIC_U64 - uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED); - assert(r - x <= r); -#else - malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); - *p -= x; - assert(*p + x >= *p); -#endif -} - -/* - * Non-atomically sets *dst += src. *dst needs external synchronization. - * This lets us avoid the cost of a fetch_add when its unnecessary (note that - * the types here are atomic). - */ -static inline void -arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) { -#ifdef JEMALLOC_ATOMIC_U64 - uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED); - atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED); -#else - *dst += src; -#endif -} - -static inline size_t -arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, - atomic_zu_t *p) { -#ifdef JEMALLOC_ATOMIC_U64 - return atomic_load_zu(p, ATOMIC_RELAXED); -#else - malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); - return atomic_load_zu(p, ATOMIC_RELAXED); -#endif -} - -static inline void -arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, - atomic_zu_t *p, size_t x) { -#ifdef JEMALLOC_ATOMIC_U64 - atomic_fetch_add_zu(p, x, ATOMIC_RELAXED); -#else - malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); - size_t cur = atomic_load_zu(p, ATOMIC_RELAXED); - atomic_store_zu(p, cur + x, ATOMIC_RELAXED); -#endif -} - -static inline void -arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, - atomic_zu_t *p, size_t x) { -#ifdef JEMALLOC_ATOMIC_U64 - size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED); - assert(r - x <= r); -#else - malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); - size_t cur = atomic_load_zu(p, ATOMIC_RELAXED); - atomic_store_zu(p, cur - x, ATOMIC_RELAXED); -#endif -} - -/* Like the _u64 variant, needs an externally synchronized *dst. 
*/ -static inline void -arena_stats_accum_zu(atomic_zu_t *dst, size_t src) { - size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED); - atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED); -} - static inline void arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats, szind_t szind, uint64_t nrequests) { - arena_stats_lock(tsdn, arena_stats); + LOCKEDINT_MTX_LOCK(tsdn, arena_stats->mtx); arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS]; - arena_stats_add_u64(tsdn, arena_stats, &lstats->nrequests, nrequests); - arena_stats_add_u64(tsdn, arena_stats, &lstats->nflushes, 1); - arena_stats_unlock(tsdn, arena_stats); -} - -static inline void -arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) { - arena_stats_lock(tsdn, arena_stats); - arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size); - arena_stats_unlock(tsdn, arena_stats); + locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx), + &lstats->nrequests, nrequests); + locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx), + &lstats->nflushes, 1); + LOCKEDINT_MTX_UNLOCK(tsdn, arena_stats->mtx); } #endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_structs.h b/contrib/jemalloc/include/jemalloc/internal/arena_structs.h new file mode 100644 index 00000000000000..e2a5a4087bcc96 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/arena_structs.h @@ -0,0 +1,101 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_H +#define JEMALLOC_INTERNAL_ARENA_STRUCTS_H + +#include "jemalloc/internal/arena_stats.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/bin.h" +#include "jemalloc/internal/bitmap.h" +#include "jemalloc/internal/counter.h" +#include "jemalloc/internal/ecache.h" +#include "jemalloc/internal/edata_cache.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/nstime.h" +#include "jemalloc/internal/pa.h" +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/sc.h" +#include "jemalloc/internal/ticker.h" + +struct arena_s { + /* + * Number of threads currently assigned to this arena. Each thread has + * two distinct assignments, one for application-serving allocation, and + * the other for internal metadata allocation. Internal metadata must + * not be allocated from arenas explicitly created via the arenas.create + * mallctl, because the arena..reset mallctl indiscriminately + * discards all allocations for the affected arena. + * + * 0: Application allocation. + * 1: Internal metadata allocation. + * + * Synchronization: atomic. + */ + atomic_u_t nthreads[2]; + + /* Next bin shard for binding new threads. Synchronization: atomic. */ + atomic_u_t binshard_next; + + /* + * When percpu_arena is enabled, to amortize the cost of reading / + * updating the current CPU id, track the most recent thread accessing + * this arena, and only read CPU if there is a mismatch. + */ + tsdn_t *last_thd; + + /* Synchronization: internal. */ + arena_stats_t stats; + + /* + * Lists of tcaches and cache_bin_array_descriptors for extant threads + * associated with this arena. Stats from these are merged + * incrementally, and at exit if opt_stats_print is enabled. + * + * Synchronization: tcache_ql_mtx. 
+ */ + ql_head(tcache_slow_t) tcache_ql; + ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql; + malloc_mutex_t tcache_ql_mtx; + + /* + * Represents a dss_prec_t, but atomically. + * + * Synchronization: atomic. + */ + atomic_u_t dss_prec; + + /* + * Extant large allocations. + * + * Synchronization: large_mtx. + */ + edata_list_active_t large; + /* Synchronizes all large allocation/update/deallocation. */ + malloc_mutex_t large_mtx; + + /* The page-level allocator shard this arena uses. */ + pa_shard_t pa_shard; + + /* + * A cached copy of base->ind. This can get accessed on hot paths; + * looking it up in base requires an extra pointer hop / cache miss. + */ + unsigned ind; + + /* + * Base allocator, from which arena metadata are allocated. + * + * Synchronization: internal. + */ + base_t *base; + /* Used to determine uptime. Read-only after initialization. */ + nstime_t create_time; + + /* + * The arena is allocated alongside its bins; really this is a + * dynamically sized array determined by the binshard settings. + */ + bin_t bins[0]; +}; + +#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_structs_a.h b/contrib/jemalloc/include/jemalloc/internal/arena_structs_a.h deleted file mode 100644 index 46aa77c884b7e3..00000000000000 --- a/contrib/jemalloc/include/jemalloc/internal/arena_structs_a.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H -#define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H - -#include "jemalloc/internal/bitmap.h" - -struct arena_slab_data_s { - /* Per region allocated/deallocated bitmap. */ - bitmap_t bitmap[BITMAP_GROUPS_MAX]; -}; - -#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_structs_b.h b/contrib/jemalloc/include/jemalloc/internal/arena_structs_b.h deleted file mode 100644 index eeab57fd6e5c51..00000000000000 --- a/contrib/jemalloc/include/jemalloc/internal/arena_structs_b.h +++ /dev/null @@ -1,232 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H -#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H - -#include "jemalloc/internal/arena_stats.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/bin.h" -#include "jemalloc/internal/bitmap.h" -#include "jemalloc/internal/extent_dss.h" -#include "jemalloc/internal/jemalloc_internal_types.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/nstime.h" -#include "jemalloc/internal/ql.h" -#include "jemalloc/internal/sc.h" -#include "jemalloc/internal/smoothstep.h" -#include "jemalloc/internal/ticker.h" - -struct arena_decay_s { - /* Synchronizes all non-atomic fields. */ - malloc_mutex_t mtx; - /* - * True if a thread is currently purging the extents associated with - * this decay structure. - */ - bool purging; - /* - * Approximate time in milliseconds from the creation of a set of unused - * dirty pages until an equivalent set of unused dirty pages is purged - * and/or reused. - */ - atomic_zd_t time_ms; - /* time / SMOOTHSTEP_NSTEPS. */ - nstime_t interval; - /* - * Time at which the current decay interval logically started. We do - * not actually advance to a new epoch until sometime after it starts - * because of scheduling and computation delays, and it is even possible - * to completely skip epochs. In all cases, during epoch advancement we - * merge all relevant activity into the most recently recorded epoch. - */ - nstime_t epoch; - /* Deadline randomness generator. 
*/ - uint64_t jitter_state; - /* - * Deadline for current epoch. This is the sum of interval and per - * epoch jitter which is a uniform random variable in [0..interval). - * Epochs always advance by precise multiples of interval, but we - * randomize the deadline to reduce the likelihood of arenas purging in - * lockstep. - */ - nstime_t deadline; - /* - * Number of unpurged pages at beginning of current epoch. During epoch - * advancement we use the delta between arena->decay_*.nunpurged and - * extents_npages_get(&arena->extents_*) to determine how many dirty - * pages, if any, were generated. - */ - size_t nunpurged; - /* - * Trailing log of how many unused dirty pages were generated during - * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last - * element is the most recent epoch. Corresponding epoch times are - * relative to epoch. - */ - size_t backlog[SMOOTHSTEP_NSTEPS]; - - /* - * Pointer to associated stats. These stats are embedded directly in - * the arena's stats due to how stats structures are shared between the - * arena and ctl code. - * - * Synchronization: Same as associated arena's stats field. */ - arena_stats_decay_t *stats; - /* Peak number of pages in associated extents. Used for debug only. */ - uint64_t ceil_npages; -}; - -struct arena_s { - /* - * Number of threads currently assigned to this arena. Each thread has - * two distinct assignments, one for application-serving allocation, and - * the other for internal metadata allocation. Internal metadata must - * not be allocated from arenas explicitly created via the arenas.create - * mallctl, because the arena..reset mallctl indiscriminately - * discards all allocations for the affected arena. - * - * 0: Application allocation. - * 1: Internal metadata allocation. - * - * Synchronization: atomic. - */ - atomic_u_t nthreads[2]; - - /* Next bin shard for binding new threads. Synchronization: atomic. */ - atomic_u_t binshard_next; - - /* - * When percpu_arena is enabled, to amortize the cost of reading / - * updating the current CPU id, track the most recent thread accessing - * this arena, and only read CPU if there is a mismatch. - */ - tsdn_t *last_thd; - - /* Synchronization: internal. */ - arena_stats_t stats; - - /* - * Lists of tcaches and cache_bin_array_descriptors for extant threads - * associated with this arena. Stats from these are merged - * incrementally, and at exit if opt_stats_print is enabled. - * - * Synchronization: tcache_ql_mtx. - */ - ql_head(tcache_t) tcache_ql; - ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql; - malloc_mutex_t tcache_ql_mtx; - - /* Synchronization: internal. */ - prof_accum_t prof_accum; - - /* - * PRNG state for cache index randomization of large allocation base - * pointers. - * - * Synchronization: atomic. - */ - atomic_zu_t offset_state; - - /* - * Extent serial number generator state. - * - * Synchronization: atomic. - */ - atomic_zu_t extent_sn_next; - - /* - * Represents a dss_prec_t, but atomically. - * - * Synchronization: atomic. - */ - atomic_u_t dss_prec; - - /* - * Number of pages in active extents. - * - * Synchronization: atomic. - */ - atomic_zu_t nactive; - - /* - * Extant large allocations. - * - * Synchronization: large_mtx. - */ - extent_list_t large; - /* Synchronizes all large allocation/update/deallocation. */ - malloc_mutex_t large_mtx; - - /* - * Collections of extents that were previously allocated. These are - * used when allocating extents, in an attempt to re-use address space. - * - * Synchronization: internal. 
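
The decay bookkeeping being deleted here (and reintroduced by the new decay.h) spells out how an epoch deadline is chosen: the epoch start plus the interval plus a uniform jitter in [0, interval), so arenas do not all purge in lockstep. In plain nanosecond arithmetic, with an illustrative splitmix64 in place of the jitter_state PRNG:

```
#include <stdint.h>
#include <stdio.h>

static uint64_t
splitmix64(uint64_t *state) {
	uint64_t z = (*state += 0x9e3779b97f4a7c15ULL);
	z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9ULL;
	z = (z ^ (z >> 27)) * 0x94d049bb133111ebULL;
	return z ^ (z >> 31);
}

/* deadline = epoch + interval + uniform jitter in [0, interval). */
static uint64_t
decay_deadline(uint64_t epoch_ns, uint64_t interval_ns,
    uint64_t *jitter_state) {
	uint64_t jitter = splitmix64(jitter_state) % interval_ns;
	return epoch_ns + interval_ns + jitter;
}

int
main(void) {
	uint64_t jitter_state = 42;
	/*
	 * A 10 s dirty decay time split into SMOOTHSTEP_NSTEPS epochs;
	 * 200 steps is illustrative here.
	 */
	uint64_t interval_ns = UINT64_C(10000000000) / 200;
	printf("deadline: %llu ns\n", (unsigned long long)
	    decay_deadline(0, interval_ns, &jitter_state));
	return 0;
}
```
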
- */ - extents_t extents_dirty; - extents_t extents_muzzy; - extents_t extents_retained; - - /* - * Decay-based purging state, responsible for scheduling extent state - * transitions. - * - * Synchronization: internal. - */ - arena_decay_t decay_dirty; /* dirty --> muzzy */ - arena_decay_t decay_muzzy; /* muzzy --> retained */ - - /* - * Next extent size class in a growing series to use when satisfying a - * request via the extent hooks (only if opt_retain). This limits the - * number of disjoint virtual memory ranges so that extent merging can - * be effective even if multiple arenas' extent allocation requests are - * highly interleaved. - * - * retain_grow_limit is the max allowed size ind to expand (unless the - * required size is greater). Default is no limit, and controlled - * through mallctl only. - * - * Synchronization: extent_grow_mtx - */ - pszind_t extent_grow_next; - pszind_t retain_grow_limit; - malloc_mutex_t extent_grow_mtx; - - /* - * Available extent structures that were allocated via - * base_alloc_extent(). - * - * Synchronization: extent_avail_mtx. - */ - extent_tree_t extent_avail; - atomic_zu_t extent_avail_cnt; - malloc_mutex_t extent_avail_mtx; - - /* - * bins is used to store heaps of free regions. - * - * Synchronization: internal. - */ - bins_t bins[SC_NBINS]; - - /* - * Base allocator, from which arena metadata are allocated. - * - * Synchronization: internal. - */ - base_t *base; - /* Used to determine uptime. Read-only after initialization. */ - nstime_t create_time; -}; - -/* Used in conjunction with tsd for fast arena-related context lookup. */ -struct arena_tdata_s { - ticker_t decay_ticker; -}; - -/* Used to pass rtree lookup context down the path. */ -struct alloc_ctx_s { - szind_t szind; - bool slab; -}; - -#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_types.h b/contrib/jemalloc/include/jemalloc/internal/arena_types.h index 624937e4f59647..d0e1291762a6e5 100644 --- a/contrib/jemalloc/include/jemalloc/internal/arena_types.h +++ b/contrib/jemalloc/include/jemalloc/internal/arena_types.h @@ -3,21 +3,14 @@ #include "jemalloc/internal/sc.h" -/* Maximum number of regions in one slab. */ -#define LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN) -#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS) - /* Default decay times in milliseconds. */ #define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000) #define MUZZY_DECAY_MS_DEFAULT (0) /* Number of event ticks between time checks. */ -#define DECAY_NTICKS_PER_UPDATE 1000 +#define ARENA_DECAY_NTICKS_PER_UPDATE 1000 -typedef struct arena_slab_data_s arena_slab_data_t; typedef struct arena_decay_s arena_decay_t; typedef struct arena_s arena_t; -typedef struct arena_tdata_s arena_tdata_t; -typedef struct alloc_ctx_s alloc_ctx_t; typedef enum { percpu_arena_mode_names_base = 0, /* Used for options processing. */ @@ -48,4 +41,18 @@ typedef enum { */ #define OVERSIZE_THRESHOLD_DEFAULT (8 << 20) +struct arena_config_s { + /* extent hooks to be used for the arena */ + extent_hooks_t *extent_hooks; + + /* + * Use extent hooks for metadata (base) allocations when true. 
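
arena_new() now takes a const arena_config_t * instead of a bare extent_hooks_t *, bundling the hooks with the new metadata_use_hooks flag. A sketch of how a caller would fill one in; the toy_ types mirror the struct shown above, and the defaults used here are assumptions for illustration rather than a statement of what arena_config_default contains:

```
#include <stdbool.h>
#include <stddef.h>

typedef struct extent_hooks_s extent_hooks_t;	/* Opaque in this sketch. */

typedef struct {
	extent_hooks_t *extent_hooks;
	bool metadata_use_hooks;
} toy_arena_config_t;

/* NULL meaning "built-in hooks" is an assumption of this sketch. */
static const toy_arena_config_t toy_arena_config_default = {NULL, true};

static toy_arena_config_t
make_config(extent_hooks_t *my_hooks) {
	toy_arena_config_t cfg = toy_arena_config_default;
	cfg.extent_hooks = my_hooks;
	/* Keep base (metadata) allocations on the default hooks. */
	cfg.metadata_use_hooks = false;
	return cfg;
}

int
main(void) {
	toy_arena_config_t cfg = make_config(NULL);
	return cfg.metadata_use_hooks ? 1 : 0;
}
```
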
+ */ + bool metadata_use_hooks; +}; + +typedef struct arena_config_s arena_config_t; + +extern const arena_config_t arena_config_default; + #endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/atomic.h b/contrib/jemalloc/include/jemalloc/internal/atomic.h index a76f54cee3f4be..c0f73122accd99 100644 --- a/contrib/jemalloc/include/jemalloc/internal/atomic.h +++ b/contrib/jemalloc/include/jemalloc/internal/atomic.h @@ -51,6 +51,27 @@ #define ATOMIC_ACQ_REL atomic_memory_order_acq_rel #define ATOMIC_SEQ_CST atomic_memory_order_seq_cst +/* + * Another convenience -- simple atomic helper functions. + */ +#define JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(type, short_type, \ + lg_size) \ + JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \ + ATOMIC_INLINE void \ + atomic_load_add_store_##short_type(atomic_##short_type##_t *a, \ + type inc) { \ + type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \ + type newval = oldval + inc; \ + atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \ + } \ + ATOMIC_INLINE void \ + atomic_load_sub_store_##short_type(atomic_##short_type##_t *a, \ + type inc) { \ + type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \ + type newval = oldval - inc; \ + atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \ + } + /* * Not all platforms have 64-bit atomics. If we do, this #define exposes that * fact. @@ -67,18 +88,18 @@ JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR) */ JEMALLOC_GENERATE_ATOMICS(bool, b, 0) -JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT) +JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT) -JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR) +JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR) -JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR) +JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR) -JEMALLOC_GENERATE_INT_ATOMICS(uint8_t, u8, 0) +JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint8_t, u8, 0) -JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2) +JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint32_t, u32, 2) #ifdef JEMALLOC_ATOMIC_U64 -JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3) +JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint64_t, u64, 3) #endif #undef ATOMIC_INLINE diff --git a/contrib/jemalloc/include/jemalloc/internal/background_thread_externs.h b/contrib/jemalloc/include/jemalloc/internal/background_thread_externs.h index 0f997e18beecbc..6ae3c8d89d4bb2 100644 --- a/contrib/jemalloc/include/jemalloc/internal/background_thread_externs.h +++ b/contrib/jemalloc/include/jemalloc/internal/background_thread_externs.h @@ -12,8 +12,9 @@ extern background_thread_info_t *background_thread_info; bool background_thread_create(tsd_t *tsd, unsigned arena_ind); bool background_threads_enable(tsd_t *tsd); bool background_threads_disable(tsd_t *tsd); -void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, - arena_decay_t *decay, size_t npages_new); +bool background_thread_is_started(background_thread_info_t* info); +void background_thread_wakeup_early(background_thread_info_t *info, + nstime_t *remaining_sleep); void background_thread_prefork0(tsdn_t *tsdn); void background_thread_prefork1(tsdn_t *tsdn); void background_thread_postfork_parent(tsdn_t *tsdn); @@ -27,6 +28,6 @@ extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *, void *(*)(void *), void *__restrict); #endif bool background_thread_boot0(void); -bool background_thread_boot1(tsdn_t *tsdn); +bool background_thread_boot1(tsdn_t 
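
The new JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS wrapper adds atomic_load_add_store_* / atomic_load_sub_store_* helpers that are deliberately not read-modify-write operations: a relaxed load, a plain add, and a relaxed store. Written out with C11 atomics (the function name here is illustrative):

```
#include <stdatomic.h>
#include <stdint.h>

/*
 * Not an atomic RMW: safe only when all writers are already serialized
 * (for example by a stats mutex). Concurrent readers still get untorn
 * relaxed loads.
 */
static inline void
load_add_store_u64(_Atomic uint64_t *a, uint64_t inc) {
	uint64_t oldval = atomic_load_explicit(a, memory_order_relaxed);
	atomic_store_explicit(a, oldval + inc, memory_order_relaxed);
}

int
main(void) {
	_Atomic uint64_t counter = 0;
	load_add_store_u64(&counter, 3);	/* Caller holds the lock here. */
	return atomic_load_explicit(&counter, memory_order_relaxed) == 3
	    ? 0 : 1;
}
```
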
*tsdn, base_t *base); #endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/background_thread_inlines.h b/contrib/jemalloc/include/jemalloc/internal/background_thread_inlines.h index f85e86fa375bd5..92c5febe70fd75 100644 --- a/contrib/jemalloc/include/jemalloc/internal/background_thread_inlines.h +++ b/contrib/jemalloc/include/jemalloc/internal/background_thread_inlines.h @@ -45,18 +45,4 @@ background_thread_indefinite_sleep(background_thread_info_t *info) { return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE); } -JEMALLOC_ALWAYS_INLINE void -arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena, - bool is_background_thread) { - if (!background_thread_enabled() || is_background_thread) { - return; - } - background_thread_info_t *info = - arena_background_thread_info_get(arena); - if (background_thread_indefinite_sleep(info)) { - background_thread_interval_check(tsdn, arena, - &arena->decay_dirty, 0); - } -} - #endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/background_thread_structs.h b/contrib/jemalloc/include/jemalloc/internal/background_thread_structs.h index c02aa434c7dab8..83a9198460299a 100644 --- a/contrib/jemalloc/include/jemalloc/internal/background_thread_structs.h +++ b/contrib/jemalloc/include/jemalloc/internal/background_thread_structs.h @@ -11,6 +11,17 @@ #define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT #define DEFAULT_NUM_BACKGROUND_THREAD 4 +/* + * These exist only as a transitional state. Eventually, deferral should be + * part of the PAI, and each implementation can indicate wait times with more + * specificity. + */ +#define BACKGROUND_THREAD_HPA_INTERVAL_MAX_UNINITIALIZED (-2) +#define BACKGROUND_THREAD_HPA_INTERVAL_MAX_DEFAULT_WHEN_ENABLED 5000 + +#define BACKGROUND_THREAD_DEFERRED_MIN UINT64_C(0) +#define BACKGROUND_THREAD_DEFERRED_MAX UINT64_MAX + typedef enum { background_thread_stopped, background_thread_started, @@ -48,6 +59,7 @@ struct background_thread_stats_s { size_t num_threads; uint64_t num_runs; nstime_t run_interval; + mutex_prof_data_t max_counter_per_bg_thd; }; typedef struct background_thread_stats_s background_thread_stats_t; diff --git a/contrib/jemalloc/include/jemalloc/internal/base.h b/contrib/jemalloc/include/jemalloc/internal/base.h new file mode 100644 index 00000000000000..9b2c9fb10b99a0 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/base.h @@ -0,0 +1,110 @@ +#ifndef JEMALLOC_INTERNAL_BASE_H +#define JEMALLOC_INTERNAL_BASE_H + +#include "jemalloc/internal/edata.h" +#include "jemalloc/internal/ehooks.h" +#include "jemalloc/internal/mutex.h" + +enum metadata_thp_mode_e { + metadata_thp_disabled = 0, + /* + * Lazily enable hugepage for metadata. To avoid high RSS caused by THP + * + low usage arena (i.e. THP becomes a significant percentage), the + * "auto" option only starts using THP after a base allocator used up + * the first THP region. Starting from the second hugepage (in a single + * arena), "auto" behaves the same as "always", i.e. madvise hugepage + * right away. + */ + metadata_thp_auto = 1, + metadata_thp_always = 2, + metadata_thp_mode_limit = 3 +}; +typedef enum metadata_thp_mode_e metadata_thp_mode_t; + +#define METADATA_THP_DEFAULT metadata_thp_disabled +extern metadata_thp_mode_t opt_metadata_thp; +extern const char *metadata_thp_mode_names[]; + + +/* Embedded at the beginning of every block of base-managed virtual memory. 
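
The metadata_thp_mode_e values above back the existing opt.metadata_thp option, selected at startup (for example via MALLOC_CONF=metadata_thp:auto) and readable through mallctl(). A small sketch that just echoes the active mode, assuming the FreeBSD <malloc_np.h> declarations:

```
#include <stddef.h>
#include <stdio.h>
#include <malloc_np.h>

int
main(void) {
	const char *mode;
	size_t sz = sizeof(mode);
	/* Read-only string option: oldp receives a const char *. */
	if (mallctl("opt.metadata_thp", &mode, &sz, NULL, 0) != 0) {
		return 1;
	}
	printf("metadata_thp: %s\n", mode);
	return 0;
}
```
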
*/ +typedef struct base_block_s base_block_t; +struct base_block_s { + /* Total size of block's virtual memory mapping. */ + size_t size; + + /* Next block in list of base's blocks. */ + base_block_t *next; + + /* Tracks unused trailing space. */ + edata_t edata; +}; + +typedef struct base_s base_t; +struct base_s { + /* + * User-configurable extent hook functions. + */ + ehooks_t ehooks; + + /* + * User-configurable extent hook functions for metadata allocations. + */ + ehooks_t ehooks_base; + + /* Protects base_alloc() and base_stats_get() operations. */ + malloc_mutex_t mtx; + + /* Using THP when true (metadata_thp auto mode). */ + bool auto_thp_switched; + /* + * Most recent size class in the series of increasingly large base + * extents. Logarithmic spacing between subsequent allocations ensures + * that the total number of distinct mappings remains small. + */ + pszind_t pind_last; + + /* Serial number generation state. */ + size_t extent_sn_next; + + /* Chain of all blocks associated with base. */ + base_block_t *blocks; + + /* Heap of extents that track unused trailing space within blocks. */ + edata_heap_t avail[SC_NSIZES]; + + /* Stats, only maintained if config_stats. */ + size_t allocated; + size_t resident; + size_t mapped; + /* Number of THP regions touched. */ + size_t n_thp; +}; + +static inline unsigned +base_ind_get(const base_t *base) { + return ehooks_ind_get(&base->ehooks); +} + +static inline bool +metadata_thp_enabled(void) { + return (opt_metadata_thp != metadata_thp_disabled); +} + +base_t *b0get(void); +base_t *base_new(tsdn_t *tsdn, unsigned ind, + const extent_hooks_t *extent_hooks, bool metadata_use_hooks); +void base_delete(tsdn_t *tsdn, base_t *base); +ehooks_t *base_ehooks_get(base_t *base); +ehooks_t *base_ehooks_get_for_metadata(base_t *base); +extent_hooks_t *base_extent_hooks_set(base_t *base, + extent_hooks_t *extent_hooks); +void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment); +edata_t *base_alloc_edata(tsdn_t *tsdn, base_t *base); +void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, + size_t *resident, size_t *mapped, size_t *n_thp); +void base_prefork(tsdn_t *tsdn, base_t *base); +void base_postfork_parent(tsdn_t *tsdn, base_t *base); +void base_postfork_child(tsdn_t *tsdn, base_t *base); +bool base_boot(tsdn_t *tsdn); + +#endif /* JEMALLOC_INTERNAL_BASE_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/base_externs.h b/contrib/jemalloc/include/jemalloc/internal/base_externs.h deleted file mode 100644 index 7b705c9b4d1cb3..00000000000000 --- a/contrib/jemalloc/include/jemalloc/internal/base_externs.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H -#define JEMALLOC_INTERNAL_BASE_EXTERNS_H - -extern metadata_thp_mode_t opt_metadata_thp; -extern const char *metadata_thp_mode_names[]; - -base_t *b0get(void); -base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks); -void base_delete(tsdn_t *tsdn, base_t *base); -extent_hooks_t *base_extent_hooks_get(base_t *base); -extent_hooks_t *base_extent_hooks_set(base_t *base, - extent_hooks_t *extent_hooks); -void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment); -extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base); -void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, - size_t *resident, size_t *mapped, size_t *n_thp); -void base_prefork(tsdn_t *tsdn, base_t *base); -void base_postfork_parent(tsdn_t *tsdn, base_t *base); -void base_postfork_child(tsdn_t *tsdn, base_t *base); -bool 
base_boot(tsdn_t *tsdn); - -#endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/base_inlines.h b/contrib/jemalloc/include/jemalloc/internal/base_inlines.h deleted file mode 100644 index aec0e2e1e1c56a..00000000000000 --- a/contrib/jemalloc/include/jemalloc/internal/base_inlines.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H -#define JEMALLOC_INTERNAL_BASE_INLINES_H - -static inline unsigned -base_ind_get(const base_t *base) { - return base->ind; -} - -static inline bool -metadata_thp_enabled(void) { - return (opt_metadata_thp != metadata_thp_disabled); -} -#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/base_structs.h b/contrib/jemalloc/include/jemalloc/internal/base_structs.h deleted file mode 100644 index 07f214eb2f2a80..00000000000000 --- a/contrib/jemalloc/include/jemalloc/internal/base_structs.h +++ /dev/null @@ -1,59 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H -#define JEMALLOC_INTERNAL_BASE_STRUCTS_H - -#include "jemalloc/internal/jemalloc_internal_types.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/sc.h" - -/* Embedded at the beginning of every block of base-managed virtual memory. */ -struct base_block_s { - /* Total size of block's virtual memory mapping. */ - size_t size; - - /* Next block in list of base's blocks. */ - base_block_t *next; - - /* Tracks unused trailing space. */ - extent_t extent; -}; - -struct base_s { - /* Associated arena's index within the arenas array. */ - unsigned ind; - - /* - * User-configurable extent hook functions. Points to an - * extent_hooks_t. - */ - atomic_p_t extent_hooks; - - /* Protects base_alloc() and base_stats_get() operations. */ - malloc_mutex_t mtx; - - /* Using THP when true (metadata_thp auto mode). */ - bool auto_thp_switched; - /* - * Most recent size class in the series of increasingly large base - * extents. Logarithmic spacing between subsequent allocations ensures - * that the total number of distinct mappings remains small. - */ - pszind_t pind_last; - - /* Serial number generation state. */ - size_t extent_sn_next; - - /* Chain of all blocks associated with base. */ - base_block_t *blocks; - - /* Heap of extents that track unused trailing space within blocks. */ - extent_heap_t avail[SC_NSIZES]; - - /* Stats, only maintained if config_stats. */ - size_t allocated; - size_t resident; - size_t mapped; - /* Number of THP regions touched. */ - size_t n_thp; -}; - -#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/base_types.h b/contrib/jemalloc/include/jemalloc/internal/base_types.h deleted file mode 100644 index b6db77df7c6c0e..00000000000000 --- a/contrib/jemalloc/include/jemalloc/internal/base_types.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H -#define JEMALLOC_INTERNAL_BASE_TYPES_H - -typedef struct base_block_s base_block_t; -typedef struct base_s base_t; - -#define METADATA_THP_DEFAULT metadata_thp_disabled - -/* - * In auto mode, arenas switch to huge pages for the base allocator on the - * second base block. a0 switches to thp on the 5th block (after 20 megabytes - * of metadata), since more metadata (e.g. rtree nodes) come from a0's base. - */ - -#define BASE_AUTO_THP_THRESHOLD 2 -#define BASE_AUTO_THP_THRESHOLD_A0 5 - -typedef enum { - metadata_thp_disabled = 0, - /* - * Lazily enable hugepage for metadata. To avoid high RSS caused by THP - * + low usage arena (i.e. 
THP becomes a significant percentage), the - * "auto" option only starts using THP after a base allocator used up - * the first THP region. Starting from the second hugepage (in a single - * arena), "auto" behaves the same as "always", i.e. madvise hugepage - * right away. - */ - metadata_thp_auto = 1, - metadata_thp_always = 2, - metadata_thp_mode_limit = 3 -} metadata_thp_mode_t; - -#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/bin.h b/contrib/jemalloc/include/jemalloc/internal/bin.h index 8547e89309bc6a..63f97395e91d75 100644 --- a/contrib/jemalloc/include/jemalloc/internal/bin.h +++ b/contrib/jemalloc/include/jemalloc/internal/bin.h @@ -3,8 +3,7 @@ #include "jemalloc/internal/bin_stats.h" #include "jemalloc/internal/bin_types.h" -#include "jemalloc/internal/extent_types.h" -#include "jemalloc/internal/extent_structs.h" +#include "jemalloc/internal/edata.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/sc.h" @@ -12,74 +11,34 @@ * A bin contains a set of extents that are currently being used for slab * allocations. */ - -/* - * Read-only information associated with each element of arena_t's bins array - * is stored separately, partly to reduce memory usage (only one copy, rather - * than one per arena), but mainly to avoid false cacheline sharing. - * - * Each slab has the following layout: - * - * /--------------------\ - * | region 0 | - * |--------------------| - * | region 1 | - * |--------------------| - * | ... | - * | ... | - * | ... | - * |--------------------| - * | region nregs-1 | - * \--------------------/ - */ -typedef struct bin_info_s bin_info_t; -struct bin_info_s { - /* Size of regions in a slab for this bin's size class. */ - size_t reg_size; - - /* Total size of a slab for this bin's size class. */ - size_t slab_size; - - /* Total number of regions in a slab for this bin's size class. */ - uint32_t nregs; - - /* Number of sharded bins in each arena for this size class. */ - uint32_t n_shards; - - /* - * Metadata used to manipulate bitmaps for slabs associated with this - * bin. - */ - bitmap_info_t bitmap_info; -}; - -extern bin_info_t bin_infos[SC_NBINS]; - typedef struct bin_s bin_t; struct bin_s { /* All operations on bin_t fields require lock ownership. */ malloc_mutex_t lock; + /* + * Bin statistics. These get touched every time the lock is acquired, + * so put them close by in the hopes of getting some cache locality. + */ + bin_stats_t stats; + /* * Current slab being used to service allocations of this bin's size * class. slabcur is independent of slabs_{nonfull,full}; whenever * slabcur is reassigned, the previous slab must be deallocated or * inserted into slabs_{nonfull,full}. */ - extent_t *slabcur; + edata_t *slabcur; /* * Heap of non-full slabs. This heap is used to assure that new * allocations come from the non-full slab that is oldest/lowest in * memory. */ - extent_heap_t slabs_nonfull; + edata_heap_t slabs_nonfull; /* List used to track full slabs. */ - extent_list_t slabs_full; - - /* Bin statistics. */ - bin_stats_t stats; + edata_list_active_t slabs_full; }; /* A set of sharded bins of the same size class. */ @@ -92,7 +51,6 @@ struct bins_s { void bin_shard_sizes_boot(unsigned bin_shards[SC_NBINS]); bool bin_update_shard_size(unsigned bin_shards[SC_NBINS], size_t start_size, size_t end_size, size_t nshards); -void bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]); /* Initializes a bin to empty. Returns true on error. 
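For reference, the metadata_thp "auto" mode documented in base_types.h above (and still driven by opt_metadata_thp in the new base.h) boils down to a per-arena block-count threshold: 2 blocks for ordinary arenas, 5 for a0, per BASE_AUTO_THP_THRESHOLD{,_A0}. A minimal sketch of that decision; the helper name and signature are hypothetical, not jemalloc's internal API:

```
#include <stdbool.h>
#include <stddef.h>

#define BASE_AUTO_THP_THRESHOLD    2
#define BASE_AUTO_THP_THRESHOLD_A0 5

/*
 * Illustrative only: should a base allocator in "auto" mode start backing
 * its metadata with transparent huge pages, given how many base blocks it
 * has allocated so far?
 */
static bool
base_auto_thp_should_switch(unsigned arena_ind, size_t n_blocks) {
    /* a0 also backs shared metadata (e.g. rtree nodes), so it waits longer. */
    size_t threshold = (arena_ind == 0) ?
        BASE_AUTO_THP_THRESHOLD_A0 : BASE_AUTO_THP_THRESHOLD;
    return n_blocks >= threshold;
}
```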
*/ bool bin_init(bin_t *bin); @@ -104,19 +62,20 @@ void bin_postfork_child(tsdn_t *tsdn, bin_t *bin); /* Stats. */ static inline void -bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) { +bin_stats_merge(tsdn_t *tsdn, bin_stats_data_t *dst_bin_stats, bin_t *bin) { malloc_mutex_lock(tsdn, &bin->lock); malloc_mutex_prof_accum(tsdn, &dst_bin_stats->mutex_data, &bin->lock); - dst_bin_stats->nmalloc += bin->stats.nmalloc; - dst_bin_stats->ndalloc += bin->stats.ndalloc; - dst_bin_stats->nrequests += bin->stats.nrequests; - dst_bin_stats->curregs += bin->stats.curregs; - dst_bin_stats->nfills += bin->stats.nfills; - dst_bin_stats->nflushes += bin->stats.nflushes; - dst_bin_stats->nslabs += bin->stats.nslabs; - dst_bin_stats->reslabs += bin->stats.reslabs; - dst_bin_stats->curslabs += bin->stats.curslabs; - dst_bin_stats->nonfull_slabs += bin->stats.nonfull_slabs; + bin_stats_t *stats = &dst_bin_stats->stats_data; + stats->nmalloc += bin->stats.nmalloc; + stats->ndalloc += bin->stats.ndalloc; + stats->nrequests += bin->stats.nrequests; + stats->curregs += bin->stats.curregs; + stats->nfills += bin->stats.nfills; + stats->nflushes += bin->stats.nflushes; + stats->nslabs += bin->stats.nslabs; + stats->reslabs += bin->stats.reslabs; + stats->curslabs += bin->stats.curslabs; + stats->nonfull_slabs += bin->stats.nonfull_slabs; malloc_mutex_unlock(tsdn, &bin->lock); } diff --git a/contrib/jemalloc/include/jemalloc/internal/bin_info.h b/contrib/jemalloc/include/jemalloc/internal/bin_info.h new file mode 100644 index 00000000000000..7fe65c866a10b1 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/bin_info.h @@ -0,0 +1,50 @@ +#ifndef JEMALLOC_INTERNAL_BIN_INFO_H +#define JEMALLOC_INTERNAL_BIN_INFO_H + +#include "jemalloc/internal/bitmap.h" + +/* + * Read-only information associated with each element of arena_t's bins array + * is stored separately, partly to reduce memory usage (only one copy, rather + * than one per arena), but mainly to avoid false cacheline sharing. + * + * Each slab has the following layout: + * + * /--------------------\ + * | region 0 | + * |--------------------| + * | region 1 | + * |--------------------| + * | ... | + * | ... | + * | ... | + * |--------------------| + * | region nregs-1 | + * \--------------------/ + */ +typedef struct bin_info_s bin_info_t; +struct bin_info_s { + /* Size of regions in a slab for this bin's size class. */ + size_t reg_size; + + /* Total size of a slab for this bin's size class. */ + size_t slab_size; + + /* Total number of regions in a slab for this bin's size class. */ + uint32_t nregs; + + /* Number of sharded bins in each arena for this size class. */ + uint32_t n_shards; + + /* + * Metadata used to manipulate bitmaps for slabs associated with this + * bin. + */ + bitmap_info_t bitmap_info; +}; + +extern bin_info_t bin_infos[SC_NBINS]; + +void bin_info_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]); + +#endif /* JEMALLOC_INTERNAL_BIN_INFO_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/bin_stats.h b/contrib/jemalloc/include/jemalloc/internal/bin_stats.h index d04519c8244cc0..0b99297c070a5a 100644 --- a/contrib/jemalloc/include/jemalloc/internal/bin_stats.h +++ b/contrib/jemalloc/include/jemalloc/internal/bin_stats.h @@ -47,8 +47,11 @@ struct bin_stats_s { /* Current size of nonfull slabs heap in this bin. 
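The region diagram in bin_info.h above maps a slab onto fixed-size regions; reg_size, slab_size and nregs fully determine the addressing. A standalone sketch of that arithmetic (illustrative struct and helper names, not jemalloc's arena code):

```
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
    size_t   reg_size;   /* size of each region */
    size_t   slab_size;  /* total size of the slab */
    uint32_t nregs;      /* number of regions per slab */
} slab_layout_t;

/* Address of region i within a slab that starts at slab_base. */
static void *
slab_region_at(const slab_layout_t *layout, void *slab_base, uint32_t i) {
    assert(i < layout->nregs);
    return (void *)((uintptr_t)slab_base + (size_t)i * layout->reg_size);
}

/* Index of the region containing ptr. */
static uint32_t
slab_region_index(const slab_layout_t *layout, void *slab_base, void *ptr) {
    size_t offset = (size_t)((uintptr_t)ptr - (uintptr_t)slab_base);
    assert(offset < layout->slab_size);
    return (uint32_t)(offset / layout->reg_size);
}
```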
*/ size_t nonfull_slabs; +}; +typedef struct bin_stats_data_s bin_stats_data_t; +struct bin_stats_data_s { + bin_stats_t stats_data; mutex_prof_data_t mutex_data; }; - #endif /* JEMALLOC_INTERNAL_BIN_STATS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/bin_types.h b/contrib/jemalloc/include/jemalloc/internal/bin_types.h index 3533606b90c4c7..945e8326c63d41 100644 --- a/contrib/jemalloc/include/jemalloc/internal/bin_types.h +++ b/contrib/jemalloc/include/jemalloc/internal/bin_types.h @@ -3,7 +3,7 @@ #include "jemalloc/internal/sc.h" -#define BIN_SHARDS_MAX (1 << EXTENT_BITS_BINSHARD_WIDTH) +#define BIN_SHARDS_MAX (1 << EDATA_BITS_BINSHARD_WIDTH) #define N_BIN_SHARDS_DEFAULT 1 /* Used in TSD static initializer only. Real init in arena_bind(). */ diff --git a/contrib/jemalloc/include/jemalloc/internal/bit_util.h b/contrib/jemalloc/include/jemalloc/internal/bit_util.h index c045eb86878aab..bac59140fbb051 100644 --- a/contrib/jemalloc/include/jemalloc/internal/bit_util.h +++ b/contrib/jemalloc/include/jemalloc/internal/bit_util.h @@ -3,144 +3,383 @@ #include "jemalloc/internal/assert.h" -#define BIT_UTIL_INLINE static inline - /* Sanity check. */ #if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \ || !defined(JEMALLOC_INTERNAL_FFS) # error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure #endif +/* + * Unlike the builtins and posix ffs functions, our ffs requires a non-zero + * input, and returns the position of the lowest bit set (as opposed to the + * posix versions, which return 1 larger than that position and use a return + * value of zero as a sentinel. This tends to simplify logic in callers, and + * allows for consistency with the builtins we build fls on top of. + */ +static inline unsigned +ffs_llu(unsigned long long x) { + util_assume(x != 0); + return JEMALLOC_INTERNAL_FFSLL(x) - 1; +} -BIT_UTIL_INLINE unsigned -ffs_llu(unsigned long long bitmap) { - return JEMALLOC_INTERNAL_FFSLL(bitmap); +static inline unsigned +ffs_lu(unsigned long x) { + util_assume(x != 0); + return JEMALLOC_INTERNAL_FFSL(x) - 1; } -BIT_UTIL_INLINE unsigned -ffs_lu(unsigned long bitmap) { - return JEMALLOC_INTERNAL_FFSL(bitmap); +static inline unsigned +ffs_u(unsigned x) { + util_assume(x != 0); + return JEMALLOC_INTERNAL_FFS(x) - 1; } -BIT_UTIL_INLINE unsigned -ffs_u(unsigned bitmap) { - return JEMALLOC_INTERNAL_FFS(bitmap); +#define DO_FLS_SLOW(x, suffix) do { \ + util_assume(x != 0); \ + x |= (x >> 1); \ + x |= (x >> 2); \ + x |= (x >> 4); \ + x |= (x >> 8); \ + x |= (x >> 16); \ + if (sizeof(x) > 4) { \ + /* \ + * If sizeof(x) is 4, then the expression "x >> 32" \ + * will generate compiler warnings even if the code \ + * never executes. This circumvents the warning, and \ + * gets compiled out in optimized builds. 
\ + */ \ + int constant_32 = sizeof(x) * 4; \ + x |= (x >> constant_32); \ + } \ + x++; \ + if (x == 0) { \ + return 8 * sizeof(x) - 1; \ + } \ + return ffs_##suffix(x) - 1; \ +} while(0) + +static inline unsigned +fls_llu_slow(unsigned long long x) { + DO_FLS_SLOW(x, llu); } -#ifdef JEMALLOC_INTERNAL_POPCOUNTL -BIT_UTIL_INLINE unsigned +static inline unsigned +fls_lu_slow(unsigned long x) { + DO_FLS_SLOW(x, lu); +} + +static inline unsigned +fls_u_slow(unsigned x) { + DO_FLS_SLOW(x, u); +} + +#undef DO_FLS_SLOW + +#ifdef JEMALLOC_HAVE_BUILTIN_CLZ +static inline unsigned +fls_llu(unsigned long long x) { + util_assume(x != 0); + /* + * Note that the xor here is more naturally written as subtraction; the + * last bit set is the number of bits in the type minus the number of + * leading zero bits. But GCC implements that as: + * bsr edi, edi + * mov eax, 31 + * xor edi, 31 + * sub eax, edi + * If we write it as xor instead, then we get + * bsr eax, edi + * as desired. + */ + return (8 * sizeof(x) - 1) ^ __builtin_clzll(x); +} + +static inline unsigned +fls_lu(unsigned long x) { + util_assume(x != 0); + return (8 * sizeof(x) - 1) ^ __builtin_clzl(x); +} + +static inline unsigned +fls_u(unsigned x) { + util_assume(x != 0); + return (8 * sizeof(x) - 1) ^ __builtin_clz(x); +} +#elif defined(_MSC_VER) + +#if LG_SIZEOF_PTR == 3 +#define DO_BSR64(bit, x) _BitScanReverse64(&bit, x) +#else +/* + * This never actually runs; we're just dodging a compiler error for the + * never-taken branch where sizeof(void *) == 8. + */ +#define DO_BSR64(bit, x) bit = 0; unreachable() +#endif + +#define DO_FLS(x) do { \ + if (x == 0) { \ + return 8 * sizeof(x); \ + } \ + unsigned long bit; \ + if (sizeof(x) == 4) { \ + _BitScanReverse(&bit, (unsigned)x); \ + return (unsigned)bit; \ + } \ + if (sizeof(x) == 8 && sizeof(void *) == 8) { \ + DO_BSR64(bit, x); \ + return (unsigned)bit; \ + } \ + if (sizeof(x) == 8 && sizeof(void *) == 4) { \ + /* Dodge a compiler warning, as above. */ \ + int constant_32 = sizeof(x) * 4; \ + if (_BitScanReverse(&bit, \ + (unsigned)(x >> constant_32))) { \ + return 32 + (unsigned)bit; \ + } else { \ + _BitScanReverse(&bit, (unsigned)x); \ + return (unsigned)bit; \ + } \ + } \ + unreachable(); \ +} while (0) + +static inline unsigned +fls_llu(unsigned long long x) { + DO_FLS(x); +} + +static inline unsigned +fls_lu(unsigned long x) { + DO_FLS(x); +} + +static inline unsigned +fls_u(unsigned x) { + DO_FLS(x); +} + +#undef DO_FLS +#undef DO_BSR64 +#else + +static inline unsigned +fls_llu(unsigned long long x) { + return fls_llu_slow(x); +} + +static inline unsigned +fls_lu(unsigned long x) { + return fls_lu_slow(x); +} + +static inline unsigned +fls_u(unsigned x) { + return fls_u_slow(x); +} +#endif + +#if LG_SIZEOF_LONG_LONG > 3 +# error "Haven't implemented popcount for 16-byte ints." +#endif + +#define DO_POPCOUNT(x, type) do { \ + /* \ + * Algorithm from an old AMD optimization reference manual. \ + * We're putting a little bit more work than you might expect \ + * into the no-instrinsic case, since we only support the \ + * GCC intrinsics spelling of popcount (for now). Detecting \ + * whether or not the popcount builtin is actually useable in \ + * MSVC is nontrivial. \ + */ \ + \ + type bmul = (type)0x0101010101010101ULL; \ + \ + /* \ + * Replace each 2 bits with the sideways sum of the original \ + * values. 0x5 = 0b0101. \ + * \ + * You might expect this to be: \ + * x = (x & 0x55...) + ((x >> 1) & 0x55...). \ + * That costs an extra mask relative to this, though. 
\ + */ \ + x = x - ((x >> 1) & (0x55U * bmul)); \ + /* Replace each 4 bits with their sideays sum. 0x3 = 0b0011. */\ + x = (x & (bmul * 0x33U)) + ((x >> 2) & (bmul * 0x33U)); \ + /* \ + * Replace each 8 bits with their sideways sum. Note that we \ + * can't overflow within each 4-bit sum here, so we can skip \ + * the initial mask. \ + */ \ + x = (x + (x >> 4)) & (bmul * 0x0FU); \ + /* \ + * None of the partial sums in this multiplication (viewed in \ + * base-256) can overflow into the next digit. So the least \ + * significant byte of the product will be the least \ + * significant byte of the original value, the second least \ + * significant byte will be the sum of the two least \ + * significant bytes of the original value, and so on. \ + * Importantly, the high byte will be the byte-wise sum of all \ + * the bytes of the original value. \ + */ \ + x = x * bmul; \ + x >>= ((sizeof(x) - 1) * 8); \ + return (unsigned)x; \ +} while(0) + +static inline unsigned +popcount_u_slow(unsigned bitmap) { + DO_POPCOUNT(bitmap, unsigned); +} + +static inline unsigned +popcount_lu_slow(unsigned long bitmap) { + DO_POPCOUNT(bitmap, unsigned long); +} + +static inline unsigned +popcount_llu_slow(unsigned long long bitmap) { + DO_POPCOUNT(bitmap, unsigned long long); +} + +#undef DO_POPCOUNT + +static inline unsigned +popcount_u(unsigned bitmap) { +#ifdef JEMALLOC_INTERNAL_POPCOUNT + return JEMALLOC_INTERNAL_POPCOUNT(bitmap); +#else + return popcount_u_slow(bitmap); +#endif +} + +static inline unsigned popcount_lu(unsigned long bitmap) { - return JEMALLOC_INTERNAL_POPCOUNTL(bitmap); +#ifdef JEMALLOC_INTERNAL_POPCOUNTL + return JEMALLOC_INTERNAL_POPCOUNTL(bitmap); +#else + return popcount_lu_slow(bitmap); +#endif } + +static inline unsigned +popcount_llu(unsigned long long bitmap) { +#ifdef JEMALLOC_INTERNAL_POPCOUNTLL + return JEMALLOC_INTERNAL_POPCOUNTLL(bitmap); +#else + return popcount_llu_slow(bitmap); #endif +} /* * Clears first unset bit in bitmap, and returns * place of bit. bitmap *must not* be 0. 
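As a cross-check on the DO_POPCOUNT arithmetic above, here is the same byte-wise trick instantiated for uint64_t as a standalone function. This is illustration only; the real code stays with the generic macro and prefers the JEMALLOC_INTERNAL_POPCOUNT* builtins when configure detects them:

```
#include <stdint.h>

static unsigned
popcount_u64_sketch(uint64_t x) {
    const uint64_t bmul = 0x0101010101010101ULL;

    /* Each 2-bit field becomes the number of bits set within it. */
    x = x - ((x >> 1) & (bmul * 0x55U));
    /* Each 4-bit field becomes the sum of its two 2-bit fields. */
    x = (x & (bmul * 0x33U)) + ((x >> 2) & (bmul * 0x33U));
    /* Each byte becomes the sum of its two 4-bit fields. */
    x = (x + (x >> 4)) & (bmul * 0x0FU);
    /*
     * Multiplying by bmul makes the high byte the sum of all eight
     * bytes, i.e. the total population count.
     */
    return (unsigned)((x * bmul) >> 56);
}
```

For example, popcount_u64_sketch(0xF0F0F0F0F0F0F0F0ULL) evaluates to 32.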
*/ -BIT_UTIL_INLINE size_t +static inline size_t cfs_lu(unsigned long* bitmap) { - size_t bit = ffs_lu(*bitmap) - 1; + util_assume(*bitmap != 0); + size_t bit = ffs_lu(*bitmap); *bitmap ^= ZU(1) << bit; return bit; } -BIT_UTIL_INLINE unsigned -ffs_zu(size_t bitmap) { +static inline unsigned +ffs_zu(size_t x) { #if LG_SIZEOF_PTR == LG_SIZEOF_INT - return ffs_u(bitmap); + return ffs_u(x); #elif LG_SIZEOF_PTR == LG_SIZEOF_LONG - return ffs_lu(bitmap); + return ffs_lu(x); #elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG - return ffs_llu(bitmap); + return ffs_llu(x); #else #error No implementation for size_t ffs() #endif } -BIT_UTIL_INLINE unsigned -ffs_u64(uint64_t bitmap) { +static inline unsigned +fls_zu(size_t x) { +#if LG_SIZEOF_PTR == LG_SIZEOF_INT + return fls_u(x); +#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG + return fls_lu(x); +#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG + return fls_llu(x); +#else +#error No implementation for size_t fls() +#endif +} + + +static inline unsigned +ffs_u64(uint64_t x) { #if LG_SIZEOF_LONG == 3 - return ffs_lu(bitmap); + return ffs_lu(x); #elif LG_SIZEOF_LONG_LONG == 3 - return ffs_llu(bitmap); + return ffs_llu(x); #else #error No implementation for 64-bit ffs() #endif } -BIT_UTIL_INLINE unsigned -ffs_u32(uint32_t bitmap) { +static inline unsigned +fls_u64(uint64_t x) { +#if LG_SIZEOF_LONG == 3 + return fls_lu(x); +#elif LG_SIZEOF_LONG_LONG == 3 + return fls_llu(x); +#else +#error No implementation for 64-bit fls() +#endif +} + +static inline unsigned +ffs_u32(uint32_t x) { #if LG_SIZEOF_INT == 2 - return ffs_u(bitmap); + return ffs_u(x); #else #error No implementation for 32-bit ffs() #endif - return ffs_u(bitmap); + return ffs_u(x); +} + +static inline unsigned +fls_u32(uint32_t x) { +#if LG_SIZEOF_INT == 2 + return fls_u(x); +#else +#error No implementation for 32-bit fls() +#endif + return fls_u(x); } -BIT_UTIL_INLINE uint64_t +static inline uint64_t pow2_ceil_u64(uint64_t x) { -#if (defined(__amd64__) || defined(__x86_64__) || defined(JEMALLOC_HAVE_BUILTIN_CLZ)) - if(unlikely(x <= 1)) { + if (unlikely(x <= 1)) { return x; } - size_t msb_on_index; -#if (defined(__amd64__) || defined(__x86_64__)) - asm ("bsrq %1, %0" - : "=r"(msb_on_index) // Outputs. - : "r"(x-1) // Inputs. - ); -#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) - msb_on_index = (63 ^ __builtin_clzll(x - 1)); -#endif + size_t msb_on_index = fls_u64(x - 1); + /* + * Range-check; it's on the callers to ensure that the result of this + * call won't overflow. + */ assert(msb_on_index < 63); return 1ULL << (msb_on_index + 1); -#else - x--; - x |= x >> 1; - x |= x >> 2; - x |= x >> 4; - x |= x >> 8; - x |= x >> 16; - x |= x >> 32; - x++; - return x; -#endif } -BIT_UTIL_INLINE uint32_t +static inline uint32_t pow2_ceil_u32(uint32_t x) { -#if ((defined(__i386__) || defined(JEMALLOC_HAVE_BUILTIN_CLZ)) && (!defined(__s390__))) - if(unlikely(x <= 1)) { - return x; + if (unlikely(x <= 1)) { + return x; } - size_t msb_on_index; -#if (defined(__i386__)) - asm ("bsr %1, %0" - : "=r"(msb_on_index) // Outputs. - : "r"(x-1) // Inputs. - ); -#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) - msb_on_index = (31 ^ __builtin_clz(x - 1)); -#endif + size_t msb_on_index = fls_u32(x - 1); + /* As above. */ assert(msb_on_index < 31); return 1U << (msb_on_index + 1); -#else - x--; - x |= x >> 1; - x |= x >> 2; - x |= x >> 4; - x |= x >> 8; - x |= x >> 16; - x++; - return x; -#endif } /* Compute the smallest power of 2 that is >= x. 
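The rewritten pow2_ceil_u64() above now goes through fls_u64() instead of inline asm. A minimal standalone equivalent, written directly against the GCC/Clang builtin (assumed available here) rather than the fls wrappers:

```
#include <assert.h>
#include <stdint.h>

/* Smallest power of two >= x; the caller must ensure the result fits. */
static uint64_t
pow2_ceil_u64_sketch(uint64_t x) {
    if (x <= 1) {
        return x;
    }
    /* Index of the most significant set bit of (x - 1). */
    unsigned msb_on_index = 63 - (unsigned)__builtin_clzll(x - 1);
    assert(msb_on_index < 63);
    return 1ULL << (msb_on_index + 1);
}
```

For example, pow2_ceil_u64_sketch(1025) is 2048, while pow2_ceil_u64_sketch(1024) stays 1024.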
*/ -BIT_UTIL_INLINE size_t +static inline size_t pow2_ceil_zu(size_t x) { #if (LG_SIZEOF_PTR == 3) return pow2_ceil_u64(x); @@ -149,77 +388,21 @@ pow2_ceil_zu(size_t x) { #endif } -#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) -BIT_UTIL_INLINE unsigned -lg_floor(size_t x) { - size_t ret; - assert(x != 0); - - asm ("bsr %1, %0" - : "=r"(ret) // Outputs. - : "r"(x) // Inputs. - ); - assert(ret < UINT_MAX); - return (unsigned)ret; -} -#elif (defined(_MSC_VER)) -BIT_UTIL_INLINE unsigned +static inline unsigned lg_floor(size_t x) { - unsigned long ret; - - assert(x != 0); - + util_assume(x != 0); #if (LG_SIZEOF_PTR == 3) - _BitScanReverse64(&ret, x); -#elif (LG_SIZEOF_PTR == 2) - _BitScanReverse(&ret, x); + return fls_u64(x); #else -# error "Unsupported type size for lg_floor()" + return fls_u32(x); #endif - assert(ret < UINT_MAX); - return (unsigned)ret; } -#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) -BIT_UTIL_INLINE unsigned -lg_floor(size_t x) { - assert(x != 0); -#if (LG_SIZEOF_PTR == LG_SIZEOF_INT) - return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x); -#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG) - return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x); -#else -# error "Unsupported type size for lg_floor()" -#endif -} -#else -BIT_UTIL_INLINE unsigned -lg_floor(size_t x) { - assert(x != 0); - - x |= (x >> 1); - x |= (x >> 2); - x |= (x >> 4); - x |= (x >> 8); - x |= (x >> 16); -#if (LG_SIZEOF_PTR == 3) - x |= (x >> 32); -#endif - if (x == SIZE_T_MAX) { - return (8 << LG_SIZEOF_PTR) - 1; - } - x++; - return ffs_zu(x) - 2; -} -#endif - -BIT_UTIL_INLINE unsigned +static inline unsigned lg_ceil(size_t x) { return lg_floor(x) + ((x & (x - 1)) == 0 ? 0 : 1); } -#undef BIT_UTIL_INLINE - /* A compile-time version of lg_floor and lg_ceil. */ #define LG_FLOOR_1(x) 0 #define LG_FLOOR_2(x) (x < (1ULL << 1) ? LG_FLOOR_1(x) : 1 + LG_FLOOR_1(x >> 1)) diff --git a/contrib/jemalloc/include/jemalloc/internal/bitmap.h b/contrib/jemalloc/include/jemalloc/internal/bitmap.h index c3f9cb490f67ee..dc19454d46eedc 100644 --- a/contrib/jemalloc/include/jemalloc/internal/bitmap.h +++ b/contrib/jemalloc/include/jemalloc/internal/bitmap.h @@ -1,7 +1,6 @@ #ifndef JEMALLOC_INTERNAL_BITMAP_H #define JEMALLOC_INTERNAL_BITMAP_H -#include "jemalloc/internal/arena_types.h" #include "jemalloc/internal/bit_util.h" #include "jemalloc/internal/sc.h" @@ -9,9 +8,9 @@ typedef unsigned long bitmap_t; #define LG_SIZEOF_BITMAP LG_SIZEOF_LONG /* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ -#if LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES) +#if SC_LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES) /* Maximum bitmap bit count is determined by maximum regions per slab. */ -# define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS +# define LG_BITMAP_MAXBITS SC_LG_SLAB_MAXREGS #else /* Maximum bitmap bit count is determined by number of extent size classes. 
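The bitmap changes in this hunk follow from the ffs convention switch described in bit_util.h: ffs_lu() is now 0-based with a non-zero precondition instead of POSIX-style 1-based with 0 as the "no bit" sentinel, so the various "- 1" adjustments disappear. A quick standalone check of the two conventions, using the GCC/Clang builtin for the 1-based behavior:

```
#include <assert.h>

/* New-style ffs: 0-based, caller guarantees x != 0. */
static unsigned
ffs_lu_zero_based(unsigned long x) {
    assert(x != 0);
    return (unsigned)__builtin_ffsl((long)x) - 1;
}

int
main(void) {
    unsigned long g = 0x48;                 /* bits 3 and 6 set */

    assert(__builtin_ffsl((long)g) == 4);   /* POSIX-style: 1-based */
    assert(ffs_lu_zero_based(g) == 3);      /* new style: 0-based */
    return 0;
}
```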
*/ # define LG_BITMAP_MAXBITS LG_CEIL(SC_NSIZES) @@ -273,7 +272,7 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) { } return bitmap_ffu(bitmap, binfo, sib_base); } - bit += ((size_t)(ffs_lu(group_masked) - 1)) << + bit += ((size_t)ffs_lu(group_masked)) << (lg_bits_per_group - LG_BITMAP_GROUP_NBITS); } assert(bit >= min_bit); @@ -285,9 +284,9 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) { - 1); size_t bit; do { - bit = ffs_lu(g); - if (bit != 0) { - return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); + if (g != 0) { + bit = ffs_lu(g); + return (i << LG_BITMAP_GROUP_NBITS) + bit; } i++; g = bitmap[i]; @@ -308,20 +307,20 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) { #ifdef BITMAP_USE_TREE i = binfo->nlevels - 1; g = bitmap[binfo->levels[i].group_offset]; - bit = ffs_lu(g) - 1; + bit = ffs_lu(g); while (i > 0) { i--; g = bitmap[binfo->levels[i].group_offset + bit]; - bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1); + bit = (bit << LG_BITMAP_GROUP_NBITS) + ffs_lu(g); } #else i = 0; g = bitmap[0]; - while ((bit = ffs_lu(g)) == 0) { + while (g == 0) { i++; g = bitmap[i]; } - bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); + bit = (i << LG_BITMAP_GROUP_NBITS) + ffs_lu(g); #endif bitmap_set(bitmap, binfo, bit); return bit; diff --git a/contrib/jemalloc/include/jemalloc/internal/buf_writer.h b/contrib/jemalloc/include/jemalloc/internal/buf_writer.h new file mode 100644 index 00000000000000..37aa6de5b3c3af --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/buf_writer.h @@ -0,0 +1,32 @@ +#ifndef JEMALLOC_INTERNAL_BUF_WRITER_H +#define JEMALLOC_INTERNAL_BUF_WRITER_H + +/* + * Note: when using the buffered writer, cbopaque is passed to write_cb only + * when the buffer is flushed. It would make a difference if cbopaque points + * to something that's changing for each write_cb call, or something that + * affects write_cb in a way dependent on the content of the output string. + * However, the most typical usage case in practice is that cbopaque points to + * some "option like" content for the write_cb, so it doesn't matter. + */ + +typedef struct { + write_cb_t *write_cb; + void *cbopaque; + char *buf; + size_t buf_size; + size_t buf_end; + bool internal_buf; +} buf_writer_t; + +bool buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer, + write_cb_t *write_cb, void *cbopaque, char *buf, size_t buf_len); +void buf_writer_flush(buf_writer_t *buf_writer); +write_cb_t buf_writer_cb; +void buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer); + +typedef ssize_t (read_cb_t)(void *read_cbopaque, void *buf, size_t limit); +void buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb, + void *read_cbopaque); + +#endif /* JEMALLOC_INTERNAL_BUF_WRITER_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/cache_bin.h b/contrib/jemalloc/include/jemalloc/internal/cache_bin.h index d14556a3da8a2a..caf5be338d6a41 100644 --- a/contrib/jemalloc/include/jemalloc/internal/cache_bin.h +++ b/contrib/jemalloc/include/jemalloc/internal/cache_bin.h @@ -2,6 +2,7 @@ #define JEMALLOC_INTERNAL_CACHE_BIN_H #include "jemalloc/internal/ql.h" +#include "jemalloc/internal/sz.h" /* * The cache_bins are the mechanism that the tcache and the arena use to @@ -13,14 +14,38 @@ * of the tcache at all. */ +/* + * The size in bytes of each cache bin stack. We also use this to indicate + * *counts* of individual objects. + */ +typedef uint16_t cache_bin_sz_t; /* - * The count of the number of cached allocations in a bin. 
We make this signed - * so that negative numbers can encode "invalid" states (e.g. a low water mark - * of -1 for a cache that has been depleted). + * Leave a noticeable mark pattern on the cache bin stack boundaries, in case a + * bug starts leaking those. Make it look like the junk pattern but be distinct + * from it. */ -typedef int32_t cache_bin_sz_t; +static const uintptr_t cache_bin_preceding_junk = + (uintptr_t)0x7a7a7a7a7a7a7a7aULL; +/* Note: a7 vs. 7a above -- this tells you which pointer leaked. */ +static const uintptr_t cache_bin_trailing_junk = + (uintptr_t)0xa7a7a7a7a7a7a7a7ULL; +/* + * That implies the following value, for the maximum number of items in any + * individual bin. The cache bins track their bounds looking just at the low + * bits of a pointer, compared against a cache_bin_sz_t. So that's + * 1 << (sizeof(cache_bin_sz_t) * 8) + * bytes spread across pointer sized objects to get the maximum. + */ +#define CACHE_BIN_NCACHED_MAX (((size_t)1 << sizeof(cache_bin_sz_t) * 8) \ + / sizeof(void *) - 1) + +/* + * This lives inside the cache_bin (for locality reasons), and is initialized + * alongside it, but is otherwise not modified by any cache bin operations. + * It's logically public and maintained by its callers. + */ typedef struct cache_bin_stats_s cache_bin_stats_t; struct cache_bin_stats_s { /* @@ -36,34 +61,75 @@ struct cache_bin_stats_s { */ typedef struct cache_bin_info_s cache_bin_info_t; struct cache_bin_info_s { - /* Upper limit on ncached. */ cache_bin_sz_t ncached_max; }; +/* + * Responsible for caching allocations associated with a single size. + * + * Several pointers are used to track the stack. To save on metadata bytes, + * only the stack_head is a full sized pointer (which is dereferenced on the + * fastpath), while the others store only the low 16 bits -- this is correct + * because a single stack never takes more space than 2^16 bytes, and at the + * same time only equality checks are performed on the low bits. + * + * (low addr) (high addr) + * |------stashed------|------available------|------cached-----| + * ^ ^ ^ ^ + * low_bound(derived) low_bits_full stack_head low_bits_empty + */ typedef struct cache_bin_s cache_bin_t; struct cache_bin_s { - /* Min # cached since last GC. */ - cache_bin_sz_t low_water; - /* # of cached objects. */ - cache_bin_sz_t ncached; /* - * ncached and stats are both modified frequently. Let's keep them + * The stack grows down. Whenever the bin is nonempty, the head points + * to an array entry containing a valid allocation. When it is empty, + * the head points to one element past the owned array. + */ + void **stack_head; + /* + * cur_ptr and stats are both modified frequently. Let's keep them * close so that they have a higher chance of being on the same * cacheline, thus less write-backs. */ cache_bin_stats_t tstats; + /* - * Stack of available objects. + * The low bits of the address of the first item in the stack that + * hasn't been used since the last GC, to track the low water mark (min + * # of cached items). * - * To make use of adjacent cacheline prefetch, the items in the avail - * stack goes to higher address for newer allocations. avail points - * just above the available space, which means that - * avail[-ncached, ... -1] are available items and the lowest item will - * be allocated first. + * Since the stack grows down, this is a higher address than + * low_bits_full. 
*/ - void **avail; + uint16_t low_bits_low_water; + + /* + * The low bits of the value that stack_head will take on when the array + * is full (of cached & stashed items). But remember that stack_head + * always points to a valid item when the array is nonempty -- this is + * in the array. + * + * Recall that since the stack grows down, this is the lowest available + * address in the array for caching. Only adjusted when stashing items. + */ + uint16_t low_bits_full; + + /* + * The low bits of the value that stack_head will take on when the array + * is empty. + * + * The stack grows down -- this is one past the highest address in the + * array. Immutable after initialization. + */ + uint16_t low_bits_empty; }; +/* + * The cache_bins live inside the tcache, but the arena (by design) isn't + * supposed to know much about tcache internals. To let the arena iterate over + * associated bins, we keep (with the tcache) a linked list of + * cache_bin_array_descriptor_ts that tell the arena how to find the bins. + */ typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t; struct cache_bin_array_descriptor_s { /* @@ -72,37 +138,214 @@ struct cache_bin_array_descriptor_s { */ ql_elm(cache_bin_array_descriptor_t) link; /* Pointers to the tcache bins. */ - cache_bin_t *bins_small; - cache_bin_t *bins_large; + cache_bin_t *bins; }; static inline void cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor, - cache_bin_t *bins_small, cache_bin_t *bins_large) { + cache_bin_t *bins) { ql_elm_new(descriptor, link); - descriptor->bins_small = bins_small; - descriptor->bins_large = bins_large; + descriptor->bins = bins; } -JEMALLOC_ALWAYS_INLINE void * -cache_bin_alloc_easy(cache_bin_t *bin, bool *success) { - void *ret; +JEMALLOC_ALWAYS_INLINE bool +cache_bin_nonfast_aligned(const void *ptr) { + if (!config_uaf_detection) { + return false; + } + /* + * Currently we use alignment to decide which pointer to junk & stash on + * dealloc (for catching use-after-free). In some common cases a + * page-aligned check is needed already (sdalloc w/ config_prof), so we + * are getting it more or less for free -- no added instructions on + * free_fastpath. + * + * Another way of deciding which pointer to sample, is adding another + * thread_event to pick one every N bytes. That also adds no cost on + * the fastpath, however it will tend to pick large allocations which is + * not the desired behavior. + */ + return ((uintptr_t)ptr & san_cache_bin_nonfast_mask) == 0; +} + +/* Returns ncached_max: Upper limit on ncached. */ +static inline cache_bin_sz_t +cache_bin_info_ncached_max(cache_bin_info_t *info) { + return info->ncached_max; +} + +/* + * Internal. + * + * Asserts that the pointer associated with earlier is <= the one associated + * with later. + */ +static inline void +cache_bin_assert_earlier(cache_bin_t *bin, uint16_t earlier, uint16_t later) { + if (earlier > later) { + assert(bin->low_bits_full > bin->low_bits_empty); + } +} - bin->ncached--; +/* + * Internal. + * + * Does difference calculations that handle wraparound correctly. Earlier must + * be associated with the position earlier in memory. + */ +static inline uint16_t +cache_bin_diff(cache_bin_t *bin, uint16_t earlier, uint16_t later, bool racy) { + /* + * When it's racy, bin->low_bits_full can be modified concurrently. It + * can cross the uint16_t max value and become less than + * bin->low_bits_empty at the time of the check. 
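Because only the low 16 bits of these positions are stored, cache_bin_diff() relies on modular uint16_t arithmetic: the byte distance comes out right even when the stack straddles a 64 KiB boundary. A tiny standalone check of that property (the addresses are made up for illustration):

```
#include <assert.h>
#include <stdint.h>

int
main(void) {
    /* A 256-byte stack whose addresses cross a 64 KiB boundary. */
    uint16_t low_bits_full = 0xff80;    /* low bits of the lowest slot */
    uint16_t low_bits_empty = 0x0080;   /* low bits one past the top */

    uint16_t diff = (uint16_t)(low_bits_empty - low_bits_full);
    assert(diff == 0x100);              /* 256 bytes despite wraparound */
    return 0;
}
```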
+ */ + if (!racy) { + cache_bin_assert_earlier(bin, earlier, later); + } + return later - earlier; +} +/* + * Number of items currently cached in the bin, without checking ncached_max. + * We require specifying whether or not the request is racy or not (i.e. whether + * or not concurrent modifications are possible). + */ +static inline cache_bin_sz_t +cache_bin_ncached_get_internal(cache_bin_t *bin, bool racy) { + cache_bin_sz_t diff = cache_bin_diff(bin, + (uint16_t)(uintptr_t)bin->stack_head, bin->low_bits_empty, racy); + cache_bin_sz_t n = diff / sizeof(void *); /* - * Check for both bin->ncached == 0 and ncached < low_water - * in a single branch. + * We have undefined behavior here; if this function is called from the + * arena stats updating code, then stack_head could change from the + * first line to the next one. Morally, these loads should be atomic, + * but compilers won't currently generate comparisons with in-memory + * operands against atomics, and these variables get accessed on the + * fast paths. This should still be "safe" in the sense of generating + * the correct assembly for the foreseeable future, though. */ - if (unlikely(bin->ncached <= bin->low_water)) { - bin->low_water = bin->ncached; - if (bin->ncached == -1) { - bin->ncached = 0; - *success = false; - return NULL; - } + assert(n == 0 || *(bin->stack_head) != NULL || racy); + return n; +} + +/* + * Number of items currently cached in the bin, with checking ncached_max. The + * caller must know that no concurrent modification of the cache_bin is + * possible. + */ +static inline cache_bin_sz_t +cache_bin_ncached_get_local(cache_bin_t *bin, cache_bin_info_t *info) { + cache_bin_sz_t n = cache_bin_ncached_get_internal(bin, + /* racy */ false); + assert(n <= cache_bin_info_ncached_max(info)); + return n; +} + +/* + * Internal. + * + * A pointer to the position one past the end of the backing array. + * + * Do not call if racy, because both 'bin->stack_head' and 'bin->low_bits_full' + * are subject to concurrent modifications. + */ +static inline void ** +cache_bin_empty_position_get(cache_bin_t *bin) { + cache_bin_sz_t diff = cache_bin_diff(bin, + (uint16_t)(uintptr_t)bin->stack_head, bin->low_bits_empty, + /* racy */ false); + uintptr_t empty_bits = (uintptr_t)bin->stack_head + diff; + void **ret = (void **)empty_bits; + + assert(ret >= bin->stack_head); + + return ret; +} + +/* + * Internal. + * + * Calculates low bits of the lower bound of the usable cache bin's range (see + * cache_bin_t visual representation above). + * + * No values are concurrently modified, so should be safe to read in a + * multithreaded environment. Currently concurrent access happens only during + * arena statistics collection. + */ +static inline uint16_t +cache_bin_low_bits_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) { + return (uint16_t)bin->low_bits_empty - + info->ncached_max * sizeof(void *); +} + +/* + * Internal. + * + * A pointer to the position with the lowest address of the backing array. + */ +static inline void ** +cache_bin_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) { + cache_bin_sz_t ncached_max = cache_bin_info_ncached_max(info); + void **ret = cache_bin_empty_position_get(bin) - ncached_max; + assert(ret <= bin->stack_head); + + return ret; +} + +/* + * As the name implies. This is important since it's not correct to try to + * batch fill a nonempty cache bin. 
+ */ +static inline void +cache_bin_assert_empty(cache_bin_t *bin, cache_bin_info_t *info) { + assert(cache_bin_ncached_get_local(bin, info) == 0); + assert(cache_bin_empty_position_get(bin) == bin->stack_head); +} + +/* + * Get low water, but without any of the correctness checking we do for the + * caller-usable version, if we are temporarily breaking invariants (like + * ncached >= low_water during flush). + */ +static inline cache_bin_sz_t +cache_bin_low_water_get_internal(cache_bin_t *bin) { + return cache_bin_diff(bin, bin->low_bits_low_water, + bin->low_bits_empty, /* racy */ false) / sizeof(void *); +} + +/* Returns the numeric value of low water in [0, ncached]. */ +static inline cache_bin_sz_t +cache_bin_low_water_get(cache_bin_t *bin, cache_bin_info_t *info) { + cache_bin_sz_t low_water = cache_bin_low_water_get_internal(bin); + assert(low_water <= cache_bin_info_ncached_max(info)); + assert(low_water <= cache_bin_ncached_get_local(bin, info)); + + cache_bin_assert_earlier(bin, (uint16_t)(uintptr_t)bin->stack_head, + bin->low_bits_low_water); + + return low_water; +} + +/* + * Indicates that the current cache bin position should be the low water mark + * going forward. + */ +static inline void +cache_bin_low_water_set(cache_bin_t *bin) { + bin->low_bits_low_water = (uint16_t)(uintptr_t)bin->stack_head; +} + +static inline void +cache_bin_low_water_adjust(cache_bin_t *bin) { + if (cache_bin_ncached_get_internal(bin, /* racy */ false) + < cache_bin_low_water_get_internal(bin)) { + cache_bin_low_water_set(bin); } +} +JEMALLOC_ALWAYS_INLINE void * +cache_bin_alloc_impl(cache_bin_t *bin, bool *success, bool adjust_low_water) { /* * success (instead of ret) should be checked upon the return of this * function. We avoid checking (ret == NULL) because there is never a @@ -110,22 +353,318 @@ cache_bin_alloc_easy(cache_bin_t *bin, bool *success) { * and eagerly checking ret would cause pipeline stall (waiting for the * cacheline). */ - *success = true; - ret = *(bin->avail - (bin->ncached + 1)); - return ret; + /* + * This may read from the empty position; however the loaded value won't + * be used. It's safe because the stack has one more slot reserved. + */ + void *ret = *bin->stack_head; + uint16_t low_bits = (uint16_t)(uintptr_t)bin->stack_head; + void **new_head = bin->stack_head + 1; + + /* + * Note that the low water mark is at most empty; if we pass this check, + * we know we're non-empty. + */ + if (likely(low_bits != bin->low_bits_low_water)) { + bin->stack_head = new_head; + *success = true; + return ret; + } + if (!adjust_low_water) { + *success = false; + return NULL; + } + /* + * In the fast-path case where we call alloc_easy and then alloc, the + * previous checking and computation is optimized away -- we didn't + * actually commit any of our operations. + */ + if (likely(low_bits != bin->low_bits_empty)) { + bin->stack_head = new_head; + bin->low_bits_low_water = (uint16_t)(uintptr_t)new_head; + *success = true; + return ret; + } + *success = false; + return NULL; +} + +/* + * Allocate an item out of the bin, failing if we're at the low-water mark. + */ +JEMALLOC_ALWAYS_INLINE void * +cache_bin_alloc_easy(cache_bin_t *bin, bool *success) { + /* We don't look at info if we're not adjusting low-water. */ + return cache_bin_alloc_impl(bin, success, false); +} + +/* + * Allocate an item out of the bin, even if we're currently at the low-water + * mark (and failing only if the bin is empty). 
+ */ +JEMALLOC_ALWAYS_INLINE void * +cache_bin_alloc(cache_bin_t *bin, bool *success) { + return cache_bin_alloc_impl(bin, success, true); +} + +JEMALLOC_ALWAYS_INLINE cache_bin_sz_t +cache_bin_alloc_batch(cache_bin_t *bin, size_t num, void **out) { + cache_bin_sz_t n = cache_bin_ncached_get_internal(bin, + /* racy */ false); + if (n > num) { + n = (cache_bin_sz_t)num; + } + memcpy(out, bin->stack_head, n * sizeof(void *)); + bin->stack_head += n; + cache_bin_low_water_adjust(bin); + + return n; } JEMALLOC_ALWAYS_INLINE bool -cache_bin_dalloc_easy(cache_bin_t *bin, cache_bin_info_t *bin_info, void *ptr) { - if (unlikely(bin->ncached == bin_info->ncached_max)) { +cache_bin_full(cache_bin_t *bin) { + return ((uint16_t)(uintptr_t)bin->stack_head == bin->low_bits_full); +} + +/* + * Free an object into the given bin. Fails only if the bin is full. + */ +JEMALLOC_ALWAYS_INLINE bool +cache_bin_dalloc_easy(cache_bin_t *bin, void *ptr) { + if (unlikely(cache_bin_full(bin))) { return false; } - assert(bin->ncached < bin_info->ncached_max); - bin->ncached++; - *(bin->avail - bin->ncached) = ptr; + + bin->stack_head--; + *bin->stack_head = ptr; + cache_bin_assert_earlier(bin, bin->low_bits_full, + (uint16_t)(uintptr_t)bin->stack_head); return true; } +/* Returns false if failed to stash (i.e. bin is full). */ +JEMALLOC_ALWAYS_INLINE bool +cache_bin_stash(cache_bin_t *bin, void *ptr) { + if (cache_bin_full(bin)) { + return false; + } + + /* Stash at the full position, in the [full, head) range. */ + uint16_t low_bits_head = (uint16_t)(uintptr_t)bin->stack_head; + /* Wraparound handled as well. */ + uint16_t diff = cache_bin_diff(bin, bin->low_bits_full, low_bits_head, + /* racy */ false); + *(void **)((uintptr_t)bin->stack_head - diff) = ptr; + + assert(!cache_bin_full(bin)); + bin->low_bits_full += sizeof(void *); + cache_bin_assert_earlier(bin, bin->low_bits_full, low_bits_head); + + return true; +} + +/* + * Get the number of stashed pointers. + * + * When called from a thread not owning the TLS (i.e. racy = true), it's + * important to keep in mind that 'bin->stack_head' and 'bin->low_bits_full' can + * be modified concurrently and almost none assertions about their values can be + * made. + */ +JEMALLOC_ALWAYS_INLINE cache_bin_sz_t +cache_bin_nstashed_get_internal(cache_bin_t *bin, cache_bin_info_t *info, + bool racy) { + cache_bin_sz_t ncached_max = cache_bin_info_ncached_max(info); + uint16_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin, + info); + + cache_bin_sz_t n = cache_bin_diff(bin, low_bits_low_bound, + bin->low_bits_full, racy) / sizeof(void *); + assert(n <= ncached_max); + + if (!racy) { + /* Below are for assertions only. */ + void **low_bound = cache_bin_low_bound_get(bin, info); + + assert((uint16_t)(uintptr_t)low_bound == low_bits_low_bound); + void *stashed = *(low_bound + n - 1); + bool aligned = cache_bin_nonfast_aligned(stashed); +#ifdef JEMALLOC_JET + /* Allow arbitrary pointers to be stashed in tests. */ + aligned = true; +#endif + assert(n == 0 || (stashed != NULL && aligned)); + } + + return n; +} + +JEMALLOC_ALWAYS_INLINE cache_bin_sz_t +cache_bin_nstashed_get_local(cache_bin_t *bin, cache_bin_info_t *info) { + cache_bin_sz_t n = cache_bin_nstashed_get_internal(bin, info, + /* racy */ false); + assert(n <= cache_bin_info_ncached_max(info)); + return n; +} + +/* + * Obtain a racy view of the number of items currently in the cache bin, in the + * presence of possible concurrent modifications. 
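To summarize the alloc/dalloc fast paths above in a self-contained form, here is a deliberately simplified model of the down-growing cache-bin stack. It uses full pointers for the bounds instead of the 16-bit low-bits encoding and omits the low-water and stash machinery, so it illustrates the layout rather than serving as a drop-in equivalent:

```
#include <stdbool.h>
#include <stddef.h>

typedef struct {
    void **stack_head;  /* first valid item; == empty when no items */
    void **full;        /* lowest usable slot; head lands here when full */
    void **empty;       /* one past the highest slot */
} cache_bin_model_t;

/* Allocation fast path: pop from the head (head moves toward empty). */
static void *
model_alloc(cache_bin_model_t *bin, bool *success) {
    if (bin->stack_head == bin->empty) {
        *success = false;
        return NULL;
    }
    void *ret = *bin->stack_head;
    bin->stack_head++;
    *success = true;
    return ret;
}

/* Deallocation fast path: push below the head; fails when full. */
static bool
model_dalloc(cache_bin_model_t *bin, void *ptr) {
    if (bin->stack_head == bin->full) {
        return false;
    }
    bin->stack_head--;
    *bin->stack_head = ptr;
    return true;
}
```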
+ */ +static inline void +cache_bin_nitems_get_remote(cache_bin_t *bin, cache_bin_info_t *info, + cache_bin_sz_t *ncached, cache_bin_sz_t *nstashed) { + cache_bin_sz_t n = cache_bin_ncached_get_internal(bin, /* racy */ true); + assert(n <= cache_bin_info_ncached_max(info)); + *ncached = n; + + n = cache_bin_nstashed_get_internal(bin, info, /* racy */ true); + assert(n <= cache_bin_info_ncached_max(info)); + *nstashed = n; + /* Note that cannot assert ncached + nstashed <= ncached_max (racy). */ +} + +/* + * Filling and flushing are done in batch, on arrays of void *s. For filling, + * the arrays go forward, and can be accessed with ordinary array arithmetic. + * For flushing, we work from the end backwards, and so need to use special + * accessors that invert the usual ordering. + * + * This is important for maintaining first-fit; the arena code fills with + * earliest objects first, and so those are the ones we should return first for + * cache_bin_alloc calls. When flushing, we should flush the objects that we + * wish to return later; those at the end of the array. This is better for the + * first-fit heuristic as well as for cache locality; the most recently freed + * objects are the ones most likely to still be in cache. + * + * This all sounds very hand-wavey and theoretical, but reverting the ordering + * on one or the other pathway leads to measurable slowdowns. + */ + +typedef struct cache_bin_ptr_array_s cache_bin_ptr_array_t; +struct cache_bin_ptr_array_s { + cache_bin_sz_t n; + void **ptr; +}; + +/* + * Declare a cache_bin_ptr_array_t sufficient for nval items. + * + * In the current implementation, this could be just part of a + * cache_bin_ptr_array_init_... call, since we reuse the cache bin stack memory. + * Indirecting behind a macro, though, means experimenting with linked-list + * representations is easy (since they'll require an alloca in the calling + * frame). + */ +#define CACHE_BIN_PTR_ARRAY_DECLARE(name, nval) \ + cache_bin_ptr_array_t name; \ + name.n = (nval) + +/* + * Start a fill. The bin must be empty, and This must be followed by a + * finish_fill call before doing any alloc/dalloc operations on the bin. + */ +static inline void +cache_bin_init_ptr_array_for_fill(cache_bin_t *bin, cache_bin_info_t *info, + cache_bin_ptr_array_t *arr, cache_bin_sz_t nfill) { + cache_bin_assert_empty(bin, info); + arr->ptr = cache_bin_empty_position_get(bin) - nfill; +} + +/* + * While nfill in cache_bin_init_ptr_array_for_fill is the number we *intend* to + * fill, nfilled here is the number we actually filled (which may be less, in + * case of OOM. + */ +static inline void +cache_bin_finish_fill(cache_bin_t *bin, cache_bin_info_t *info, + cache_bin_ptr_array_t *arr, cache_bin_sz_t nfilled) { + cache_bin_assert_empty(bin, info); + void **empty_position = cache_bin_empty_position_get(bin); + if (nfilled < arr->n) { + memmove(empty_position - nfilled, empty_position - arr->n, + nfilled * sizeof(void *)); + } + bin->stack_head = empty_position - nfilled; +} + +/* + * Same deal, but with flush. Unlike fill (which can fail), the user must flush + * everything we give them. 
+ */ +static inline void +cache_bin_init_ptr_array_for_flush(cache_bin_t *bin, cache_bin_info_t *info, + cache_bin_ptr_array_t *arr, cache_bin_sz_t nflush) { + arr->ptr = cache_bin_empty_position_get(bin) - nflush; + assert(cache_bin_ncached_get_local(bin, info) == 0 + || *arr->ptr != NULL); +} + +static inline void +cache_bin_finish_flush(cache_bin_t *bin, cache_bin_info_t *info, + cache_bin_ptr_array_t *arr, cache_bin_sz_t nflushed) { + unsigned rem = cache_bin_ncached_get_local(bin, info) - nflushed; + memmove(bin->stack_head + nflushed, bin->stack_head, + rem * sizeof(void *)); + bin->stack_head = bin->stack_head + nflushed; + cache_bin_low_water_adjust(bin); +} + +static inline void +cache_bin_init_ptr_array_for_stashed(cache_bin_t *bin, szind_t binind, + cache_bin_info_t *info, cache_bin_ptr_array_t *arr, + cache_bin_sz_t nstashed) { + assert(nstashed > 0); + assert(cache_bin_nstashed_get_local(bin, info) == nstashed); + + void **low_bound = cache_bin_low_bound_get(bin, info); + arr->ptr = low_bound; + assert(*arr->ptr != NULL); +} + +static inline void +cache_bin_finish_flush_stashed(cache_bin_t *bin, cache_bin_info_t *info) { + void **low_bound = cache_bin_low_bound_get(bin, info); + + /* Reset the bin local full position. */ + bin->low_bits_full = (uint16_t)(uintptr_t)low_bound; + assert(cache_bin_nstashed_get_local(bin, info) == 0); +} + +/* + * Initialize a cache_bin_info to represent up to the given number of items in + * the cache_bins it is associated with. + */ +void cache_bin_info_init(cache_bin_info_t *bin_info, + cache_bin_sz_t ncached_max); +/* + * Given an array of initialized cache_bin_info_ts, determine how big an + * allocation is required to initialize a full set of cache_bin_ts. + */ +void cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos, + size_t *size, size_t *alignment); + +/* + * Actually initialize some cache bins. Callers should allocate the backing + * memory indicated by a call to cache_bin_compute_alloc. They should then + * preincrement, call init once for each bin and info, and then call + * cache_bin_postincrement. *alloc_cur will then point immediately past the end + * of the allocation. + */ +void cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos, + void *alloc, size_t *cur_offset); +void cache_bin_postincrement(cache_bin_info_t *infos, szind_t ninfos, + void *alloc, size_t *cur_offset); +void cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc, + size_t *cur_offset); + +/* + * If a cache bin was zero initialized (either because it lives in static or + * thread-local storage, or was memset to 0), this function indicates whether or + * not cache_bin_init was called on it. 
+ */ +bool cache_bin_still_zero_initialized(cache_bin_t *bin); + #endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/counter.h b/contrib/jemalloc/include/jemalloc/internal/counter.h new file mode 100644 index 00000000000000..79abf0648b87bf --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/counter.h @@ -0,0 +1,34 @@ +#ifndef JEMALLOC_INTERNAL_COUNTER_H +#define JEMALLOC_INTERNAL_COUNTER_H + +#include "jemalloc/internal/mutex.h" + +typedef struct counter_accum_s { + LOCKEDINT_MTX_DECLARE(mtx) + locked_u64_t accumbytes; + uint64_t interval; +} counter_accum_t; + +JEMALLOC_ALWAYS_INLINE bool +counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t bytes) { + uint64_t interval = counter->interval; + assert(interval > 0); + LOCKEDINT_MTX_LOCK(tsdn, counter->mtx); + /* + * If the event moves fast enough (and/or if the event handling is slow + * enough), extreme overflow can cause counter trigger coalescing. + * This is an intentional mechanism that avoids rate-limiting + * allocation. + */ + bool overflow = locked_inc_mod_u64(tsdn, LOCKEDINT_MTX(counter->mtx), + &counter->accumbytes, bytes, interval); + LOCKEDINT_MTX_UNLOCK(tsdn, counter->mtx); + return overflow; +} + +bool counter_accum_init(counter_accum_t *counter, uint64_t interval); +void counter_prefork(tsdn_t *tsdn, counter_accum_t *counter); +void counter_postfork_parent(tsdn_t *tsdn, counter_accum_t *counter); +void counter_postfork_child(tsdn_t *tsdn, counter_accum_t *counter); + +#endif /* JEMALLOC_INTERNAL_COUNTER_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/ctl.h b/contrib/jemalloc/include/jemalloc/internal/ctl.h index 1d1aacc6f417da..63d27f8aad3785 100644 --- a/contrib/jemalloc/include/jemalloc/internal/ctl.h +++ b/contrib/jemalloc/include/jemalloc/internal/ctl.h @@ -42,9 +42,11 @@ typedef struct ctl_arena_stats_s { uint64_t nfills_small; uint64_t nflushes_small; - bin_stats_t bstats[SC_NBINS]; + bin_stats_data_t bstats[SC_NBINS]; arena_stats_large_t lstats[SC_NSIZES - SC_NBINS]; - arena_stats_extents_t estats[SC_NPSIZES]; + pac_estats_t estats[SC_NPSIZES]; + hpa_shard_stats_t hpastats; + sec_stats_t secstats; } ctl_arena_stats_t; typedef struct ctl_stats_s { @@ -96,13 +98,17 @@ typedef struct ctl_arenas_s { int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp); - int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); +int ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name, + size_t *miblenp); +int ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name, + size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen); bool ctl_boot(void); void ctl_prefork(tsdn_t *tsdn); void ctl_postfork_parent(tsdn_t *tsdn); void ctl_postfork_child(tsdn_t *tsdn); +void ctl_mtx_assert_held(tsdn_t *tsdn); #define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ @@ -131,4 +137,23 @@ void ctl_postfork_child(tsdn_t *tsdn); } \ } while (0) +#define xmallctlmibnametomib(mib, miblen, name, miblenp) do { \ + if (ctl_mibnametomib(tsd_fetch(), mib, miblen, name, miblenp) \ + != 0) { \ + malloc_write( \ + ": Failure in ctl_mibnametomib()\n"); \ + abort(); \ + } \ +} while (0) + +#define xmallctlbymibname(mib, miblen, name, miblenp, oldp, oldlenp, \ + newp, newlen) do { \ + if 
(ctl_bymibname(tsd_fetch(), mib, miblen, name, miblenp, \ + oldp, oldlenp, newp, newlen) != 0) { \ + malloc_write( \ + ": Failure in ctl_bymibname()\n"); \ + abort(); \ + } \ +} while (0) + #endif /* JEMALLOC_INTERNAL_CTL_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/decay.h b/contrib/jemalloc/include/jemalloc/internal/decay.h new file mode 100644 index 00000000000000..cf6a9d22c0107b --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/decay.h @@ -0,0 +1,186 @@ +#ifndef JEMALLOC_INTERNAL_DECAY_H +#define JEMALLOC_INTERNAL_DECAY_H + +#include "jemalloc/internal/smoothstep.h" + +#define DECAY_UNBOUNDED_TIME_TO_PURGE ((uint64_t)-1) + +/* + * The decay_t computes the number of pages we should purge at any given time. + * Page allocators inform a decay object when pages enter a decay-able state + * (i.e. dirty or muzzy), and query it to determine how many pages should be + * purged at any given time. + * + * This is mostly a single-threaded data structure and doesn't care about + * synchronization at all; it's the caller's responsibility to manage their + * synchronization on their own. There are two exceptions: + * 1) It's OK to racily call decay_ms_read (i.e. just the simplest state query). + * 2) The mtx and purging fields live (and are initialized) here, but are + * logically owned by the page allocator. This is just a convenience (since + * those fields would be duplicated for both the dirty and muzzy states + * otherwise). + */ +typedef struct decay_s decay_t; +struct decay_s { + /* Synchronizes all non-atomic fields. */ + malloc_mutex_t mtx; + /* + * True if a thread is currently purging the extents associated with + * this decay structure. + */ + bool purging; + /* + * Approximate time in milliseconds from the creation of a set of unused + * dirty pages until an equivalent set of unused dirty pages is purged + * and/or reused. + */ + atomic_zd_t time_ms; + /* time / SMOOTHSTEP_NSTEPS. */ + nstime_t interval; + /* + * Time at which the current decay interval logically started. We do + * not actually advance to a new epoch until sometime after it starts + * because of scheduling and computation delays, and it is even possible + * to completely skip epochs. In all cases, during epoch advancement we + * merge all relevant activity into the most recently recorded epoch. + */ + nstime_t epoch; + /* Deadline randomness generator. */ + uint64_t jitter_state; + /* + * Deadline for current epoch. This is the sum of interval and per + * epoch jitter which is a uniform random variable in [0..interval). + * Epochs always advance by precise multiples of interval, but we + * randomize the deadline to reduce the likelihood of arenas purging in + * lockstep. + */ + nstime_t deadline; + /* + * The number of pages we cap ourselves at in the current epoch, per + * decay policies. Updated on an epoch change. After an epoch change, + * the caller should take steps to try to purge down to this amount. + */ + size_t npages_limit; + /* + * Number of unpurged pages at beginning of current epoch. During epoch + * advancement we use the delta between arena->decay_*.nunpurged and + * ecache_npages_get(&arena->ecache_*) to determine how many dirty pages, + * if any, were generated. + */ + size_t nunpurged; + /* + * Trailing log of how many unused dirty pages were generated during + * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last + * element is the most recent epoch. Corresponding epoch times are + * relative to epoch. 
+ * + * Updated only on epoch advance, triggered by + * decay_maybe_advance_epoch, below. + */ + size_t backlog[SMOOTHSTEP_NSTEPS]; + + /* Peak number of pages in associated extents. Used for debug only. */ + uint64_t ceil_npages; +}; + +/* + * The current decay time setting. This is the only public access to a decay_t + * that's allowed without holding mtx. + */ +static inline ssize_t +decay_ms_read(const decay_t *decay) { + return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); +} + +/* + * See the comment on the struct field -- the limit on pages we should allow in + * this decay state this epoch. + */ +static inline size_t +decay_npages_limit_get(const decay_t *decay) { + return decay->npages_limit; +} + +/* How many unused dirty pages were generated during the last epoch. */ +static inline size_t +decay_epoch_npages_delta(const decay_t *decay) { + return decay->backlog[SMOOTHSTEP_NSTEPS - 1]; +} + +/* + * Current epoch duration, in nanoseconds. Given that new epochs are started + * somewhat haphazardly, this is not necessarily exactly the time between any + * two calls to decay_maybe_advance_epoch; see the comments on fields in the + * decay_t. + */ +static inline uint64_t +decay_epoch_duration_ns(const decay_t *decay) { + return nstime_ns(&decay->interval); +} + +static inline bool +decay_immediately(const decay_t *decay) { + ssize_t decay_ms = decay_ms_read(decay); + return decay_ms == 0; +} + +static inline bool +decay_disabled(const decay_t *decay) { + ssize_t decay_ms = decay_ms_read(decay); + return decay_ms < 0; +} + +/* Returns true if decay is enabled and done gradually. */ +static inline bool +decay_gradually(const decay_t *decay) { + ssize_t decay_ms = decay_ms_read(decay); + return decay_ms > 0; +} + +/* + * Returns true if the passed in decay time setting is valid. + * < -1 : invalid + * -1 : never decay + * 0 : decay immediately + * > 0 : some positive decay time, up to a maximum allowed value of + * NSTIME_SEC_MAX * 1000, which corresponds to decaying somewhere in the early + * 27th century. By that time, we expect to have implemented alternate purging + * strategies. + */ +bool decay_ms_valid(ssize_t decay_ms); + +/* + * As a precondition, the decay_t must be zeroed out (as if with memset). + * + * Returns true on error. + */ +bool decay_init(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms); + +/* + * Given an already-initialized decay_t, reinitialize it with the given decay + * time. The decay_t must have previously been initialized (and should not then + * be zeroed). + */ +void decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms); + +/* + * Compute how many of 'npages_new' pages we would need to purge in 'time'. + */ +uint64_t decay_npages_purge_in(decay_t *decay, nstime_t *time, + size_t npages_new); + +/* Returns true if the epoch advanced and there are pages to purge. */ +bool decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time, + size_t current_npages); + +/* + * Calculates wait time until a number of pages in the interval + * [0.5 * npages_threshold .. 1.5 * npages_threshold] should be purged. + * + * Returns number of nanoseconds or DECAY_UNBOUNDED_TIME_TO_PURGE in case of + * indefinite wait. 
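For orientation, a minimal sketch of how a page allocator might consume the decay API above; this is not part of the patch, it assumes the jemalloc-internal build environment, `purge_target` is a hypothetical helper, and the caller is assumed to hold `decay->mtx` as the struct comment requires.

```
#include "jemalloc/internal/decay.h"

/* Hypothetical helper: how many pages should the caller purge right now? */
static size_t
purge_target(decay_t *decay, nstime_t *now, size_t npages_current) {
	if (decay_immediately(decay)) {
		/* decay_ms == 0: every unused dirty page is eligible now. */
		return npages_current;
	}
	if (decay_disabled(decay)) {
		/* decay_ms == -1: never purge through decay. */
		return 0;
	}
	/* Gradual decay: fold recent activity into the current epoch. */
	if (decay_maybe_advance_epoch(decay, now, npages_current)) {
		size_t limit = decay_npages_limit_get(decay);
		return npages_current > limit ? npages_current - limit : 0;
	}
	return 0;
}
```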
+ */ +uint64_t decay_ns_until_purge(decay_t *decay, size_t npages_current, + uint64_t npages_threshold); + +#endif /* JEMALLOC_INTERNAL_DECAY_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/ecache.h b/contrib/jemalloc/include/jemalloc/internal/ecache.h new file mode 100644 index 00000000000000..71cae3e34c3806 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/ecache.h @@ -0,0 +1,55 @@ +#ifndef JEMALLOC_INTERNAL_ECACHE_H +#define JEMALLOC_INTERNAL_ECACHE_H + +#include "jemalloc/internal/eset.h" +#include "jemalloc/internal/san.h" +#include "jemalloc/internal/mutex.h" + +typedef struct ecache_s ecache_t; +struct ecache_s { + malloc_mutex_t mtx; + eset_t eset; + eset_t guarded_eset; + /* All stored extents must be in the same state. */ + extent_state_t state; + /* The index of the ehooks the ecache is associated with. */ + unsigned ind; + /* + * If true, delay coalescing until eviction; otherwise coalesce during + * deallocation. + */ + bool delay_coalesce; +}; + +static inline size_t +ecache_npages_get(ecache_t *ecache) { + return eset_npages_get(&ecache->eset) + + eset_npages_get(&ecache->guarded_eset); +} + +/* Get the number of extents in the given page size index. */ +static inline size_t +ecache_nextents_get(ecache_t *ecache, pszind_t ind) { + return eset_nextents_get(&ecache->eset, ind) + + eset_nextents_get(&ecache->guarded_eset, ind); +} + +/* Get the sum total bytes of the extents in the given page size index. */ +static inline size_t +ecache_nbytes_get(ecache_t *ecache, pszind_t ind) { + return eset_nbytes_get(&ecache->eset, ind) + + eset_nbytes_get(&ecache->guarded_eset, ind); +} + +static inline unsigned +ecache_ind_get(ecache_t *ecache) { + return ecache->ind; +} + +bool ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state, + unsigned ind, bool delay_coalesce); +void ecache_prefork(tsdn_t *tsdn, ecache_t *ecache); +void ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache); +void ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache); + +#endif /* JEMALLOC_INTERNAL_ECACHE_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/edata.h b/contrib/jemalloc/include/jemalloc/internal/edata.h new file mode 100644 index 00000000000000..af039ea734aff1 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/edata.h @@ -0,0 +1,698 @@ +#ifndef JEMALLOC_INTERNAL_EDATA_H +#define JEMALLOC_INTERNAL_EDATA_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/bin_info.h" +#include "jemalloc/internal/bit_util.h" +#include "jemalloc/internal/hpdata.h" +#include "jemalloc/internal/nstime.h" +#include "jemalloc/internal/ph.h" +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/sc.h" +#include "jemalloc/internal/slab_data.h" +#include "jemalloc/internal/sz.h" +#include "jemalloc/internal/typed_list.h" + +/* + * sizeof(edata_t) is 128 bytes on 64-bit architectures. Ensure the alignment + * to free up the low bits in the rtree leaf. + */ +#define EDATA_ALIGNMENT 128 + +enum extent_state_e { + extent_state_active = 0, + extent_state_dirty = 1, + extent_state_muzzy = 2, + extent_state_retained = 3, + extent_state_transition = 4, /* States below are intermediate. */ + extent_state_merging = 5, + extent_state_max = 5 /* Sanity checking only. */ +}; +typedef enum extent_state_e extent_state_t; + +enum extent_head_state_e { + EXTENT_NOT_HEAD, + EXTENT_IS_HEAD /* See comments in ehooks_default_merge_impl(). 
*/ +}; +typedef enum extent_head_state_e extent_head_state_t; + +/* + * Which implementation of the page allocator interface, (PAI, defined in + * pai.h) owns the given extent? + */ +enum extent_pai_e { + EXTENT_PAI_PAC = 0, + EXTENT_PAI_HPA = 1 +}; +typedef enum extent_pai_e extent_pai_t; + +struct e_prof_info_s { + /* Time when this was allocated. */ + nstime_t e_prof_alloc_time; + /* Allocation request size. */ + size_t e_prof_alloc_size; + /* Points to a prof_tctx_t. */ + atomic_p_t e_prof_tctx; + /* + * Points to a prof_recent_t for the allocation; NULL + * means the recent allocation record no longer exists. + * Protected by prof_recent_alloc_mtx. + */ + atomic_p_t e_prof_recent_alloc; +}; +typedef struct e_prof_info_s e_prof_info_t; + +/* + * The information about a particular edata that lives in an emap. Space is + * more precious there (the information, plus the edata pointer, has to live in + * a 64-bit word if we want to enable a packed representation. + * + * There are two things that are special about the information here: + * - It's quicker to access. You have one fewer pointer hop, since finding the + * edata_t associated with an item always requires accessing the rtree leaf in + * which this data is stored. + * - It can be read unsynchronized, and without worrying about lifetime issues. + */ +typedef struct edata_map_info_s edata_map_info_t; +struct edata_map_info_s { + bool slab; + szind_t szind; +}; + +typedef struct edata_cmp_summary_s edata_cmp_summary_t; +struct edata_cmp_summary_s { + uint64_t sn; + uintptr_t addr; +}; + +/* Extent (span of pages). Use accessor functions for e_* fields. */ +typedef struct edata_s edata_t; +ph_structs(edata_avail, edata_t); +ph_structs(edata_heap, edata_t); +struct edata_s { + /* + * Bitfield containing several fields: + * + * a: arena_ind + * b: slab + * c: committed + * p: pai + * z: zeroed + * g: guarded + * t: state + * i: szind + * f: nfree + * s: bin_shard + * + * 00000000 ... 0000ssss ssffffff ffffiiii iiiitttg zpcbaaaa aaaaaaaa + * + * arena_ind: Arena from which this extent came, or all 1 bits if + * unassociated. + * + * slab: The slab flag indicates whether the extent is used for a slab + * of small regions. This helps differentiate small size classes, + * and it indicates whether interior pointers can be looked up via + * iealloc(). + * + * committed: The committed flag indicates whether physical memory is + * committed to the extent, whether explicitly or implicitly + * as on a system that overcommits and satisfies physical + * memory needs on demand via soft page faults. + * + * pai: The pai flag is an extent_pai_t. + * + * zeroed: The zeroed flag is used by extent recycling code to track + * whether memory is zero-filled. + * + * guarded: The guarded flag is use by the sanitizer to track whether + * the extent has page guards around it. + * + * state: The state flag is an extent_state_t. + * + * szind: The szind flag indicates usable size class index for + * allocations residing in this extent, regardless of whether the + * extent is a slab. Extent size and usable size often differ + * even for non-slabs, either due to sz_large_pad or promotion of + * sampled small regions. + * + * nfree: Number of free regions in slab. + * + * bin_shard: the shard of the bin from which this extent came. 
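The accessors further down all follow the same mask-and-shift pattern over e_bits. As a self-contained illustration of that packing scheme (the DEMO_* field names and widths are made up and do not match the real EDATA_BITS_* constants):

```
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define MASK(width, shift)	((((((uint64_t)0x1U) << (width)) - 1)) << (shift))

#define DEMO_BITS_ARENA_WIDTH	12
#define DEMO_BITS_ARENA_SHIFT	0
#define DEMO_BITS_ARENA_MASK	MASK(DEMO_BITS_ARENA_WIDTH, DEMO_BITS_ARENA_SHIFT)

#define DEMO_BITS_SLAB_WIDTH	1
#define DEMO_BITS_SLAB_SHIFT	(DEMO_BITS_ARENA_WIDTH + DEMO_BITS_ARENA_SHIFT)
#define DEMO_BITS_SLAB_MASK	MASK(DEMO_BITS_SLAB_WIDTH, DEMO_BITS_SLAB_SHIFT)

/* Extract a field: mask it out, then shift it down. */
static inline unsigned
demo_arena_ind_get(uint64_t bits) {
	return (unsigned)((bits & DEMO_BITS_ARENA_MASK) >>
	    DEMO_BITS_ARENA_SHIFT);
}

/* Set a field: clear its bits, then OR in the shifted new value. */
static inline uint64_t
demo_slab_set(uint64_t bits, bool slab) {
	return (bits & ~DEMO_BITS_SLAB_MASK) |
	    ((uint64_t)slab << DEMO_BITS_SLAB_SHIFT);
}

int
main(void) {
	uint64_t bits = 0;
	bits = (bits & ~DEMO_BITS_ARENA_MASK) |
	    ((uint64_t)7 << DEMO_BITS_ARENA_SHIFT);
	bits = demo_slab_set(bits, true);
	assert(demo_arena_ind_get(bits) == 7);
	assert((bits & DEMO_BITS_SLAB_MASK) != 0);
	return 0;
}
```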
+ */ + uint64_t e_bits; +#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT)) + +#define EDATA_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS +#define EDATA_BITS_ARENA_SHIFT 0 +#define EDATA_BITS_ARENA_MASK MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT) + +#define EDATA_BITS_SLAB_WIDTH 1 +#define EDATA_BITS_SLAB_SHIFT (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT) +#define EDATA_BITS_SLAB_MASK MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT) + +#define EDATA_BITS_COMMITTED_WIDTH 1 +#define EDATA_BITS_COMMITTED_SHIFT (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT) +#define EDATA_BITS_COMMITTED_MASK MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT) + +#define EDATA_BITS_PAI_WIDTH 1 +#define EDATA_BITS_PAI_SHIFT (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT) +#define EDATA_BITS_PAI_MASK MASK(EDATA_BITS_PAI_WIDTH, EDATA_BITS_PAI_SHIFT) + +#define EDATA_BITS_ZEROED_WIDTH 1 +#define EDATA_BITS_ZEROED_SHIFT (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT) +#define EDATA_BITS_ZEROED_MASK MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT) + +#define EDATA_BITS_GUARDED_WIDTH 1 +#define EDATA_BITS_GUARDED_SHIFT (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT) +#define EDATA_BITS_GUARDED_MASK MASK(EDATA_BITS_GUARDED_WIDTH, EDATA_BITS_GUARDED_SHIFT) + +#define EDATA_BITS_STATE_WIDTH 3 +#define EDATA_BITS_STATE_SHIFT (EDATA_BITS_GUARDED_WIDTH + EDATA_BITS_GUARDED_SHIFT) +#define EDATA_BITS_STATE_MASK MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT) + +#define EDATA_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES) +#define EDATA_BITS_SZIND_SHIFT (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT) +#define EDATA_BITS_SZIND_MASK MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT) + +#define EDATA_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1) +#define EDATA_BITS_NFREE_SHIFT (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT) +#define EDATA_BITS_NFREE_MASK MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT) + +#define EDATA_BITS_BINSHARD_WIDTH 6 +#define EDATA_BITS_BINSHARD_SHIFT (EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT) +#define EDATA_BITS_BINSHARD_MASK MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT) + +#define EDATA_BITS_IS_HEAD_WIDTH 1 +#define EDATA_BITS_IS_HEAD_SHIFT (EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT) +#define EDATA_BITS_IS_HEAD_MASK MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT) + + /* Pointer to the extent that this structure is responsible for. */ + void *e_addr; + + union { + /* + * Extent size and serial number associated with the extent + * structure (different than the serial number for the extent at + * e_addr). + * + * ssssssss [...] ssssssss ssssnnnn nnnnnnnn + */ + size_t e_size_esn; + #define EDATA_SIZE_MASK ((size_t)~(PAGE-1)) + #define EDATA_ESN_MASK ((size_t)PAGE-1) + /* Base extent size, which may not be a multiple of PAGE. */ + size_t e_bsize; + }; + + /* + * If this edata is a user allocation from an HPA, it comes out of some + * pageslab (we don't yet support huegpage allocations that don't fit + * into pageslabs). This tracks it. + */ + hpdata_t *e_ps; + + /* + * Serial number. These are not necessarily unique; splitting an extent + * results in two extents with the same serial number. + */ + uint64_t e_sn; + + union { + /* + * List linkage used when the edata_t is active; either in + * arena's large allocations or bin_t's slabs_full. + */ + ql_elm(edata_t) ql_link_active; + /* + * Pairing heap linkage. 
Used whenever the extent is inactive + * (in the page allocators), or when it is active and in + * slabs_nonfull, or when the edata_t is unassociated with an + * extent and sitting in an edata_cache. + */ + union { + edata_heap_link_t heap_link; + edata_avail_link_t avail_link; + }; + }; + + union { + /* + * List linkage used when the extent is inactive: + * - Stashed dirty extents + * - Ecache LRU functionality. + */ + ql_elm(edata_t) ql_link_inactive; + /* Small region slab metadata. */ + slab_data_t e_slab_data; + + /* Profiling data, used for large objects. */ + e_prof_info_t e_prof_info; + }; +}; + +TYPED_LIST(edata_list_active, edata_t, ql_link_active) +TYPED_LIST(edata_list_inactive, edata_t, ql_link_inactive) + +static inline unsigned +edata_arena_ind_get(const edata_t *edata) { + unsigned arena_ind = (unsigned)((edata->e_bits & + EDATA_BITS_ARENA_MASK) >> EDATA_BITS_ARENA_SHIFT); + assert(arena_ind < MALLOCX_ARENA_LIMIT); + + return arena_ind; +} + +static inline szind_t +edata_szind_get_maybe_invalid(const edata_t *edata) { + szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >> + EDATA_BITS_SZIND_SHIFT); + assert(szind <= SC_NSIZES); + return szind; +} + +static inline szind_t +edata_szind_get(const edata_t *edata) { + szind_t szind = edata_szind_get_maybe_invalid(edata); + assert(szind < SC_NSIZES); /* Never call when "invalid". */ + return szind; +} + +static inline size_t +edata_usize_get(const edata_t *edata) { + return sz_index2size(edata_szind_get(edata)); +} + +static inline unsigned +edata_binshard_get(const edata_t *edata) { + unsigned binshard = (unsigned)((edata->e_bits & + EDATA_BITS_BINSHARD_MASK) >> EDATA_BITS_BINSHARD_SHIFT); + assert(binshard < bin_infos[edata_szind_get(edata)].n_shards); + return binshard; +} + +static inline uint64_t +edata_sn_get(const edata_t *edata) { + return edata->e_sn; +} + +static inline extent_state_t +edata_state_get(const edata_t *edata) { + return (extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK) >> + EDATA_BITS_STATE_SHIFT); +} + +static inline bool +edata_guarded_get(const edata_t *edata) { + return (bool)((edata->e_bits & EDATA_BITS_GUARDED_MASK) >> + EDATA_BITS_GUARDED_SHIFT); +} + +static inline bool +edata_zeroed_get(const edata_t *edata) { + return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) >> + EDATA_BITS_ZEROED_SHIFT); +} + +static inline bool +edata_committed_get(const edata_t *edata) { + return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK) >> + EDATA_BITS_COMMITTED_SHIFT); +} + +static inline extent_pai_t +edata_pai_get(const edata_t *edata) { + return (extent_pai_t)((edata->e_bits & EDATA_BITS_PAI_MASK) >> + EDATA_BITS_PAI_SHIFT); +} + +static inline bool +edata_slab_get(const edata_t *edata) { + return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) >> + EDATA_BITS_SLAB_SHIFT); +} + +static inline unsigned +edata_nfree_get(const edata_t *edata) { + assert(edata_slab_get(edata)); + return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK) >> + EDATA_BITS_NFREE_SHIFT); +} + +static inline void * +edata_base_get(const edata_t *edata) { + assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) || + !edata_slab_get(edata)); + return PAGE_ADDR2BASE(edata->e_addr); +} + +static inline void * +edata_addr_get(const edata_t *edata) { + assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) || + !edata_slab_get(edata)); + return edata->e_addr; +} + +static inline size_t +edata_size_get(const edata_t *edata) { + return (edata->e_size_esn & EDATA_SIZE_MASK); +} + +static inline size_t +edata_esn_get(const 
edata_t *edata) { + return (edata->e_size_esn & EDATA_ESN_MASK); +} + +static inline size_t +edata_bsize_get(const edata_t *edata) { + return edata->e_bsize; +} + +static inline hpdata_t * +edata_ps_get(const edata_t *edata) { + assert(edata_pai_get(edata) == EXTENT_PAI_HPA); + return edata->e_ps; +} + +static inline void * +edata_before_get(const edata_t *edata) { + return (void *)((uintptr_t)edata_base_get(edata) - PAGE); +} + +static inline void * +edata_last_get(const edata_t *edata) { + return (void *)((uintptr_t)edata_base_get(edata) + + edata_size_get(edata) - PAGE); +} + +static inline void * +edata_past_get(const edata_t *edata) { + return (void *)((uintptr_t)edata_base_get(edata) + + edata_size_get(edata)); +} + +static inline slab_data_t * +edata_slab_data_get(edata_t *edata) { + assert(edata_slab_get(edata)); + return &edata->e_slab_data; +} + +static inline const slab_data_t * +edata_slab_data_get_const(const edata_t *edata) { + assert(edata_slab_get(edata)); + return &edata->e_slab_data; +} + +static inline prof_tctx_t * +edata_prof_tctx_get(const edata_t *edata) { + return (prof_tctx_t *)atomic_load_p(&edata->e_prof_info.e_prof_tctx, + ATOMIC_ACQUIRE); +} + +static inline const nstime_t * +edata_prof_alloc_time_get(const edata_t *edata) { + return &edata->e_prof_info.e_prof_alloc_time; +} + +static inline size_t +edata_prof_alloc_size_get(const edata_t *edata) { + return edata->e_prof_info.e_prof_alloc_size; +} + +static inline prof_recent_t * +edata_prof_recent_alloc_get_dont_call_directly(const edata_t *edata) { + return (prof_recent_t *)atomic_load_p( + &edata->e_prof_info.e_prof_recent_alloc, ATOMIC_RELAXED); +} + +static inline void +edata_arena_ind_set(edata_t *edata, unsigned arena_ind) { + edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) | + ((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT); +} + +static inline void +edata_binshard_set(edata_t *edata, unsigned binshard) { + /* The assertion assumes szind is set already. */ + assert(binshard < bin_infos[edata_szind_get(edata)].n_shards); + edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK) | + ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT); +} + +static inline void +edata_addr_set(edata_t *edata, void *addr) { + edata->e_addr = addr; +} + +static inline void +edata_size_set(edata_t *edata, size_t size) { + assert((size & ~EDATA_SIZE_MASK) == 0); + edata->e_size_esn = size | (edata->e_size_esn & ~EDATA_SIZE_MASK); +} + +static inline void +edata_esn_set(edata_t *edata, size_t esn) { + edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) | (esn & + EDATA_ESN_MASK); +} + +static inline void +edata_bsize_set(edata_t *edata, size_t bsize) { + edata->e_bsize = bsize; +} + +static inline void +edata_ps_set(edata_t *edata, hpdata_t *ps) { + assert(edata_pai_get(edata) == EXTENT_PAI_HPA); + edata->e_ps = ps; +} + +static inline void +edata_szind_set(edata_t *edata, szind_t szind) { + assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */ + edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK) | + ((uint64_t)szind << EDATA_BITS_SZIND_SHIFT); +} + +static inline void +edata_nfree_set(edata_t *edata, unsigned nfree) { + assert(edata_slab_get(edata)); + edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK) | + ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT); +} + +static inline void +edata_nfree_binshard_set(edata_t *edata, unsigned nfree, unsigned binshard) { + /* The assertion assumes szind is set already. 
*/ + assert(binshard < bin_infos[edata_szind_get(edata)].n_shards); + edata->e_bits = (edata->e_bits & + (~EDATA_BITS_NFREE_MASK & ~EDATA_BITS_BINSHARD_MASK)) | + ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT) | + ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT); +} + +static inline void +edata_nfree_inc(edata_t *edata) { + assert(edata_slab_get(edata)); + edata->e_bits += ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT); +} + +static inline void +edata_nfree_dec(edata_t *edata) { + assert(edata_slab_get(edata)); + edata->e_bits -= ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT); +} + +static inline void +edata_nfree_sub(edata_t *edata, uint64_t n) { + assert(edata_slab_get(edata)); + edata->e_bits -= (n << EDATA_BITS_NFREE_SHIFT); +} + +static inline void +edata_sn_set(edata_t *edata, uint64_t sn) { + edata->e_sn = sn; +} + +static inline void +edata_state_set(edata_t *edata, extent_state_t state) { + edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK) | + ((uint64_t)state << EDATA_BITS_STATE_SHIFT); +} + +static inline void +edata_guarded_set(edata_t *edata, bool guarded) { + edata->e_bits = (edata->e_bits & ~EDATA_BITS_GUARDED_MASK) | + ((uint64_t)guarded << EDATA_BITS_GUARDED_SHIFT); +} + +static inline void +edata_zeroed_set(edata_t *edata, bool zeroed) { + edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) | + ((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT); +} + +static inline void +edata_committed_set(edata_t *edata, bool committed) { + edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK) | + ((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT); +} + +static inline void +edata_pai_set(edata_t *edata, extent_pai_t pai) { + edata->e_bits = (edata->e_bits & ~EDATA_BITS_PAI_MASK) | + ((uint64_t)pai << EDATA_BITS_PAI_SHIFT); +} + +static inline void +edata_slab_set(edata_t *edata, bool slab) { + edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) | + ((uint64_t)slab << EDATA_BITS_SLAB_SHIFT); +} + +static inline void +edata_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) { + atomic_store_p(&edata->e_prof_info.e_prof_tctx, tctx, ATOMIC_RELEASE); +} + +static inline void +edata_prof_alloc_time_set(edata_t *edata, nstime_t *t) { + nstime_copy(&edata->e_prof_info.e_prof_alloc_time, t); +} + +static inline void +edata_prof_alloc_size_set(edata_t *edata, size_t size) { + edata->e_prof_info.e_prof_alloc_size = size; +} + +static inline void +edata_prof_recent_alloc_set_dont_call_directly(edata_t *edata, + prof_recent_t *recent_alloc) { + atomic_store_p(&edata->e_prof_info.e_prof_recent_alloc, recent_alloc, + ATOMIC_RELAXED); +} + +static inline bool +edata_is_head_get(edata_t *edata) { + return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK) >> + EDATA_BITS_IS_HEAD_SHIFT); +} + +static inline void +edata_is_head_set(edata_t *edata, bool is_head) { + edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK) | + ((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT); +} + +static inline bool +edata_state_in_transition(extent_state_t state) { + return state >= extent_state_transition; +} + +/* + * Because this function is implemented as a sequence of bitfield modifications, + * even though each individual bit is properly initialized, we technically read + * uninitialized data within it. This is mostly fine, since most callers get + * their edatas from zeroing sources, but callers who make stack edata_ts need + * to manually zero them. 
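As a concrete illustration of that zeroing requirement, a hedged sketch of a stack-allocated edata_t; the helper name and argument values are placeholders, and edata_init is the initializer declared immediately below.

```
#include <string.h>

#include "jemalloc/internal/edata.h"

static void
example_stack_edata(void *addr, size_t size) {
	edata_t edata;
	/* Mandatory for stack-allocated edata_t, per the comment above. */
	memset(&edata, 0, sizeof(edata));
	edata_init(&edata, /* arena_ind */ 0, addr, size, /* slab */ false,
	    SC_NSIZES /* "invalid" szind */, /* sn */ 0, extent_state_active,
	    /* zeroed */ false, /* committed */ true, EXTENT_PAI_PAC,
	    EXTENT_NOT_HEAD);
	/* ... use the fully initialized edata ... */
}
```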
+ */ +static inline void +edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size, + bool slab, szind_t szind, uint64_t sn, extent_state_t state, bool zeroed, + bool committed, extent_pai_t pai, extent_head_state_t is_head) { + assert(addr == PAGE_ADDR2BASE(addr) || !slab); + + edata_arena_ind_set(edata, arena_ind); + edata_addr_set(edata, addr); + edata_size_set(edata, size); + edata_slab_set(edata, slab); + edata_szind_set(edata, szind); + edata_sn_set(edata, sn); + edata_state_set(edata, state); + edata_guarded_set(edata, false); + edata_zeroed_set(edata, zeroed); + edata_committed_set(edata, committed); + edata_pai_set(edata, pai); + edata_is_head_set(edata, is_head == EXTENT_IS_HEAD); + if (config_prof) { + edata_prof_tctx_set(edata, NULL); + } +} + +static inline void +edata_binit(edata_t *edata, void *addr, size_t bsize, uint64_t sn) { + edata_arena_ind_set(edata, (1U << MALLOCX_ARENA_BITS) - 1); + edata_addr_set(edata, addr); + edata_bsize_set(edata, bsize); + edata_slab_set(edata, false); + edata_szind_set(edata, SC_NSIZES); + edata_sn_set(edata, sn); + edata_state_set(edata, extent_state_active); + edata_guarded_set(edata, false); + edata_zeroed_set(edata, true); + edata_committed_set(edata, true); + /* + * This isn't strictly true, but base allocated extents never get + * deallocated and can't be looked up in the emap, but no sense in + * wasting a state bit to encode this fact. + */ + edata_pai_set(edata, EXTENT_PAI_PAC); +} + +static inline int +edata_esn_comp(const edata_t *a, const edata_t *b) { + size_t a_esn = edata_esn_get(a); + size_t b_esn = edata_esn_get(b); + + return (a_esn > b_esn) - (a_esn < b_esn); +} + +static inline int +edata_ead_comp(const edata_t *a, const edata_t *b) { + uintptr_t a_eaddr = (uintptr_t)a; + uintptr_t b_eaddr = (uintptr_t)b; + + return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr); +} + +static inline edata_cmp_summary_t +edata_cmp_summary_get(const edata_t *edata) { + return (edata_cmp_summary_t){edata_sn_get(edata), + (uintptr_t)edata_addr_get(edata)}; +} + +static inline int +edata_cmp_summary_comp(edata_cmp_summary_t a, edata_cmp_summary_t b) { + int ret; + ret = (a.sn > b.sn) - (a.sn < b.sn); + if (ret != 0) { + return ret; + } + ret = (a.addr > b.addr) - (a.addr < b.addr); + return ret; +} + +static inline int +edata_snad_comp(const edata_t *a, const edata_t *b) { + edata_cmp_summary_t a_cmp = edata_cmp_summary_get(a); + edata_cmp_summary_t b_cmp = edata_cmp_summary_get(b); + + return edata_cmp_summary_comp(a_cmp, b_cmp); +} + +static inline int +edata_esnead_comp(const edata_t *a, const edata_t *b) { + int ret; + + ret = edata_esn_comp(a, b); + if (ret != 0) { + return ret; + } + + ret = edata_ead_comp(a, b); + return ret; +} + +ph_proto(, edata_avail, edata_t) +ph_proto(, edata_heap, edata_t) + +#endif /* JEMALLOC_INTERNAL_EDATA_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/edata_cache.h b/contrib/jemalloc/include/jemalloc/internal/edata_cache.h new file mode 100644 index 00000000000000..8b6c0ef794996a --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/edata_cache.h @@ -0,0 +1,49 @@ +#ifndef JEMALLOC_INTERNAL_EDATA_CACHE_H +#define JEMALLOC_INTERNAL_EDATA_CACHE_H + +#include "jemalloc/internal/base.h" + +/* For tests only. */ +#define EDATA_CACHE_FAST_FILL 4 + +/* + * A cache of edata_t structures allocated via base_alloc_edata (as opposed to + * the underlying extents they describe). The contents of returned edata_t + * objects are garbage and cannot be relied upon. 
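A hedged sketch of the intended get/initialize/put lifecycle against the cache declared below; the roundtrip helper is hypothetical and the edata_init arguments are placeholders.

```
#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/edata_cache.h"

static void
example_edata_cache_roundtrip(tsdn_t *tsdn, edata_cache_t *cache, void *addr,
    size_t size) {
	edata_t *edata = edata_cache_get(tsdn, cache);
	if (edata == NULL) {
		return;	/* Allocating the metadata itself failed. */
	}
	/* The returned contents are garbage: set every field before use. */
	edata_init(edata, /* arena_ind */ 0, addr, size, /* slab */ false,
	    SC_NSIZES, /* sn */ 0, extent_state_active, /* zeroed */ false,
	    /* committed */ true, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
	/* ... hand the edata to a page allocator, etc. ... */
	edata_cache_put(tsdn, cache, edata);
}
```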
+ */ + +typedef struct edata_cache_s edata_cache_t; +struct edata_cache_s { + edata_avail_t avail; + atomic_zu_t count; + malloc_mutex_t mtx; + base_t *base; +}; + +bool edata_cache_init(edata_cache_t *edata_cache, base_t *base); +edata_t *edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache); +void edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata); + +void edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache); +void edata_cache_postfork_parent(tsdn_t *tsdn, edata_cache_t *edata_cache); +void edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache); + +/* + * An edata_cache_small is like an edata_cache, but it relies on external + * synchronization and avoids first-fit strategies. + */ + +typedef struct edata_cache_fast_s edata_cache_fast_t; +struct edata_cache_fast_s { + edata_list_inactive_t list; + edata_cache_t *fallback; + bool disabled; +}; + +void edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback); +edata_t *edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs); +void edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs, + edata_t *edata); +void edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs); + +#endif /* JEMALLOC_INTERNAL_EDATA_CACHE_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/ehooks.h b/contrib/jemalloc/include/jemalloc/internal/ehooks.h new file mode 100644 index 00000000000000..8d9513e258a889 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/ehooks.h @@ -0,0 +1,412 @@ +#ifndef JEMALLOC_INTERNAL_EHOOKS_H +#define JEMALLOC_INTERNAL_EHOOKS_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/extent_mmap.h" + +/* + * This module is the internal interface to the extent hooks (both + * user-specified and external). Eventually, this will give us the flexibility + * to use multiple different versions of user-visible extent-hook APIs under a + * single user interface. + * + * Current API expansions (not available to anyone but the default hooks yet): + * - Head state tracking. Hooks can decide whether or not to merge two + * extents based on whether or not one of them is the head (i.e. was + * allocated on its own). The later extent loses its "head" status. + */ + +extern const extent_hooks_t ehooks_default_extent_hooks; + +typedef struct ehooks_s ehooks_t; +struct ehooks_s { + /* + * The user-visible id that goes with the ehooks (i.e. that of the base + * they're a part of, the associated arena's index within the arenas + * array). + */ + unsigned ind; + /* Logically an extent_hooks_t *. */ + atomic_p_t ptr; +}; + +extern const extent_hooks_t ehooks_default_extent_hooks; + +/* + * These are not really part of the public API. Each hook has a fast-path for + * the default-hooks case that can avoid various small inefficiencies: + * - Forgetting tsd and then calling tsd_get within the hook. + * - Getting more state than necessary out of the extent_t. + * - Doing arena_ind -> arena -> arena_ind lookups. + * By making the calls to these functions visible to the compiler, it can move + * those extra bits of computation down below the fast-paths where they get ignored. 
+ */ +void *ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, unsigned arena_ind); +bool ehooks_default_dalloc_impl(void *addr, size_t size); +void ehooks_default_destroy_impl(void *addr, size_t size); +bool ehooks_default_commit_impl(void *addr, size_t offset, size_t length); +bool ehooks_default_decommit_impl(void *addr, size_t offset, size_t length); +#ifdef PAGES_CAN_PURGE_LAZY +bool ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length); +#endif +#ifdef PAGES_CAN_PURGE_FORCED +bool ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length); +#endif +bool ehooks_default_split_impl(); +/* + * Merge is the only default extent hook we declare -- see the comment in + * ehooks_merge. + */ +bool ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a, + size_t size_a, void *addr_b, size_t size_b, bool committed, + unsigned arena_ind); +bool ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b); +void ehooks_default_zero_impl(void *addr, size_t size); +void ehooks_default_guard_impl(void *guard1, void *guard2); +void ehooks_default_unguard_impl(void *guard1, void *guard2); + +/* + * We don't officially support reentrancy from wtihin the extent hooks. But + * various people who sit within throwing distance of the jemalloc team want + * that functionality in certain limited cases. The default reentrancy guards + * assert that we're not reentrant from a0 (since it's the bootstrap arena, + * where reentrant allocations would be redirected), which we would incorrectly + * trigger in cases where a0 has extent hooks (those hooks themselves can't be + * reentrant, then, but there are reasonable uses for such functionality, like + * putting internal metadata on hugepages). Therefore, we use the raw + * reentrancy guards. + * + * Eventually, we need to think more carefully about whether and where we + * support allocating from within extent hooks (and what that means for things + * like profiling, stats collection, etc.), and document what the guarantee is. + */ +static inline void +ehooks_pre_reentrancy(tsdn_t *tsdn) { + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + tsd_pre_reentrancy_raw(tsd); +} + +static inline void +ehooks_post_reentrancy(tsdn_t *tsdn) { + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + tsd_post_reentrancy_raw(tsd); +} + +/* Beginning of the public API. */ +void ehooks_init(ehooks_t *ehooks, extent_hooks_t *extent_hooks, unsigned ind); + +static inline unsigned +ehooks_ind_get(const ehooks_t *ehooks) { + return ehooks->ind; +} + +static inline void +ehooks_set_extent_hooks_ptr(ehooks_t *ehooks, extent_hooks_t *extent_hooks) { + atomic_store_p(&ehooks->ptr, extent_hooks, ATOMIC_RELEASE); +} + +static inline extent_hooks_t * +ehooks_get_extent_hooks_ptr(ehooks_t *ehooks) { + return (extent_hooks_t *)atomic_load_p(&ehooks->ptr, ATOMIC_ACQUIRE); +} + +static inline bool +ehooks_are_default(ehooks_t *ehooks) { + return ehooks_get_extent_hooks_ptr(ehooks) == + &ehooks_default_extent_hooks; +} + +/* + * In some cases, a caller needs to allocate resources before attempting to call + * a hook. If that hook is doomed to fail, this is wasteful. We therefore + * include some checks for such cases. 
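A hedged sketch of such a check in a caller, using the will_fail predicates declared just below; `example_try_split` and its elided bookkeeping are hypothetical.

```
#include "jemalloc/internal/ehooks.h"

static bool
example_try_split(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed) {
	if (ehooks_split_will_fail(ehooks)) {
		/* Skip allocating trail metadata, emap entries, etc. */
		return true;
	}
	/* ... allocate the trail edata_t, prepare the emap, ... */
	return ehooks_split(tsdn, ehooks, addr, size, size_a, size_b,
	    committed);
}
```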
+ */ +static inline bool +ehooks_dalloc_will_fail(ehooks_t *ehooks) { + if (ehooks_are_default(ehooks)) { + return opt_retain; + } else { + return ehooks_get_extent_hooks_ptr(ehooks)->dalloc == NULL; + } +} + +static inline bool +ehooks_split_will_fail(ehooks_t *ehooks) { + return ehooks_get_extent_hooks_ptr(ehooks)->split == NULL; +} + +static inline bool +ehooks_merge_will_fail(ehooks_t *ehooks) { + return ehooks_get_extent_hooks_ptr(ehooks)->merge == NULL; +} + +static inline bool +ehooks_guard_will_fail(ehooks_t *ehooks) { + /* + * Before the guard hooks are officially introduced, limit the use to + * the default hooks only. + */ + return !ehooks_are_default(ehooks); +} + +/* + * Some hooks are required to return zeroed memory in certain situations. In + * debug mode, we do some heuristic checks that they did what they were supposed + * to. + * + * This isn't really ehooks-specific (i.e. anyone can check for zeroed memory). + * But incorrect zero information indicates an ehook bug. + */ +static inline void +ehooks_debug_zero_check(void *addr, size_t size) { + assert(((uintptr_t)addr & PAGE_MASK) == 0); + assert((size & PAGE_MASK) == 0); + assert(size > 0); + if (config_debug) { + /* Check the whole first page. */ + size_t *p = (size_t *)addr; + for (size_t i = 0; i < PAGE / sizeof(size_t); i++) { + assert(p[i] == 0); + } + /* + * And 4 spots within. There's a tradeoff here; the larger + * this number, the more likely it is that we'll catch a bug + * where ehooks return a sparsely non-zero range. But + * increasing the number of checks also increases the number of + * page faults in debug mode. FreeBSD does much of their + * day-to-day development work in debug mode, so we don't want + * even the debug builds to be too slow. + */ + const size_t nchecks = 4; + assert(PAGE >= sizeof(size_t) * nchecks); + for (size_t i = 0; i < nchecks; ++i) { + assert(p[i * (size / sizeof(size_t) / nchecks)] == 0); + } + } +} + + +static inline void * +ehooks_alloc(tsdn_t *tsdn, ehooks_t *ehooks, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit) { + bool orig_zero = *zero; + void *ret; + extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); + if (extent_hooks == &ehooks_default_extent_hooks) { + ret = ehooks_default_alloc_impl(tsdn, new_addr, size, + alignment, zero, commit, ehooks_ind_get(ehooks)); + } else { + ehooks_pre_reentrancy(tsdn); + ret = extent_hooks->alloc(extent_hooks, new_addr, size, + alignment, zero, commit, ehooks_ind_get(ehooks)); + ehooks_post_reentrancy(tsdn); + } + assert(new_addr == NULL || ret == NULL || new_addr == ret); + assert(!orig_zero || *zero); + if (*zero && ret != NULL) { + ehooks_debug_zero_check(ret, size); + } + return ret; +} + +static inline bool +ehooks_dalloc(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, + bool committed) { + extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); + if (extent_hooks == &ehooks_default_extent_hooks) { + return ehooks_default_dalloc_impl(addr, size); + } else if (extent_hooks->dalloc == NULL) { + return true; + } else { + ehooks_pre_reentrancy(tsdn); + bool err = extent_hooks->dalloc(extent_hooks, addr, size, + committed, ehooks_ind_get(ehooks)); + ehooks_post_reentrancy(tsdn); + return err; + } +} + +static inline void +ehooks_destroy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, + bool committed) { + extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); + if (extent_hooks == &ehooks_default_extent_hooks) { + ehooks_default_destroy_impl(addr, 
size); + } else if (extent_hooks->destroy == NULL) { + /* Do nothing. */ + } else { + ehooks_pre_reentrancy(tsdn); + extent_hooks->destroy(extent_hooks, addr, size, committed, + ehooks_ind_get(ehooks)); + ehooks_post_reentrancy(tsdn); + } +} + +static inline bool +ehooks_commit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, + size_t offset, size_t length) { + extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); + bool err; + if (extent_hooks == &ehooks_default_extent_hooks) { + err = ehooks_default_commit_impl(addr, offset, length); + } else if (extent_hooks->commit == NULL) { + err = true; + } else { + ehooks_pre_reentrancy(tsdn); + err = extent_hooks->commit(extent_hooks, addr, size, + offset, length, ehooks_ind_get(ehooks)); + ehooks_post_reentrancy(tsdn); + } + if (!err) { + ehooks_debug_zero_check(addr, size); + } + return err; +} + +static inline bool +ehooks_decommit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, + size_t offset, size_t length) { + extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); + if (extent_hooks == &ehooks_default_extent_hooks) { + return ehooks_default_decommit_impl(addr, offset, length); + } else if (extent_hooks->decommit == NULL) { + return true; + } else { + ehooks_pre_reentrancy(tsdn); + bool err = extent_hooks->decommit(extent_hooks, addr, size, + offset, length, ehooks_ind_get(ehooks)); + ehooks_post_reentrancy(tsdn); + return err; + } +} + +static inline bool +ehooks_purge_lazy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, + size_t offset, size_t length) { + extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); +#ifdef PAGES_CAN_PURGE_LAZY + if (extent_hooks == &ehooks_default_extent_hooks) { + return ehooks_default_purge_lazy_impl(addr, offset, length); + } +#endif + if (extent_hooks->purge_lazy == NULL) { + return true; + } else { + ehooks_pre_reentrancy(tsdn); + bool err = extent_hooks->purge_lazy(extent_hooks, addr, size, + offset, length, ehooks_ind_get(ehooks)); + ehooks_post_reentrancy(tsdn); + return err; + } +} + +static inline bool +ehooks_purge_forced(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, + size_t offset, size_t length) { + extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); + /* + * It would be correct to have a ehooks_debug_zero_check call at the end + * of this function; purge_forced is required to zero. But checking + * would touch the page in question, which may have performance + * consequences (imagine the hooks are using hugepages, with a global + * zero page off). Even in debug mode, it's usually a good idea to + * avoid cases that can dramatically increase memory consumption. 
+ */ +#ifdef PAGES_CAN_PURGE_FORCED + if (extent_hooks == &ehooks_default_extent_hooks) { + return ehooks_default_purge_forced_impl(addr, offset, length); + } +#endif + if (extent_hooks->purge_forced == NULL) { + return true; + } else { + ehooks_pre_reentrancy(tsdn); + bool err = extent_hooks->purge_forced(extent_hooks, addr, size, + offset, length, ehooks_ind_get(ehooks)); + ehooks_post_reentrancy(tsdn); + return err; + } +} + +static inline bool +ehooks_split(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size, + size_t size_a, size_t size_b, bool committed) { + extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); + if (ehooks_are_default(ehooks)) { + return ehooks_default_split_impl(); + } else if (extent_hooks->split == NULL) { + return true; + } else { + ehooks_pre_reentrancy(tsdn); + bool err = extent_hooks->split(extent_hooks, addr, size, size_a, + size_b, committed, ehooks_ind_get(ehooks)); + ehooks_post_reentrancy(tsdn); + return err; + } +} + +static inline bool +ehooks_merge(tsdn_t *tsdn, ehooks_t *ehooks, void *addr_a, size_t size_a, + void *addr_b, size_t size_b, bool committed) { + extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); + if (extent_hooks == &ehooks_default_extent_hooks) { + return ehooks_default_merge_impl(tsdn, addr_a, addr_b); + } else if (extent_hooks->merge == NULL) { + return true; + } else { + ehooks_pre_reentrancy(tsdn); + bool err = extent_hooks->merge(extent_hooks, addr_a, size_a, + addr_b, size_b, committed, ehooks_ind_get(ehooks)); + ehooks_post_reentrancy(tsdn); + return err; + } +} + +static inline void +ehooks_zero(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size) { + extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); + if (extent_hooks == &ehooks_default_extent_hooks) { + ehooks_default_zero_impl(addr, size); + } else { + /* + * It would be correct to try using the user-provided purge + * hooks (since they are required to have zeroed the extent if + * they indicate success), but we don't necessarily know their + * cost. We'll be conservative and use memset. + */ + memset(addr, 0, size); + } +} + +static inline bool +ehooks_guard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) { + bool err; + extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); + + if (extent_hooks == &ehooks_default_extent_hooks) { + ehooks_default_guard_impl(guard1, guard2); + err = false; + } else { + err = true; + } + + return err; +} + +static inline bool +ehooks_unguard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) { + bool err; + extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks); + + if (extent_hooks == &ehooks_default_extent_hooks) { + ehooks_default_unguard_impl(guard1, guard2); + err = false; + } else { + err = true; + } + + return err; +} + +#endif /* JEMALLOC_INTERNAL_EHOOKS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/emap.h b/contrib/jemalloc/include/jemalloc/internal/emap.h new file mode 100644 index 00000000000000..847af3278de309 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/emap.h @@ -0,0 +1,357 @@ +#ifndef JEMALLOC_INTERNAL_EMAP_H +#define JEMALLOC_INTERNAL_EMAP_H + +#include "jemalloc/internal/base.h" +#include "jemalloc/internal/rtree.h" + +/* + * Note: Ends without at semicolon, so that + * EMAP_DECLARE_RTREE_CTX; + * in uses will avoid empty-statement warnings. 
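A usage sketch of the macro defined just below; `example_lookup` is hypothetical. The semicolon is supplied at the use site, which is why the macro itself omits it.

```
#include "jemalloc/internal/emap.h"

static edata_t *
example_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr) {
	EMAP_DECLARE_RTREE_CTX;

	return rtree_read(tsdn, &emap->rtree, rtree_ctx, (uintptr_t)ptr).edata;
}
```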
+ */ +#define EMAP_DECLARE_RTREE_CTX \ + rtree_ctx_t rtree_ctx_fallback; \ + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback) + +typedef struct emap_s emap_t; +struct emap_s { + rtree_t rtree; +}; + +/* Used to pass rtree lookup context down the path. */ +typedef struct emap_alloc_ctx_t emap_alloc_ctx_t; +struct emap_alloc_ctx_t { + szind_t szind; + bool slab; +}; + +typedef struct emap_full_alloc_ctx_s emap_full_alloc_ctx_t; +struct emap_full_alloc_ctx_s { + szind_t szind; + bool slab; + edata_t *edata; +}; + +bool emap_init(emap_t *emap, base_t *base, bool zeroed); + +void emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind, + bool slab); + +void emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata, + extent_state_t state); + +/* + * The two acquire functions below allow accessing neighbor edatas, if it's safe + * and valid to do so (i.e. from the same arena, of the same state, etc.). This + * is necessary because the ecache locks are state based, and only protect + * edatas with the same state. Therefore the neighbor edata's state needs to be + * verified first, before chasing the edata pointer. The returned edata will be + * in an acquired state, meaning other threads will be prevented from accessing + * it, even if technically the edata can still be discovered from the rtree. + * + * This means, at any moment when holding pointers to edata, either one of the + * state based locks is held (and the edatas are all of the protected state), or + * the edatas are in an acquired state (e.g. in active or merging state). The + * acquire operation itself (changing the edata to an acquired state) is done + * under the state locks. + */ +edata_t *emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap, + edata_t *edata, extent_pai_t pai, extent_state_t expected_state, + bool forward); +edata_t *emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap, + edata_t *edata, extent_pai_t pai, extent_state_t expected_state); +void emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata, + extent_state_t new_state); + +/* + * Associate the given edata with its beginning and end address, setting the + * szind and slab info appropriately. + * Returns true on error (i.e. resource exhaustion). + */ +bool emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata, + szind_t szind, bool slab); + +/* + * Does the same thing, but with the interior of the range, for slab + * allocations. + * + * You might wonder why we don't just have a single emap_register function that + * does both depending on the value of 'slab'. The answer is twofold: + * - As a practical matter, in places like the extract->split->commit pathway, + * we defer the interior operation until we're sure that the commit won't fail + * (but we have to register the split boundaries there). + * - In general, we're trying to move to a world where the page-specific + * allocator doesn't know as much about how the pages it allocates will be + * used, and passing a 'slab' parameter everywhere makes that more + * complicated. + * + * Unlike the boundary version, this function can't fail; this is because slabs + * can't get big enough to touch a new page that neither of the boundaries + * touched, so no allocation is necessary to fill the interior once the boundary + * has been touched. 
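A hedged sketch of the resulting two-step registration for a slab extent, using emap_register_boundary above and emap_register_interior declared just below; the helper name is hypothetical.

```
#include "jemalloc/internal/emap.h"

static bool
example_register_slab(tsdn_t *tsdn, emap_t *emap, edata_t *slab,
    szind_t szind) {
	if (emap_register_boundary(tsdn, emap, slab, szind, /* slab */ true)) {
		return true;	/* Resource exhaustion; caller unwinds. */
	}
	/* Interior registration cannot fail once the boundary is in place. */
	emap_register_interior(tsdn, emap, slab, szind);
	return false;
}
```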
+ */ +void emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata, + szind_t szind); + +void emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata); +void emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata); + +typedef struct emap_prepare_s emap_prepare_t; +struct emap_prepare_s { + rtree_leaf_elm_t *lead_elm_a; + rtree_leaf_elm_t *lead_elm_b; + rtree_leaf_elm_t *trail_elm_a; + rtree_leaf_elm_t *trail_elm_b; +}; + +/** + * These functions the emap metadata management for merging, splitting, and + * reusing extents. In particular, they set the boundary mappings from + * addresses to edatas. If the result is going to be used as a slab, you + * still need to call emap_register_interior on it, though. + * + * Remap simply changes the szind and slab status of an extent's boundary + * mappings. If the extent is not a slab, it doesn't bother with updating the + * end mapping (since lookups only occur in the interior of an extent for + * slabs). Since the szind and slab status only make sense for active extents, + * this should only be called while activating or deactivating an extent. + * + * Split and merge have a "prepare" and a "commit" portion. The prepare portion + * does the operations that can be done without exclusive access to the extent + * in question, while the commit variant requires exclusive access to maintain + * the emap invariants. The only function that can fail is emap_split_prepare, + * and it returns true on failure (at which point the caller shouldn't commit). + * + * In all cases, "lead" refers to the lower-addressed extent, and trail to the + * higher-addressed one. It's the caller's responsibility to set the edata + * state appropriately. + */ +bool emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare, + edata_t *edata, size_t size_a, edata_t *trail, size_t size_b); +void emap_split_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare, + edata_t *lead, size_t size_a, edata_t *trail, size_t size_b); +void emap_merge_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare, + edata_t *lead, edata_t *trail); +void emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare, + edata_t *lead, edata_t *trail); + +/* Assert that the emap's view of the given edata matches the edata's view. */ +void emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata); +static inline void +emap_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { + if (config_debug) { + emap_do_assert_mapped(tsdn, emap, edata); + } +} + +/* Assert that the given edata isn't in the map. */ +void emap_do_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata); +static inline void +emap_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { + if (config_debug) { + emap_do_assert_not_mapped(tsdn, emap, edata); + } +} + +JEMALLOC_ALWAYS_INLINE bool +emap_edata_in_transition(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { + assert(config_debug); + emap_assert_mapped(tsdn, emap, edata); + + EMAP_DECLARE_RTREE_CTX; + rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx, + (uintptr_t)edata_base_get(edata)); + + return edata_state_in_transition(contents.metadata.state); +} + +JEMALLOC_ALWAYS_INLINE bool +emap_edata_is_acquired(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { + if (!config_debug) { + /* For assertions only. */ + return false; + } + + /* + * The edata is considered acquired if no other threads will attempt to + * read / write any fields from it. 
This includes a few cases: + * + * 1) edata not hooked into emap yet -- This implies the edata just got + * allocated or initialized. + * + * 2) in an active or transition state -- In both cases, the edata can + * be discovered from the emap, however the state tracked in the rtree + * will prevent other threads from accessing the actual edata. + */ + EMAP_DECLARE_RTREE_CTX; + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &emap->rtree, + rtree_ctx, (uintptr_t)edata_base_get(edata), /* dependent */ true, + /* init_missing */ false); + if (elm == NULL) { + return true; + } + rtree_contents_t contents = rtree_leaf_elm_read(tsdn, &emap->rtree, elm, + /* dependent */ true); + if (contents.edata == NULL || + contents.metadata.state == extent_state_active || + edata_state_in_transition(contents.metadata.state)) { + return true; + } + + return false; +} + +JEMALLOC_ALWAYS_INLINE void +extent_assert_can_coalesce(const edata_t *inner, const edata_t *outer) { + assert(edata_arena_ind_get(inner) == edata_arena_ind_get(outer)); + assert(edata_pai_get(inner) == edata_pai_get(outer)); + assert(edata_committed_get(inner) == edata_committed_get(outer)); + assert(edata_state_get(inner) == extent_state_active); + assert(edata_state_get(outer) == extent_state_merging); + assert(!edata_guarded_get(inner) && !edata_guarded_get(outer)); + assert(edata_base_get(inner) == edata_past_get(outer) || + edata_base_get(outer) == edata_past_get(inner)); +} + +JEMALLOC_ALWAYS_INLINE void +extent_assert_can_expand(const edata_t *original, const edata_t *expand) { + assert(edata_arena_ind_get(original) == edata_arena_ind_get(expand)); + assert(edata_pai_get(original) == edata_pai_get(expand)); + assert(edata_state_get(original) == extent_state_active); + assert(edata_state_get(expand) == extent_state_merging); + assert(edata_past_get(original) == edata_base_get(expand)); +} + +JEMALLOC_ALWAYS_INLINE edata_t * +emap_edata_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr) { + EMAP_DECLARE_RTREE_CTX; + + return rtree_read(tsdn, &emap->rtree, rtree_ctx, (uintptr_t)ptr).edata; +} + +/* Fills in alloc_ctx with the info in the map. */ +JEMALLOC_ALWAYS_INLINE void +emap_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr, + emap_alloc_ctx_t *alloc_ctx) { + EMAP_DECLARE_RTREE_CTX; + + rtree_metadata_t metadata = rtree_metadata_read(tsdn, &emap->rtree, + rtree_ctx, (uintptr_t)ptr); + alloc_ctx->szind = metadata.szind; + alloc_ctx->slab = metadata.slab; +} + +/* The pointer must be mapped. */ +JEMALLOC_ALWAYS_INLINE void +emap_full_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr, + emap_full_alloc_ctx_t *full_alloc_ctx) { + EMAP_DECLARE_RTREE_CTX; + + rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx, + (uintptr_t)ptr); + full_alloc_ctx->edata = contents.edata; + full_alloc_ctx->szind = contents.metadata.szind; + full_alloc_ctx->slab = contents.metadata.slab; +} + +/* + * The pointer is allowed to not be mapped. + * + * Returns true when the pointer is not present. 
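A hedged sketch of probing an arbitrary pointer with the lookup defined just below; `example_probe` is hypothetical.

```
#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/sz.h"

static bool
example_probe(tsdn_t *tsdn, emap_t *emap, const void *ptr, size_t *usize) {
	emap_full_alloc_ctx_t ctx;
	if (emap_full_alloc_ctx_try_lookup(tsdn, emap, ptr, &ctx)) {
		return false;	/* ptr is not mapped by this emap. */
	}
	*usize = sz_index2size(ctx.szind);
	return true;
}
```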
+ */ +JEMALLOC_ALWAYS_INLINE bool +emap_full_alloc_ctx_try_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr, + emap_full_alloc_ctx_t *full_alloc_ctx) { + EMAP_DECLARE_RTREE_CTX; + + rtree_contents_t contents; + bool err = rtree_read_independent(tsdn, &emap->rtree, rtree_ctx, + (uintptr_t)ptr, &contents); + if (err) { + return true; + } + full_alloc_ctx->edata = contents.edata; + full_alloc_ctx->szind = contents.metadata.szind; + full_alloc_ctx->slab = contents.metadata.slab; + return false; +} + +/* + * Only used on the fastpath of free. Returns true when cannot be fulfilled by + * fast path, e.g. when the metadata key is not cached. + */ +JEMALLOC_ALWAYS_INLINE bool +emap_alloc_ctx_try_lookup_fast(tsd_t *tsd, emap_t *emap, const void *ptr, + emap_alloc_ctx_t *alloc_ctx) { + /* Use the unsafe getter since this may gets called during exit. */ + rtree_ctx_t *rtree_ctx = tsd_rtree_ctxp_get_unsafe(tsd); + + rtree_metadata_t metadata; + bool err = rtree_metadata_try_read_fast(tsd_tsdn(tsd), &emap->rtree, + rtree_ctx, (uintptr_t)ptr, &metadata); + if (err) { + return true; + } + alloc_ctx->szind = metadata.szind; + alloc_ctx->slab = metadata.slab; + return false; +} + +/* + * We want to do batch lookups out of the cache bins, which use + * cache_bin_ptr_array_get to access the i'th element of the bin (since they + * invert usual ordering in deciding what to flush). This lets the emap avoid + * caring about its caller's ordering. + */ +typedef const void *(*emap_ptr_getter)(void *ctx, size_t ind); +/* + * This allows size-checking assertions, which we can only do while we're in the + * process of edata lookups. + */ +typedef void (*emap_metadata_visitor)(void *ctx, emap_full_alloc_ctx_t *alloc_ctx); + +typedef union emap_batch_lookup_result_u emap_batch_lookup_result_t; +union emap_batch_lookup_result_u { + edata_t *edata; + rtree_leaf_elm_t *rtree_leaf; +}; + +JEMALLOC_ALWAYS_INLINE void +emap_edata_lookup_batch(tsd_t *tsd, emap_t *emap, size_t nptrs, + emap_ptr_getter ptr_getter, void *ptr_getter_ctx, + emap_metadata_visitor metadata_visitor, void *metadata_visitor_ctx, + emap_batch_lookup_result_t *result) { + /* Avoids null-checking tsdn in the loop below. */ + util_assume(tsd != NULL); + rtree_ctx_t *rtree_ctx = tsd_rtree_ctxp_get(tsd); + + for (size_t i = 0; i < nptrs; i++) { + const void *ptr = ptr_getter(ptr_getter_ctx, i); + /* + * Reuse the edatas array as a temp buffer, lying a little about + * the types. + */ + result[i].rtree_leaf = rtree_leaf_elm_lookup(tsd_tsdn(tsd), + &emap->rtree, rtree_ctx, (uintptr_t)ptr, + /* dependent */ true, /* init_missing */ false); + } + + for (size_t i = 0; i < nptrs; i++) { + rtree_leaf_elm_t *elm = result[i].rtree_leaf; + rtree_contents_t contents = rtree_leaf_elm_read(tsd_tsdn(tsd), + &emap->rtree, elm, /* dependent */ true); + result[i].edata = contents.edata; + emap_full_alloc_ctx_t alloc_ctx; + /* + * Not all these fields are read in practice by the metadata + * visitor. But the compiler can easily optimize away the ones + * that aren't, so no sense in being incomplete. 
+ */ + alloc_ctx.szind = contents.metadata.szind; + alloc_ctx.slab = contents.metadata.slab; + alloc_ctx.edata = contents.edata; + metadata_visitor(metadata_visitor_ctx, &alloc_ctx); + } +} + +#endif /* JEMALLOC_INTERNAL_EMAP_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/emitter.h b/contrib/jemalloc/include/jemalloc/internal/emitter.h index 542bc79c36d05f..9482f68bc5c333 100644 --- a/contrib/jemalloc/include/jemalloc/internal/emitter.h +++ b/contrib/jemalloc/include/jemalloc/internal/emitter.h @@ -6,6 +6,7 @@ typedef enum emitter_output_e emitter_output_t; enum emitter_output_e { emitter_output_json, + emitter_output_json_compact, emitter_output_table }; @@ -21,6 +22,7 @@ typedef enum emitter_type_e emitter_type_t; enum emitter_type_e { emitter_type_bool, emitter_type_int, + emitter_type_int64, emitter_type_unsigned, emitter_type_uint32, emitter_type_uint64, @@ -66,7 +68,7 @@ typedef struct emitter_s emitter_t; struct emitter_s { emitter_output_t output; /* The output information. */ - void (*write_cb)(void *, const char *); + write_cb_t *write_cb; void *cbopaque; int nesting_depth; /* True if we've already emitted a value at the given depth. */ @@ -75,6 +77,12 @@ struct emitter_s { bool emitted_key; }; +static inline bool +emitter_outputs_json(emitter_t *emitter) { + return emitter->output == emitter_output_json || + emitter->output == emitter_output_json_compact; +} + /* Internal convenience function. Write to the emitter the given string. */ JEMALLOC_FORMAT_PRINTF(2, 3) static inline void @@ -135,13 +143,16 @@ emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width, switch (value_type) { case emitter_type_bool: - emitter_printf(emitter, + emitter_printf(emitter, emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width), *(const bool *)value ? "true" : "false"); break; case emitter_type_int: EMIT_SIMPLE(int, "%d") break; + case emitter_type_int64: + EMIT_SIMPLE(int64_t, "%" FMTd64) + break; case emitter_type_unsigned: EMIT_SIMPLE(unsigned, "%u") break; @@ -159,7 +170,7 @@ emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width, * anywhere near the fmt size. */ assert(str_written < BUF_SIZE); - emitter_printf(emitter, + emitter_printf(emitter, emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width), buf); break; case emitter_type_uint32: @@ -196,6 +207,7 @@ static inline void emitter_indent(emitter_t *emitter) { int amount = emitter->nesting_depth; const char *indent_str; + assert(emitter->output != emitter_output_json_compact); if (emitter->output == emitter_output_json) { indent_str = "\t"; } else { @@ -209,12 +221,18 @@ emitter_indent(emitter_t *emitter) { static inline void emitter_json_key_prefix(emitter_t *emitter) { + assert(emitter_outputs_json(emitter)); if (emitter->emitted_key) { emitter->emitted_key = false; return; } - emitter_printf(emitter, "%s\n", emitter->item_at_depth ? 
"," : ""); - emitter_indent(emitter); + if (emitter->item_at_depth) { + emitter_printf(emitter, ","); + } + if (emitter->output != emitter_output_json_compact) { + emitter_printf(emitter, "\n"); + emitter_indent(emitter); + } } /******************************************************************************/ @@ -222,27 +240,28 @@ emitter_json_key_prefix(emitter_t *emitter) { static inline void emitter_init(emitter_t *emitter, emitter_output_t emitter_output, - void (*write_cb)(void *, const char *), void *cbopaque) { + write_cb_t *write_cb, void *cbopaque) { emitter->output = emitter_output; emitter->write_cb = write_cb; emitter->cbopaque = cbopaque; emitter->item_at_depth = false; - emitter->emitted_key = false; + emitter->emitted_key = false; emitter->nesting_depth = 0; } /******************************************************************************/ /* JSON public API. */ -/* +/* * Emits a key (e.g. as appears in an object). The next json entity emitted will * be the corresponding value. */ static inline void emitter_json_key(emitter_t *emitter, const char *json_key) { - if (emitter->output == emitter_output_json) { + if (emitter_outputs_json(emitter)) { emitter_json_key_prefix(emitter); - emitter_printf(emitter, "\"%s\": ", json_key); + emitter_printf(emitter, "\"%s\":%s", json_key, + emitter->output == emitter_output_json_compact ? "" : " "); emitter->emitted_key = true; } } @@ -250,7 +269,7 @@ emitter_json_key(emitter_t *emitter, const char *json_key) { static inline void emitter_json_value(emitter_t *emitter, emitter_type_t value_type, const void *value) { - if (emitter->output == emitter_output_json) { + if (emitter_outputs_json(emitter)) { emitter_json_key_prefix(emitter); emitter_print_value(emitter, emitter_justify_none, -1, value_type, value); @@ -268,7 +287,7 @@ emitter_json_kv(emitter_t *emitter, const char *json_key, static inline void emitter_json_array_begin(emitter_t *emitter) { - if (emitter->output == emitter_output_json) { + if (emitter_outputs_json(emitter)) { emitter_json_key_prefix(emitter); emitter_printf(emitter, "["); emitter_nest_inc(emitter); @@ -284,18 +303,20 @@ emitter_json_array_kv_begin(emitter_t *emitter, const char *json_key) { static inline void emitter_json_array_end(emitter_t *emitter) { - if (emitter->output == emitter_output_json) { + if (emitter_outputs_json(emitter)) { assert(emitter->nesting_depth > 0); emitter_nest_dec(emitter); - emitter_printf(emitter, "\n"); - emitter_indent(emitter); + if (emitter->output != emitter_output_json_compact) { + emitter_printf(emitter, "\n"); + emitter_indent(emitter); + } emitter_printf(emitter, "]"); } } static inline void emitter_json_object_begin(emitter_t *emitter) { - if (emitter->output == emitter_output_json) { + if (emitter_outputs_json(emitter)) { emitter_json_key_prefix(emitter); emitter_printf(emitter, "{"); emitter_nest_inc(emitter); @@ -311,11 +332,13 @@ emitter_json_object_kv_begin(emitter_t *emitter, const char *json_key) { static inline void emitter_json_object_end(emitter_t *emitter) { - if (emitter->output == emitter_output_json) { + if (emitter_outputs_json(emitter)) { assert(emitter->nesting_depth > 0); emitter_nest_dec(emitter); - emitter_printf(emitter, "\n"); - emitter_indent(emitter); + if (emitter->output != emitter_output_json_compact) { + emitter_printf(emitter, "\n"); + emitter_indent(emitter); + } emitter_printf(emitter, "}"); } } @@ -420,7 +443,7 @@ emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key, emitter_type_t value_type, const void *value, const 
char *table_note_key, emitter_type_t table_note_value_type, const void *table_note_value) { - if (emitter->output == emitter_output_json) { + if (emitter_outputs_json(emitter)) { emitter_json_key(emitter, json_key); emitter_json_value(emitter, value_type, value); } else { @@ -440,7 +463,7 @@ emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key, static inline void emitter_dict_begin(emitter_t *emitter, const char *json_key, const char *table_header) { - if (emitter->output == emitter_output_json) { + if (emitter_outputs_json(emitter)) { emitter_json_key(emitter, json_key); emitter_json_object_begin(emitter); } else { @@ -450,7 +473,7 @@ emitter_dict_begin(emitter_t *emitter, const char *json_key, static inline void emitter_dict_end(emitter_t *emitter) { - if (emitter->output == emitter_output_json) { + if (emitter_outputs_json(emitter)) { emitter_json_object_end(emitter); } else { emitter_table_dict_end(emitter); @@ -459,7 +482,7 @@ emitter_dict_end(emitter_t *emitter) { static inline void emitter_begin(emitter_t *emitter) { - if (emitter->output == emitter_output_json) { + if (emitter_outputs_json(emitter)) { assert(emitter->nesting_depth == 0); emitter_printf(emitter, "{"); emitter_nest_inc(emitter); @@ -476,10 +499,11 @@ emitter_begin(emitter_t *emitter) { static inline void emitter_end(emitter_t *emitter) { - if (emitter->output == emitter_output_json) { + if (emitter_outputs_json(emitter)) { assert(emitter->nesting_depth == 1); emitter_nest_dec(emitter); - emitter_printf(emitter, "\n}\n"); + emitter_printf(emitter, "%s", emitter->output == + emitter_output_json_compact ? "}" : "\n}\n"); } } diff --git a/contrib/jemalloc/include/jemalloc/internal/eset.h b/contrib/jemalloc/include/jemalloc/internal/eset.h new file mode 100644 index 00000000000000..4f689b47d88117 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/eset.h @@ -0,0 +1,77 @@ +#ifndef JEMALLOC_INTERNAL_ESET_H +#define JEMALLOC_INTERNAL_ESET_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/fb.h" +#include "jemalloc/internal/edata.h" +#include "jemalloc/internal/mutex.h" + +/* + * An eset ("extent set") is a quantized collection of extents, with built-in + * LRU queue. + * + * This class is not thread-safe; synchronization must be done externally if + * there are mutating operations. One exception is the stats counters, which + * may be read without any locking. + */ + +typedef struct eset_bin_s eset_bin_t; +struct eset_bin_s { + edata_heap_t heap; + /* + * We do first-fit across multiple size classes. If we compared against + * the min element in each heap directly, we'd take a cache miss per + * extent we looked at. If we co-locate the edata summaries, we only + * take a miss on the edata we're actually going to return (which is + * inevitable anyways). + */ + edata_cmp_summary_t heap_min; +}; + +typedef struct eset_bin_stats_s eset_bin_stats_t; +struct eset_bin_stats_s { + atomic_zu_t nextents; + atomic_zu_t nbytes; +}; + +typedef struct eset_s eset_t; +struct eset_s { + /* Bitmap for which set bits correspond to non-empty heaps. */ + fb_group_t bitmap[FB_NGROUPS(SC_NPSIZES + 1)]; + + /* Quantized per size class heaps of extents. */ + eset_bin_t bins[SC_NPSIZES + 1]; + + eset_bin_stats_t bin_stats[SC_NPSIZES + 1]; + + /* LRU of all extents in heaps. */ + edata_list_inactive_t lru; + + /* Page sum for all extents in heaps. */ + atomic_zu_t npages; + + /* + * A duplication of the data in the containing ecache. 
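
Referring back to the emitter changes above: the new emitter_output_json_compact mode drops the newline, the indentation, and the space after ":" that the pretty JSON mode prints, and emitter_end closes with a bare "}". The following standalone sketch (illustrative names only, not the jemalloc emitter API) mirrors the emitter_json_key_prefix / emitter_json_key logic to show the difference in output:

```
#include <stdbool.h>
#include <stdio.h>

/*
 * Minimal stand-in for the emitter's key-prefix logic: in compact mode the
 * separator is just ",", in pretty mode it is ",\n" plus indentation, and
 * the key is followed by ":" rather than ": ".
 */
static void
emit_kv(bool compact, bool first, int depth, const char *key, int value) {
	if (!first) {
		printf(",");
	}
	if (!compact) {
		printf("\n");
		for (int i = 0; i < depth; i++) {
			printf("\t");
		}
	}
	printf("\"%s\":%s%d", key, compact ? "" : " ", value);
}

int
main(void) {
	/* Pretty mode: one key per line, tab-indented, trailing newline. */
	printf("{");
	emit_kv(false, true, 1, "allocated", 4096);
	emit_kv(false, false, 1, "active", 8192);
	printf("\n}\n");

	/* Compact mode: {"allocated":4096,"active":8192} on a single line. */
	printf("{");
	emit_kv(true, true, 1, "allocated", 4096);
	emit_kv(true, false, 1, "active", 8192);
	printf("}\n");
	return 0;
}
```
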
We use this only + * for assertions on the states of the passed-in extents. + */ + extent_state_t state; +}; + +void eset_init(eset_t *eset, extent_state_t state); + +size_t eset_npages_get(eset_t *eset); +/* Get the number of extents in the given page size index. */ +size_t eset_nextents_get(eset_t *eset, pszind_t ind); +/* Get the sum total bytes of the extents in the given page size index. */ +size_t eset_nbytes_get(eset_t *eset, pszind_t ind); + +void eset_insert(eset_t *eset, edata_t *edata); +void eset_remove(eset_t *eset, edata_t *edata); +/* + * Select an extent from this eset of the given size and alignment. Returns + * null if no such item could be found. + */ +edata_t *eset_fit(eset_t *eset, size_t esize, size_t alignment, bool exact_only, + unsigned lg_max_fit); + +#endif /* JEMALLOC_INTERNAL_ESET_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/exp_grow.h b/contrib/jemalloc/include/jemalloc/internal/exp_grow.h new file mode 100644 index 00000000000000..8566b8a4c6a4b9 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/exp_grow.h @@ -0,0 +1,50 @@ +#ifndef JEMALLOC_INTERNAL_EXP_GROW_H +#define JEMALLOC_INTERNAL_EXP_GROW_H + +typedef struct exp_grow_s exp_grow_t; +struct exp_grow_s { + /* + * Next extent size class in a growing series to use when satisfying a + * request via the extent hooks (only if opt_retain). This limits the + * number of disjoint virtual memory ranges so that extent merging can + * be effective even if multiple arenas' extent allocation requests are + * highly interleaved. + * + * retain_grow_limit is the max allowed size ind to expand (unless the + * required size is greater). Default is no limit, and controlled + * through mallctl only. + */ + pszind_t next; + pszind_t limit; +}; + +static inline bool +exp_grow_size_prepare(exp_grow_t *exp_grow, size_t alloc_size_min, + size_t *r_alloc_size, pszind_t *r_skip) { + *r_skip = 0; + *r_alloc_size = sz_pind2sz(exp_grow->next + *r_skip); + while (*r_alloc_size < alloc_size_min) { + (*r_skip)++; + if (exp_grow->next + *r_skip >= + sz_psz2ind(SC_LARGE_MAXCLASS)) { + /* Outside legal range. */ + return true; + } + *r_alloc_size = sz_pind2sz(exp_grow->next + *r_skip); + } + return false; +} + +static inline void +exp_grow_size_commit(exp_grow_t *exp_grow, pszind_t skip) { + if (exp_grow->next + skip + 1 <= exp_grow->limit) { + exp_grow->next += skip + 1; + } else { + exp_grow->next = exp_grow->limit; + } + +} + +void exp_grow_init(exp_grow_t *exp_grow); + +#endif /* JEMALLOC_INTERNAL_EXP_GROW_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/extent.h b/contrib/jemalloc/include/jemalloc/internal/extent.h new file mode 100644 index 00000000000000..1d51d41097e3ad --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/extent.h @@ -0,0 +1,137 @@ +#ifndef JEMALLOC_INTERNAL_EXTENT_H +#define JEMALLOC_INTERNAL_EXTENT_H + +#include "jemalloc/internal/ecache.h" +#include "jemalloc/internal/ehooks.h" +#include "jemalloc/internal/ph.h" +#include "jemalloc/internal/rtree.h" + +/* + * This module contains the page-level allocator. It chooses the addresses that + * allocations requested by other modules will inhabit, and updates the global + * metadata to reflect allocation/deallocation/purging decisions. + */ + +/* + * When reuse (and split) an active extent, (1U << opt_lg_extent_max_active_fit) + * is the max ratio between the size of the active extent and the new extent. 
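
As a standalone illustration of the exp_grow_size_prepare / exp_grow_size_commit protocol shown above, the sketch below substitutes a toy doubling size-class table for sz_pind2sz / sz_psz2ind (all names here are hypothetical, not the jemalloc API): prepare finds the first class at or beyond `next` that satisfies the request, and commit advances `next` past the class actually used, capped at `limit`.

```
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy size-class table standing in for sz_pind2sz(): 4K, 8K, 16K, ... */
#define TOY_NCLASSES 8
static const size_t toy_class_size[TOY_NCLASSES] = {
	4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288
};

struct toy_exp_grow {
	unsigned next;	/* Next class to try. */
	unsigned limit;	/* Max class index allowed to grow to. */
};

/* Mirrors exp_grow_size_prepare: returns true on failure. */
static bool
toy_prepare(struct toy_exp_grow *g, size_t min, size_t *alloc_size,
    unsigned *skip) {
	*skip = 0;
	while (toy_class_size[g->next + *skip] < min) {
		(*skip)++;
		if (g->next + *skip >= TOY_NCLASSES) {
			return true;	/* Outside legal range. */
		}
	}
	*alloc_size = toy_class_size[g->next + *skip];
	return false;
}

/* Mirrors exp_grow_size_commit: advance past the class just used. */
static void
toy_commit(struct toy_exp_grow *g, unsigned skip) {
	g->next = (g->next + skip + 1 <= g->limit) ? g->next + skip + 1
	    : g->limit;
}

int
main(void) {
	struct toy_exp_grow g = { 0, TOY_NCLASSES - 1 };
	size_t sz;
	unsigned skip;
	/* A 20000-byte request skips the 4K, 8K, and 16K classes. */
	if (!toy_prepare(&g, 20000, &sz, &skip)) {
		printf("grow to %zu (skip %u)\n", sz, skip);	/* 32768, 3 */
		toy_commit(&g, skip);
	}
	/* The series keeps growing: the next request starts at 64K. */
	if (!toy_prepare(&g, 1, &sz, &skip)) {
		printf("grow to %zu\n", sz);	/* 65536 */
	}
	return 0;
}
```
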
+ */ +#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6 +extern size_t opt_lg_extent_max_active_fit; + +edata_t *ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment, + bool zero, bool guarded); +edata_t *ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment, + bool zero, bool guarded); +void ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + ecache_t *ecache, edata_t *edata); +edata_t *ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + ecache_t *ecache, size_t npages_min); + +void extent_gdump_add(tsdn_t *tsdn, const edata_t *edata); +void extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, + edata_t *edata); +void extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + edata_t *edata); +edata_t *extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + void *new_addr, size_t size, size_t alignment, bool zero, bool *commit, + bool growing_retained); +void extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + edata_t *edata); +void extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + edata_t *edata); +bool extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + size_t offset, size_t length); +bool extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + size_t offset, size_t length); +bool extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + size_t offset, size_t length); +bool extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + size_t offset, size_t length); +edata_t *extent_split_wrapper(tsdn_t *tsdn, pac_t *pac, + ehooks_t *ehooks, edata_t *edata, size_t size_a, size_t size_b, + bool holding_core_locks); +bool extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + edata_t *a, edata_t *b); +bool extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + bool commit, bool zero, bool growing_retained); +size_t extent_sn_next(pac_t *pac); +bool extent_boot(void); + +JEMALLOC_ALWAYS_INLINE bool +extent_neighbor_head_state_mergeable(bool edata_is_head, + bool neighbor_is_head, bool forward) { + /* + * Head states checking: disallow merging if the higher addr extent is a + * head extent. This helps preserve first-fit, and more importantly + * makes sure no merge across arenas. + */ + if (forward) { + if (neighbor_is_head) { + return false; + } + } else { + if (edata_is_head) { + return false; + } + } + return true; +} + +JEMALLOC_ALWAYS_INLINE bool +extent_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents, + extent_pai_t pai, extent_state_t expected_state, bool forward, + bool expanding) { + edata_t *neighbor = contents.edata; + if (neighbor == NULL) { + return false; + } + /* It's not safe to access *neighbor yet; must verify states first. */ + bool neighbor_is_head = contents.metadata.is_head; + if (!extent_neighbor_head_state_mergeable(edata_is_head_get(edata), + neighbor_is_head, forward)) { + return false; + } + extent_state_t neighbor_state = contents.metadata.state; + if (pai == EXTENT_PAI_PAC) { + if (neighbor_state != expected_state) { + return false; + } + /* From this point, it's safe to access *neighbor. */ + if (!expanding && (edata_committed_get(edata) != + edata_committed_get(neighbor))) { + /* + * Some platforms (e.g. Windows) require an explicit + * commit step (and writing to uncommitted memory is not + * allowed). 
+ */ + return false; + } + } else { + if (neighbor_state == extent_state_active) { + return false; + } + /* From this point, it's safe to access *neighbor. */ + } + + assert(edata_pai_get(edata) == pai); + if (edata_pai_get(neighbor) != pai) { + return false; + } + if (opt_retain) { + assert(edata_arena_ind_get(edata) == + edata_arena_ind_get(neighbor)); + } else { + if (edata_arena_ind_get(edata) != + edata_arena_ind_get(neighbor)) { + return false; + } + } + assert(!edata_guarded_get(edata) && !edata_guarded_get(neighbor)); + + return true; +} + +#endif /* JEMALLOC_INTERNAL_EXTENT_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/extent_externs.h b/contrib/jemalloc/include/jemalloc/internal/extent_externs.h deleted file mode 100644 index 8aba57633a3413..00000000000000 --- a/contrib/jemalloc/include/jemalloc/internal/extent_externs.h +++ /dev/null @@ -1,83 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H -#define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H - -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/mutex_pool.h" -#include "jemalloc/internal/ph.h" -#include "jemalloc/internal/rtree.h" - -extern size_t opt_lg_extent_max_active_fit; - -extern rtree_t extents_rtree; -extern const extent_hooks_t extent_hooks_default; -extern mutex_pool_t extent_mutex_pool; - -extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena); -void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent); - -extent_hooks_t *extent_hooks_get(arena_t *arena); -extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena, - extent_hooks_t *extent_hooks); - -#ifdef JEMALLOC_JET -size_t extent_size_quantize_floor(size_t size); -size_t extent_size_quantize_ceil(size_t size); -#endif - -ph_proto(, extent_avail_, extent_tree_t, extent_t) -ph_proto(, extent_heap_, extent_heap_t, extent_t) - -bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state, - bool delay_coalesce); -extent_state_t extents_state_get(const extents_t *extents); -size_t extents_npages_get(extents_t *extents); -/* Get the number of extents in the given page size index. */ -size_t extents_nextents_get(extents_t *extents, pszind_t ind); -/* Get the sum total bytes of the extents in the given page size index. 
*/ -size_t extents_nbytes_get(extents_t *extents, pszind_t ind); -extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr, - size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, - bool *zero, bool *commit); -void extents_dalloc(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent); -extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min); -void extents_prefork(tsdn_t *tsdn, extents_t *extents); -void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents); -void extents_postfork_child(tsdn_t *tsdn, extents_t *extents); -extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, - size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit); -void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent); -void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent); -void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent); -bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, - size_t length); -bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, - size_t length); -bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, - size_t length); -bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, - size_t length); -extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, - szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b); -bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b); - -bool extent_boot(void); - -void extent_util_stats_get(tsdn_t *tsdn, const void *ptr, - size_t *nfree, size_t *nregs, size_t *size); -void extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr, - size_t *nfree, size_t *nregs, size_t *size, - size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr); - -#endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/extent_inlines.h b/contrib/jemalloc/include/jemalloc/internal/extent_inlines.h deleted file mode 100644 index 77fa4c4a29a726..00000000000000 --- a/contrib/jemalloc/include/jemalloc/internal/extent_inlines.h +++ /dev/null @@ -1,501 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H -#define JEMALLOC_INTERNAL_EXTENT_INLINES_H - -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/mutex_pool.h" -#include "jemalloc/internal/pages.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ql.h" -#include "jemalloc/internal/sc.h" -#include "jemalloc/internal/sz.h" - -static inline void -extent_lock(tsdn_t *tsdn, extent_t *extent) { - assert(extent != NULL); - mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent); -} - -static inline void -extent_unlock(tsdn_t *tsdn, extent_t *extent) { - assert(extent != NULL); - mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent); -} - -static inline void -extent_lock2(tsdn_t *tsdn, extent_t 
*extent1, extent_t *extent2) { - assert(extent1 != NULL && extent2 != NULL); - mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1, - (uintptr_t)extent2); -} - -static inline void -extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) { - assert(extent1 != NULL && extent2 != NULL); - mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1, - (uintptr_t)extent2); -} - -static inline unsigned -extent_arena_ind_get(const extent_t *extent) { - unsigned arena_ind = (unsigned)((extent->e_bits & - EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT); - assert(arena_ind < MALLOCX_ARENA_LIMIT); - - return arena_ind; -} - -static inline arena_t * -extent_arena_get(const extent_t *extent) { - unsigned arena_ind = extent_arena_ind_get(extent); - - return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE); -} - -static inline szind_t -extent_szind_get_maybe_invalid(const extent_t *extent) { - szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >> - EXTENT_BITS_SZIND_SHIFT); - assert(szind <= SC_NSIZES); - return szind; -} - -static inline szind_t -extent_szind_get(const extent_t *extent) { - szind_t szind = extent_szind_get_maybe_invalid(extent); - assert(szind < SC_NSIZES); /* Never call when "invalid". */ - return szind; -} - -static inline size_t -extent_usize_get(const extent_t *extent) { - return sz_index2size(extent_szind_get(extent)); -} - -static inline unsigned -extent_binshard_get(const extent_t *extent) { - unsigned binshard = (unsigned)((extent->e_bits & - EXTENT_BITS_BINSHARD_MASK) >> EXTENT_BITS_BINSHARD_SHIFT); - assert(binshard < bin_infos[extent_szind_get(extent)].n_shards); - return binshard; -} - -static inline size_t -extent_sn_get(const extent_t *extent) { - return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >> - EXTENT_BITS_SN_SHIFT); -} - -static inline extent_state_t -extent_state_get(const extent_t *extent) { - return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >> - EXTENT_BITS_STATE_SHIFT); -} - -static inline bool -extent_zeroed_get(const extent_t *extent) { - return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >> - EXTENT_BITS_ZEROED_SHIFT); -} - -static inline bool -extent_committed_get(const extent_t *extent) { - return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >> - EXTENT_BITS_COMMITTED_SHIFT); -} - -static inline bool -extent_dumpable_get(const extent_t *extent) { - return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >> - EXTENT_BITS_DUMPABLE_SHIFT); -} - -static inline bool -extent_slab_get(const extent_t *extent) { - return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >> - EXTENT_BITS_SLAB_SHIFT); -} - -static inline unsigned -extent_nfree_get(const extent_t *extent) { - assert(extent_slab_get(extent)); - return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >> - EXTENT_BITS_NFREE_SHIFT); -} - -static inline void * -extent_base_get(const extent_t *extent) { - assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) || - !extent_slab_get(extent)); - return PAGE_ADDR2BASE(extent->e_addr); -} - -static inline void * -extent_addr_get(const extent_t *extent) { - assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) || - !extent_slab_get(extent)); - return extent->e_addr; -} - -static inline size_t -extent_size_get(const extent_t *extent) { - return (extent->e_size_esn & EXTENT_SIZE_MASK); -} - -static inline size_t -extent_esn_get(const extent_t *extent) { - return (extent->e_size_esn & EXTENT_ESN_MASK); -} - -static inline size_t -extent_bsize_get(const extent_t 
*extent) { - return extent->e_bsize; -} - -static inline void * -extent_before_get(const extent_t *extent) { - return (void *)((uintptr_t)extent_base_get(extent) - PAGE); -} - -static inline void * -extent_last_get(const extent_t *extent) { - return (void *)((uintptr_t)extent_base_get(extent) + - extent_size_get(extent) - PAGE); -} - -static inline void * -extent_past_get(const extent_t *extent) { - return (void *)((uintptr_t)extent_base_get(extent) + - extent_size_get(extent)); -} - -static inline arena_slab_data_t * -extent_slab_data_get(extent_t *extent) { - assert(extent_slab_get(extent)); - return &extent->e_slab_data; -} - -static inline const arena_slab_data_t * -extent_slab_data_get_const(const extent_t *extent) { - assert(extent_slab_get(extent)); - return &extent->e_slab_data; -} - -static inline prof_tctx_t * -extent_prof_tctx_get(const extent_t *extent) { - return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx, - ATOMIC_ACQUIRE); -} - -static inline nstime_t -extent_prof_alloc_time_get(const extent_t *extent) { - return extent->e_alloc_time; -} - -static inline void -extent_arena_set(extent_t *extent, arena_t *arena) { - unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) : ((1U << - MALLOCX_ARENA_BITS) - 1); - extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) | - ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT); -} - -static inline void -extent_binshard_set(extent_t *extent, unsigned binshard) { - /* The assertion assumes szind is set already. */ - assert(binshard < bin_infos[extent_szind_get(extent)].n_shards); - extent->e_bits = (extent->e_bits & ~EXTENT_BITS_BINSHARD_MASK) | - ((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT); -} - -static inline void -extent_addr_set(extent_t *extent, void *addr) { - extent->e_addr = addr; -} - -static inline void -extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) { - assert(extent_base_get(extent) == extent_addr_get(extent)); - - if (alignment < PAGE) { - unsigned lg_range = LG_PAGE - - lg_floor(CACHELINE_CEILING(alignment)); - size_t r; - if (!tsdn_null(tsdn)) { - tsd_t *tsd = tsdn_tsd(tsdn); - r = (size_t)prng_lg_range_u64( - tsd_offset_statep_get(tsd), lg_range); - } else { - r = prng_lg_range_zu( - &extent_arena_get(extent)->offset_state, - lg_range, true); - } - uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE - - lg_range); - extent->e_addr = (void *)((uintptr_t)extent->e_addr + - random_offset); - assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) == - extent->e_addr); - } -} - -static inline void -extent_size_set(extent_t *extent, size_t size) { - assert((size & ~EXTENT_SIZE_MASK) == 0); - extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK); -} - -static inline void -extent_esn_set(extent_t *extent, size_t esn) { - extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn & - EXTENT_ESN_MASK); -} - -static inline void -extent_bsize_set(extent_t *extent, size_t bsize) { - extent->e_bsize = bsize; -} - -static inline void -extent_szind_set(extent_t *extent, szind_t szind) { - assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". 
*/ - extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) | - ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT); -} - -static inline void -extent_nfree_set(extent_t *extent, unsigned nfree) { - assert(extent_slab_get(extent)); - extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) | - ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT); -} - -static inline void -extent_nfree_binshard_set(extent_t *extent, unsigned nfree, unsigned binshard) { - /* The assertion assumes szind is set already. */ - assert(binshard < bin_infos[extent_szind_get(extent)].n_shards); - extent->e_bits = (extent->e_bits & - (~EXTENT_BITS_NFREE_MASK & ~EXTENT_BITS_BINSHARD_MASK)) | - ((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT) | - ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT); -} - -static inline void -extent_nfree_inc(extent_t *extent) { - assert(extent_slab_get(extent)); - extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT); -} - -static inline void -extent_nfree_dec(extent_t *extent) { - assert(extent_slab_get(extent)); - extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT); -} - -static inline void -extent_nfree_sub(extent_t *extent, uint64_t n) { - assert(extent_slab_get(extent)); - extent->e_bits -= (n << EXTENT_BITS_NFREE_SHIFT); -} - -static inline void -extent_sn_set(extent_t *extent, size_t sn) { - extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) | - ((uint64_t)sn << EXTENT_BITS_SN_SHIFT); -} - -static inline void -extent_state_set(extent_t *extent, extent_state_t state) { - extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) | - ((uint64_t)state << EXTENT_BITS_STATE_SHIFT); -} - -static inline void -extent_zeroed_set(extent_t *extent, bool zeroed) { - extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) | - ((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT); -} - -static inline void -extent_committed_set(extent_t *extent, bool committed) { - extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) | - ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT); -} - -static inline void -extent_dumpable_set(extent_t *extent, bool dumpable) { - extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) | - ((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT); -} - -static inline void -extent_slab_set(extent_t *extent, bool slab) { - extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) | - ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT); -} - -static inline void -extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) { - atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE); -} - -static inline void -extent_prof_alloc_time_set(extent_t *extent, nstime_t t) { - nstime_copy(&extent->e_alloc_time, &t); -} - -static inline bool -extent_is_head_get(extent_t *extent) { - if (maps_coalesce) { - not_reached(); - } - - return (bool)((extent->e_bits & EXTENT_BITS_IS_HEAD_MASK) >> - EXTENT_BITS_IS_HEAD_SHIFT); -} - -static inline void -extent_is_head_set(extent_t *extent, bool is_head) { - if (maps_coalesce) { - not_reached(); - } - - extent->e_bits = (extent->e_bits & ~EXTENT_BITS_IS_HEAD_MASK) | - ((uint64_t)is_head << EXTENT_BITS_IS_HEAD_SHIFT); -} - -static inline void -extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size, - bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed, - bool committed, bool dumpable, extent_head_state_t is_head) { - assert(addr == PAGE_ADDR2BASE(addr) || !slab); - - extent_arena_set(extent, arena); - extent_addr_set(extent, addr); - extent_size_set(extent, size); - extent_slab_set(extent, slab); - 
extent_szind_set(extent, szind); - extent_sn_set(extent, sn); - extent_state_set(extent, state); - extent_zeroed_set(extent, zeroed); - extent_committed_set(extent, committed); - extent_dumpable_set(extent, dumpable); - ql_elm_new(extent, ql_link); - if (!maps_coalesce) { - extent_is_head_set(extent, (is_head == EXTENT_IS_HEAD) ? true : - false); - } - if (config_prof) { - extent_prof_tctx_set(extent, NULL); - } -} - -static inline void -extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) { - extent_arena_set(extent, NULL); - extent_addr_set(extent, addr); - extent_bsize_set(extent, bsize); - extent_slab_set(extent, false); - extent_szind_set(extent, SC_NSIZES); - extent_sn_set(extent, sn); - extent_state_set(extent, extent_state_active); - extent_zeroed_set(extent, true); - extent_committed_set(extent, true); - extent_dumpable_set(extent, true); -} - -static inline void -extent_list_init(extent_list_t *list) { - ql_new(list); -} - -static inline extent_t * -extent_list_first(const extent_list_t *list) { - return ql_first(list); -} - -static inline extent_t * -extent_list_last(const extent_list_t *list) { - return ql_last(list, ql_link); -} - -static inline void -extent_list_append(extent_list_t *list, extent_t *extent) { - ql_tail_insert(list, extent, ql_link); -} - -static inline void -extent_list_prepend(extent_list_t *list, extent_t *extent) { - ql_head_insert(list, extent, ql_link); -} - -static inline void -extent_list_replace(extent_list_t *list, extent_t *to_remove, - extent_t *to_insert) { - ql_after_insert(to_remove, to_insert, ql_link); - ql_remove(list, to_remove, ql_link); -} - -static inline void -extent_list_remove(extent_list_t *list, extent_t *extent) { - ql_remove(list, extent, ql_link); -} - -static inline int -extent_sn_comp(const extent_t *a, const extent_t *b) { - size_t a_sn = extent_sn_get(a); - size_t b_sn = extent_sn_get(b); - - return (a_sn > b_sn) - (a_sn < b_sn); -} - -static inline int -extent_esn_comp(const extent_t *a, const extent_t *b) { - size_t a_esn = extent_esn_get(a); - size_t b_esn = extent_esn_get(b); - - return (a_esn > b_esn) - (a_esn < b_esn); -} - -static inline int -extent_ad_comp(const extent_t *a, const extent_t *b) { - uintptr_t a_addr = (uintptr_t)extent_addr_get(a); - uintptr_t b_addr = (uintptr_t)extent_addr_get(b); - - return (a_addr > b_addr) - (a_addr < b_addr); -} - -static inline int -extent_ead_comp(const extent_t *a, const extent_t *b) { - uintptr_t a_eaddr = (uintptr_t)a; - uintptr_t b_eaddr = (uintptr_t)b; - - return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr); -} - -static inline int -extent_snad_comp(const extent_t *a, const extent_t *b) { - int ret; - - ret = extent_sn_comp(a, b); - if (ret != 0) { - return ret; - } - - ret = extent_ad_comp(a, b); - return ret; -} - -static inline int -extent_esnead_comp(const extent_t *a, const extent_t *b) { - int ret; - - ret = extent_esn_comp(a, b); - if (ret != 0) { - return ret; - } - - ret = extent_ead_comp(a, b); - return ret; -} - -#endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/extent_structs.h b/contrib/jemalloc/include/jemalloc/internal/extent_structs.h deleted file mode 100644 index 767cd8930fbe6e..00000000000000 --- a/contrib/jemalloc/include/jemalloc/internal/extent_structs.h +++ /dev/null @@ -1,256 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H -#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H - -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/bit_util.h" -#include 
"jemalloc/internal/bitmap.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/ql.h" -#include "jemalloc/internal/ph.h" -#include "jemalloc/internal/sc.h" - -typedef enum { - extent_state_active = 0, - extent_state_dirty = 1, - extent_state_muzzy = 2, - extent_state_retained = 3 -} extent_state_t; - -/* Extent (span of pages). Use accessor functions for e_* fields. */ -struct extent_s { - /* - * Bitfield containing several fields: - * - * a: arena_ind - * b: slab - * c: committed - * d: dumpable - * z: zeroed - * t: state - * i: szind - * f: nfree - * s: bin_shard - * n: sn - * - * nnnnnnnn ... nnnnnnss ssssffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa - * - * arena_ind: Arena from which this extent came, or all 1 bits if - * unassociated. - * - * slab: The slab flag indicates whether the extent is used for a slab - * of small regions. This helps differentiate small size classes, - * and it indicates whether interior pointers can be looked up via - * iealloc(). - * - * committed: The committed flag indicates whether physical memory is - * committed to the extent, whether explicitly or implicitly - * as on a system that overcommits and satisfies physical - * memory needs on demand via soft page faults. - * - * dumpable: The dumpable flag indicates whether or not we've set the - * memory in question to be dumpable. Note that this - * interacts somewhat subtly with user-specified extent hooks, - * since we don't know if *they* are fiddling with - * dumpability (in which case, we don't want to undo whatever - * they're doing). To deal with this scenario, we: - * - Make dumpable false only for memory allocated with the - * default hooks. - * - Only allow memory to go from non-dumpable to dumpable, - * and only once. - * - Never make the OS call to allow dumping when the - * dumpable bit is already set. - * These three constraints mean that we will never - * accidentally dump user memory that the user meant to set - * nondumpable with their extent hooks. - * - * - * zeroed: The zeroed flag is used by extent recycling code to track - * whether memory is zero-filled. - * - * state: The state flag is an extent_state_t. - * - * szind: The szind flag indicates usable size class index for - * allocations residing in this extent, regardless of whether the - * extent is a slab. Extent size and usable size often differ - * even for non-slabs, either due to sz_large_pad or promotion of - * sampled small regions. - * - * nfree: Number of free regions in slab. - * - * bin_shard: the shard of the bin from which this extent came. - * - * sn: Serial number (potentially non-unique). - * - * Serial numbers may wrap around if !opt_retain, but as long as - * comparison functions fall back on address comparison for equal - * serial numbers, stable (if imperfect) ordering is maintained. - * - * Serial numbers may not be unique even in the absence of - * wrap-around, e.g. when splitting an extent and assigning the same - * serial number to both resulting adjacent extents. 
- */ - uint64_t e_bits; -#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT)) - -#define EXTENT_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS -#define EXTENT_BITS_ARENA_SHIFT 0 -#define EXTENT_BITS_ARENA_MASK MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT) - -#define EXTENT_BITS_SLAB_WIDTH 1 -#define EXTENT_BITS_SLAB_SHIFT (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT) -#define EXTENT_BITS_SLAB_MASK MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT) - -#define EXTENT_BITS_COMMITTED_WIDTH 1 -#define EXTENT_BITS_COMMITTED_SHIFT (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT) -#define EXTENT_BITS_COMMITTED_MASK MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT) - -#define EXTENT_BITS_DUMPABLE_WIDTH 1 -#define EXTENT_BITS_DUMPABLE_SHIFT (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT) -#define EXTENT_BITS_DUMPABLE_MASK MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT) - -#define EXTENT_BITS_ZEROED_WIDTH 1 -#define EXTENT_BITS_ZEROED_SHIFT (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT) -#define EXTENT_BITS_ZEROED_MASK MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT) - -#define EXTENT_BITS_STATE_WIDTH 2 -#define EXTENT_BITS_STATE_SHIFT (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT) -#define EXTENT_BITS_STATE_MASK MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT) - -#define EXTENT_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES) -#define EXTENT_BITS_SZIND_SHIFT (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT) -#define EXTENT_BITS_SZIND_MASK MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT) - -#define EXTENT_BITS_NFREE_WIDTH (LG_SLAB_MAXREGS + 1) -#define EXTENT_BITS_NFREE_SHIFT (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT) -#define EXTENT_BITS_NFREE_MASK MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT) - -#define EXTENT_BITS_BINSHARD_WIDTH 6 -#define EXTENT_BITS_BINSHARD_SHIFT (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT) -#define EXTENT_BITS_BINSHARD_MASK MASK(EXTENT_BITS_BINSHARD_WIDTH, EXTENT_BITS_BINSHARD_SHIFT) - -#define EXTENT_BITS_IS_HEAD_WIDTH 1 -#define EXTENT_BITS_IS_HEAD_SHIFT (EXTENT_BITS_BINSHARD_WIDTH + EXTENT_BITS_BINSHARD_SHIFT) -#define EXTENT_BITS_IS_HEAD_MASK MASK(EXTENT_BITS_IS_HEAD_WIDTH, EXTENT_BITS_IS_HEAD_SHIFT) - -#define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_IS_HEAD_WIDTH + EXTENT_BITS_IS_HEAD_SHIFT) -#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT) - - /* Pointer to the extent that this structure is responsible for. */ - void *e_addr; - - union { - /* - * Extent size and serial number associated with the extent - * structure (different than the serial number for the extent at - * e_addr). - * - * ssssssss [...] ssssssss ssssnnnn nnnnnnnn - */ - size_t e_size_esn; - #define EXTENT_SIZE_MASK ((size_t)~(PAGE-1)) - #define EXTENT_ESN_MASK ((size_t)PAGE-1) - /* Base extent size, which may not be a multiple of PAGE. */ - size_t e_bsize; - }; - - /* - * List linkage, used by a variety of lists: - * - bin_t's slabs_full - * - extents_t's LRU - * - stashed dirty extents - * - arena's large allocations - */ - ql_elm(extent_t) ql_link; - - /* - * Linkage for per size class sn/address-ordered heaps, and - * for extent_avail - */ - phn(extent_t) ph_link; - - union { - /* Small region slab metadata. */ - arena_slab_data_t e_slab_data; - - /* Profiling data, used for large objects. */ - struct { - /* Time when this was allocated. */ - nstime_t e_alloc_time; - /* Points to a prof_tctx_t. 
*/ - atomic_p_t e_prof_tctx; - }; - }; -}; -typedef ql_head(extent_t) extent_list_t; -typedef ph(extent_t) extent_tree_t; -typedef ph(extent_t) extent_heap_t; - -/* Quantized collection of extents, with built-in LRU queue. */ -struct extents_s { - malloc_mutex_t mtx; - - /* - * Quantized per size class heaps of extents. - * - * Synchronization: mtx. - */ - extent_heap_t heaps[SC_NPSIZES + 1]; - atomic_zu_t nextents[SC_NPSIZES + 1]; - atomic_zu_t nbytes[SC_NPSIZES + 1]; - - /* - * Bitmap for which set bits correspond to non-empty heaps. - * - * Synchronization: mtx. - */ - bitmap_t bitmap[BITMAP_GROUPS(SC_NPSIZES + 1)]; - - /* - * LRU of all extents in heaps. - * - * Synchronization: mtx. - */ - extent_list_t lru; - - /* - * Page sum for all extents in heaps. - * - * The synchronization here is a little tricky. Modifications to npages - * must hold mtx, but reads need not (though, a reader who sees npages - * without holding the mutex can't assume anything about the rest of the - * state of the extents_t). - */ - atomic_zu_t npages; - - /* All stored extents must be in the same state. */ - extent_state_t state; - - /* - * If true, delay coalescing until eviction; otherwise coalesce during - * deallocation. - */ - bool delay_coalesce; -}; - -/* - * The following two structs are for experimental purposes. See - * experimental_utilization_query_ctl and - * experimental_utilization_batch_query_ctl in src/ctl.c. - */ - -struct extent_util_stats_s { - size_t nfree; - size_t nregs; - size_t size; -}; - -struct extent_util_stats_verbose_s { - void *slabcur_addr; - size_t nfree; - size_t nregs; - size_t size; - size_t bin_nfree; - size_t bin_nregs; -}; - -#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/extent_types.h b/contrib/jemalloc/include/jemalloc/internal/extent_types.h deleted file mode 100644 index 96925cf958870e..00000000000000 --- a/contrib/jemalloc/include/jemalloc/internal/extent_types.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H -#define JEMALLOC_INTERNAL_EXTENT_TYPES_H - -typedef struct extent_s extent_t; -typedef struct extents_s extents_t; - -typedef struct extent_util_stats_s extent_util_stats_t; -typedef struct extent_util_stats_verbose_s extent_util_stats_verbose_t; - -#define EXTENT_HOOKS_INITIALIZER NULL - -/* - * When reuse (and split) an active extent, (1U << opt_lg_extent_max_active_fit) - * is the max ratio between the size of the active extent and the new extent. - */ -#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6 - -typedef enum { - EXTENT_NOT_HEAD, - EXTENT_IS_HEAD /* Only relevant for Windows && opt.retain. */ -} extent_head_state_t; - -#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/fb.h b/contrib/jemalloc/include/jemalloc/internal/fb.h new file mode 100644 index 00000000000000..90c4091ff6cc00 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/fb.h @@ -0,0 +1,373 @@ +#ifndef JEMALLOC_INTERNAL_FB_H +#define JEMALLOC_INTERNAL_FB_H + +/* + * The flat bitmap module. This has a larger API relative to the bitmap module + * (supporting things like backwards searches, and searching for both set and + * unset bits), at the cost of slower operations for very large bitmaps. + * + * Initialized flat bitmaps start at all-zeros (all bits unset). + */ + +typedef unsigned long fb_group_t; +#define FB_GROUP_BITS (ZU(1) << (LG_SIZEOF_LONG + 3)) +#define FB_NGROUPS(nbits) ((nbits) / FB_GROUP_BITS \ + + ((nbits) % FB_GROUP_BITS == 0 ? 
0 : 1)) + +static inline void +fb_init(fb_group_t *fb, size_t nbits) { + size_t ngroups = FB_NGROUPS(nbits); + memset(fb, 0, ngroups * sizeof(fb_group_t)); +} + +static inline bool +fb_empty(fb_group_t *fb, size_t nbits) { + size_t ngroups = FB_NGROUPS(nbits); + for (size_t i = 0; i < ngroups; i++) { + if (fb[i] != 0) { + return false; + } + } + return true; +} + +static inline bool +fb_full(fb_group_t *fb, size_t nbits) { + size_t ngroups = FB_NGROUPS(nbits); + size_t trailing_bits = nbits % FB_GROUP_BITS; + size_t limit = (trailing_bits == 0 ? ngroups : ngroups - 1); + for (size_t i = 0; i < limit; i++) { + if (fb[i] != ~(fb_group_t)0) { + return false; + } + } + if (trailing_bits == 0) { + return true; + } + return fb[ngroups - 1] == ((fb_group_t)1 << trailing_bits) - 1; +} + +static inline bool +fb_get(fb_group_t *fb, size_t nbits, size_t bit) { + assert(bit < nbits); + size_t group_ind = bit / FB_GROUP_BITS; + size_t bit_ind = bit % FB_GROUP_BITS; + return (bool)(fb[group_ind] & ((fb_group_t)1 << bit_ind)); +} + +static inline void +fb_set(fb_group_t *fb, size_t nbits, size_t bit) { + assert(bit < nbits); + size_t group_ind = bit / FB_GROUP_BITS; + size_t bit_ind = bit % FB_GROUP_BITS; + fb[group_ind] |= ((fb_group_t)1 << bit_ind); +} + +static inline void +fb_unset(fb_group_t *fb, size_t nbits, size_t bit) { + assert(bit < nbits); + size_t group_ind = bit / FB_GROUP_BITS; + size_t bit_ind = bit % FB_GROUP_BITS; + fb[group_ind] &= ~((fb_group_t)1 << bit_ind); +} + + +/* + * Some implementation details. This visitation function lets us apply a group + * visitor to each group in the bitmap (potentially modifying it). The mask + * indicates which bits are logically part of the visitation. + */ +typedef void (*fb_group_visitor_t)(void *ctx, fb_group_t *fb, fb_group_t mask); +JEMALLOC_ALWAYS_INLINE void +fb_visit_impl(fb_group_t *fb, size_t nbits, fb_group_visitor_t visit, void *ctx, + size_t start, size_t cnt) { + assert(cnt > 0); + assert(start + cnt <= nbits); + size_t group_ind = start / FB_GROUP_BITS; + size_t start_bit_ind = start % FB_GROUP_BITS; + /* + * The first group is special; it's the only one we don't start writing + * to from bit 0. + */ + size_t first_group_cnt = (start_bit_ind + cnt > FB_GROUP_BITS + ? FB_GROUP_BITS - start_bit_ind : cnt); + /* + * We can basically split affected words into: + * - The first group, where we touch only the high bits + * - The last group, where we touch only the low bits + * - The middle, where we set all the bits to the same thing. + * We treat each case individually. The last two could be merged, but + * this can lead to bad codegen for those middle words. + */ + /* First group */ + fb_group_t mask = ((~(fb_group_t)0) + >> (FB_GROUP_BITS - first_group_cnt)) + << start_bit_ind; + visit(ctx, &fb[group_ind], mask); + + cnt -= first_group_cnt; + group_ind++; + /* Middle groups */ + while (cnt > FB_GROUP_BITS) { + visit(ctx, &fb[group_ind], ~(fb_group_t)0); + cnt -= FB_GROUP_BITS; + group_ind++; + } + /* Last group */ + if (cnt != 0) { + mask = (~(fb_group_t)0) >> (FB_GROUP_BITS - cnt); + visit(ctx, &fb[group_ind], mask); + } +} + +JEMALLOC_ALWAYS_INLINE void +fb_assign_visitor(void *ctx, fb_group_t *fb, fb_group_t mask) { + bool val = *(bool *)ctx; + if (val) { + *fb |= mask; + } else { + *fb &= ~mask; + } +} + +/* Sets the cnt bits starting at position start. Must not have a 0 count. 
*/ +static inline void +fb_set_range(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) { + bool val = true; + fb_visit_impl(fb, nbits, &fb_assign_visitor, &val, start, cnt); +} + +/* Unsets the cnt bits starting at position start. Must not have a 0 count. */ +static inline void +fb_unset_range(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) { + bool val = false; + fb_visit_impl(fb, nbits, &fb_assign_visitor, &val, start, cnt); +} + +JEMALLOC_ALWAYS_INLINE void +fb_scount_visitor(void *ctx, fb_group_t *fb, fb_group_t mask) { + size_t *scount = (size_t *)ctx; + *scount += popcount_lu(*fb & mask); +} + +/* Finds the number of set bit in the of length cnt starting at start. */ +JEMALLOC_ALWAYS_INLINE size_t +fb_scount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) { + size_t scount = 0; + fb_visit_impl(fb, nbits, &fb_scount_visitor, &scount, start, cnt); + return scount; +} + +/* Finds the number of unset bit in the of length cnt starting at start. */ +JEMALLOC_ALWAYS_INLINE size_t +fb_ucount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) { + size_t scount = fb_scount(fb, nbits, start, cnt); + return cnt - scount; +} + +/* + * An implementation detail; find the first bit at position >= min_bit with the + * value val. + * + * Returns the number of bits in the bitmap if no such bit exists. + */ +JEMALLOC_ALWAYS_INLINE ssize_t +fb_find_impl(fb_group_t *fb, size_t nbits, size_t start, bool val, + bool forward) { + assert(start < nbits); + size_t ngroups = FB_NGROUPS(nbits); + ssize_t group_ind = start / FB_GROUP_BITS; + size_t bit_ind = start % FB_GROUP_BITS; + + fb_group_t maybe_invert = (val ? 0 : (fb_group_t)-1); + + fb_group_t group = fb[group_ind]; + group ^= maybe_invert; + if (forward) { + /* Only keep ones in bits bit_ind and above. */ + group &= ~((1LU << bit_ind) - 1); + } else { + /* + * Only keep ones in bits bit_ind and below. You might more + * naturally express this as (1 << (bit_ind + 1)) - 1, but + * that shifts by an invalid amount if bit_ind is one less than + * FB_GROUP_BITS. + */ + group &= ((2LU << bit_ind) - 1); + } + ssize_t group_ind_bound = forward ? (ssize_t)ngroups : -1; + while (group == 0) { + group_ind += forward ? 1 : -1; + if (group_ind == group_ind_bound) { + return forward ? (ssize_t)nbits : (ssize_t)-1; + } + group = fb[group_ind]; + group ^= maybe_invert; + } + assert(group != 0); + size_t bit = forward ? ffs_lu(group) : fls_lu(group); + size_t pos = group_ind * FB_GROUP_BITS + bit; + /* + * The high bits of a partially filled last group are zeros, so if we're + * looking for zeros we don't want to report an invalid result. + */ + if (forward && !val && pos > nbits) { + return nbits; + } + return pos; +} + +/* + * Find the first set bit in the bitmap with an index >= min_bit. Returns the + * number of bits in the bitmap if no such bit exists. + */ +static inline size_t +fb_ffu(fb_group_t *fb, size_t nbits, size_t min_bit) { + return (size_t)fb_find_impl(fb, nbits, min_bit, /* val */ false, + /* forward */ true); +} + +/* The same, but looks for an unset bit. */ +static inline size_t +fb_ffs(fb_group_t *fb, size_t nbits, size_t min_bit) { + return (size_t)fb_find_impl(fb, nbits, min_bit, /* val */ true, + /* forward */ true); +} + +/* + * Find the last set bit in the bitmap with an index <= max_bit. Returns -1 if + * no such bit exists. 
+ */ +static inline ssize_t +fb_flu(fb_group_t *fb, size_t nbits, size_t max_bit) { + return fb_find_impl(fb, nbits, max_bit, /* val */ false, + /* forward */ false); +} + +static inline ssize_t +fb_fls(fb_group_t *fb, size_t nbits, size_t max_bit) { + return fb_find_impl(fb, nbits, max_bit, /* val */ true, + /* forward */ false); +} + +/* Returns whether or not we found a range. */ +JEMALLOC_ALWAYS_INLINE bool +fb_iter_range_impl(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin, + size_t *r_len, bool val, bool forward) { + assert(start < nbits); + ssize_t next_range_begin = fb_find_impl(fb, nbits, start, val, forward); + if ((forward && next_range_begin == (ssize_t)nbits) + || (!forward && next_range_begin == (ssize_t)-1)) { + return false; + } + /* Half open range; the set bits are [begin, end). */ + ssize_t next_range_end = fb_find_impl(fb, nbits, next_range_begin, !val, + forward); + if (forward) { + *r_begin = next_range_begin; + *r_len = next_range_end - next_range_begin; + } else { + *r_begin = next_range_end + 1; + *r_len = next_range_begin - next_range_end; + } + return true; +} + +/* + * Used to iterate through ranges of set bits. + * + * Tries to find the next contiguous sequence of set bits with a first index >= + * start. If one exists, puts the earliest bit of the range in *r_begin, its + * length in *r_len, and returns true. Otherwise, returns false (without + * touching *r_begin or *r_end). + */ +static inline bool +fb_srange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin, + size_t *r_len) { + return fb_iter_range_impl(fb, nbits, start, r_begin, r_len, + /* val */ true, /* forward */ true); +} + +/* + * The same as fb_srange_iter, but searches backwards from start rather than + * forwards. (The position returned is still the earliest bit in the range). + */ +static inline bool +fb_srange_riter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin, + size_t *r_len) { + return fb_iter_range_impl(fb, nbits, start, r_begin, r_len, + /* val */ true, /* forward */ false); +} + +/* Similar to fb_srange_iter, but searches for unset bits. */ +static inline bool +fb_urange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin, + size_t *r_len) { + return fb_iter_range_impl(fb, nbits, start, r_begin, r_len, + /* val */ false, /* forward */ true); +} + +/* Similar to fb_srange_riter, but searches for unset bits. */ +static inline bool +fb_urange_riter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin, + size_t *r_len) { + return fb_iter_range_impl(fb, nbits, start, r_begin, r_len, + /* val */ false, /* forward */ false); +} + +JEMALLOC_ALWAYS_INLINE size_t +fb_range_longest_impl(fb_group_t *fb, size_t nbits, bool val) { + size_t begin = 0; + size_t longest_len = 0; + size_t len = 0; + while (begin < nbits && fb_iter_range_impl(fb, nbits, begin, &begin, + &len, val, /* forward */ true)) { + if (len > longest_len) { + longest_len = len; + } + begin += len; + } + return longest_len; +} + +static inline size_t +fb_srange_longest(fb_group_t *fb, size_t nbits) { + return fb_range_longest_impl(fb, nbits, /* val */ true); +} + +static inline size_t +fb_urange_longest(fb_group_t *fb, size_t nbits) { + return fb_range_longest_impl(fb, nbits, /* val */ false); +} + +/* + * Initializes each bit of dst with the bitwise-AND of the corresponding bits of + * src1 and src2. All bitmaps must be the same size. 
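
As a rough, self-contained illustration of the flat-bitmap group math above (the first/middle/last group split used by fb_visit_impl), the sketch below sets a bit range that straddles a group boundary and counts it back with popcount. It mirrors the shown code but is not the jemalloc fb API itself:

```
#include <assert.h>
#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

typedef unsigned long group_t;
#define GROUP_BITS (sizeof(group_t) * CHAR_BIT)
#define NGROUPS(nbits) (((nbits) + GROUP_BITS - 1) / GROUP_BITS)

/* Set cnt bits starting at start, touching each affected word once. */
static void
toy_set_range(group_t *fb, size_t start, size_t cnt) {
	assert(cnt > 0);
	size_t group = start / GROUP_BITS;
	size_t bit = start % GROUP_BITS;
	/* First (possibly partial) group: only the bits from 'bit' upward. */
	size_t first_cnt = (bit + cnt > GROUP_BITS) ? GROUP_BITS - bit : cnt;
	fb[group] |= (((~(group_t)0) >> (GROUP_BITS - first_cnt)) << bit);
	cnt -= first_cnt;
	group++;
	/* Full middle groups. */
	while (cnt >= GROUP_BITS) {
		fb[group++] = ~(group_t)0;
		cnt -= GROUP_BITS;
	}
	/* Last partial group: only the low bits. */
	if (cnt != 0) {
		fb[group] |= ((~(group_t)0) >> (GROUP_BITS - cnt));
	}
}

int
main(void) {
	enum { NBITS = 200 };
	group_t fb[NGROUPS(NBITS)];
	memset(fb, 0, sizeof(fb));

	/* A range crossing a group boundary: bits [60, 70). */
	toy_set_range(fb, 60, 10);

	size_t set = 0;
	for (size_t i = 0; i < NGROUPS(NBITS); i++) {
		set += (size_t)__builtin_popcountl(fb[i]);
	}
	assert(set == 10);
	printf("%zu bits set\n", set);
	return 0;
}
```
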
+ */ +static inline void +fb_bit_and(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits) { + size_t ngroups = FB_NGROUPS(nbits); + for (size_t i = 0; i < ngroups; i++) { + dst[i] = src1[i] & src2[i]; + } +} + +/* Like fb_bit_and, but with bitwise-OR. */ +static inline void +fb_bit_or(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits) { + size_t ngroups = FB_NGROUPS(nbits); + for (size_t i = 0; i < ngroups; i++) { + dst[i] = src1[i] | src2[i]; + } +} + +/* Initializes dst bit i to the negation of source bit i. */ +static inline void +fb_bit_not(fb_group_t *dst, fb_group_t *src, size_t nbits) { + size_t ngroups = FB_NGROUPS(nbits); + for (size_t i = 0; i < ngroups; i++) { + dst[i] = ~src[i]; + } +} + +#endif /* JEMALLOC_INTERNAL_FB_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/fxp.h b/contrib/jemalloc/include/jemalloc/internal/fxp.h new file mode 100644 index 00000000000000..415a982890a14e --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/fxp.h @@ -0,0 +1,126 @@ +#ifndef JEMALLOC_INTERNAL_FXP_H +#define JEMALLOC_INTERNAL_FXP_H + +/* + * A simple fixed-point math implementation, supporting only unsigned values + * (with overflow being an error). + * + * It's not in general safe to use floating point in core code, because various + * libc implementations we get linked against can assume that malloc won't touch + * floating point state and call it with an unusual calling convention. + */ + +/* + * High 16 bits are the integer part, low 16 are the fractional part. Or + * equivalently, repr == 2**16 * val, where we use "val" to refer to the + * (imaginary) fractional representation of the true value. + * + * We pick a uint32_t here since it's convenient in some places to + * double the representation size (i.e. multiplication and division use + * 64-bit integer types), and a uint64_t is the largest type we're + * certain is available. + */ +typedef uint32_t fxp_t; +#define FXP_INIT_INT(x) ((x) << 16) +#define FXP_INIT_PERCENT(pct) (((pct) << 16) / 100) + +/* + * Amount of precision used in parsing and printing numbers. The integer bound + * is simply because the integer part of the number gets 16 bits, and so is + * bounded by 65536. + * + * We use a lot of precision for the fractional part, even though most of it + * gets rounded off; this lets us get exact values for the important special + * case where the denominator is a small power of 2 (for instance, + * 1/512 == 0.001953125 is exactly representable even with only 16 bits of + * fractional precision). We need to left-shift by 16 before dividing by + * 10**precision, so we pick precision to be floor(log(2**48)) = 14. + */ +#define FXP_INTEGER_PART_DIGITS 5 +#define FXP_FRACTIONAL_PART_DIGITS 14 + +/* + * In addition to the integer and fractional parts of the number, we need to + * include a null character and (possibly) a decimal point. + */ +#define FXP_BUF_SIZE (FXP_INTEGER_PART_DIGITS + FXP_FRACTIONAL_PART_DIGITS + 2) + +static inline fxp_t +fxp_add(fxp_t a, fxp_t b) { + return a + b; +} + +static inline fxp_t +fxp_sub(fxp_t a, fxp_t b) { + assert(a >= b); + return a - b; +} + +static inline fxp_t +fxp_mul(fxp_t a, fxp_t b) { + uint64_t unshifted = (uint64_t)a * (uint64_t)b; + /* + * Unshifted is (a.val * 2**16) * (b.val * 2**16) + * == (a.val * b.val) * 2**32, but we want + * (a.val * b.val) * 2 ** 16. 
+ */ + return (uint32_t)(unshifted >> 16); +} + +static inline fxp_t +fxp_div(fxp_t a, fxp_t b) { + assert(b != 0); + uint64_t unshifted = ((uint64_t)a << 32) / (uint64_t)b; + /* + * Unshifted is (a.val * 2**16) * (2**32) / (b.val * 2**16) + * == (a.val / b.val) * (2 ** 32), which again corresponds to a right + * shift of 16. + */ + return (uint32_t)(unshifted >> 16); +} + +static inline uint32_t +fxp_round_down(fxp_t a) { + return a >> 16; +} + +static inline uint32_t +fxp_round_nearest(fxp_t a) { + uint32_t fractional_part = (a & ((1U << 16) - 1)); + uint32_t increment = (uint32_t)(fractional_part >= (1U << 15)); + return (a >> 16) + increment; +} + +/* + * Approximately computes x * frac, without the size limitations that would be + * imposed by converting u to an fxp_t. + */ +static inline size_t +fxp_mul_frac(size_t x_orig, fxp_t frac) { + assert(frac <= (1U << 16)); + /* + * Work around an over-enthusiastic warning about type limits below (on + * 32-bit platforms, a size_t is always less than 1ULL << 48). + */ + uint64_t x = (uint64_t)x_orig; + /* + * If we can guarantee no overflow, multiply first before shifting, to + * preserve some precision. Otherwise, shift first and then multiply. + * In the latter case, we only lose the low 16 bits of a 48-bit number, + * so we're still accurate to within 1/2**32. + */ + if (x < (1ULL << 48)) { + return (size_t)((x * frac) >> 16); + } else { + return (size_t)((x >> 16) * (uint64_t)frac); + } +} + +/* + * Returns true on error. Otherwise, returns false and updates *ptr to point to + * the first character not parsed (because it wasn't a digit). + */ +bool fxp_parse(fxp_t *a, const char *ptr, char **end); +void fxp_print(fxp_t a, char buf[FXP_BUF_SIZE]); + +#endif /* JEMALLOC_INTERNAL_FXP_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/hash.h b/contrib/jemalloc/include/jemalloc/internal/hash.h index 935ddcfc95a166..7f945679efbc98 100644 --- a/contrib/jemalloc/include/jemalloc/internal/hash.h +++ b/contrib/jemalloc/include/jemalloc/internal/hash.h @@ -104,8 +104,8 @@ hash_x86_32(const void *key, int len, uint32_t seed) { uint32_t k1 = 0; switch (len & 3) { - case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH - case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH + case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH; + case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH; case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; } @@ -177,26 +177,26 @@ hash_x86_128(const void *key, const int len, uint32_t seed, uint32_t k4 = 0; switch (len & 15) { - case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH - case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH + case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH; + case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH; case 13: k4 ^= tail[12] << 0; k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; - JEMALLOC_FALLTHROUGH - case 12: k3 ^= tail[11] << 24; JEMALLOC_FALLTHROUGH - case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH - case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH + JEMALLOC_FALLTHROUGH; + case 12: k3 ^= (uint32_t) tail[11] << 24; JEMALLOC_FALLTHROUGH; + case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH; + case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH; case 9: k3 ^= tail[ 8] << 0; - k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; - JEMALLOC_FALLTHROUGH - case 8: k2 ^= tail[ 7] << 24; JEMALLOC_FALLTHROUGH - case 7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH - case 6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH + k3 *= c3; k3 = hash_rotl_32(k3, 17); 
k3 *= c4; h3 ^= k3; + JEMALLOC_FALLTHROUGH; + case 8: k2 ^= (uint32_t) tail[ 7] << 24; JEMALLOC_FALLTHROUGH; + case 7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH; + case 6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH; case 5: k2 ^= tail[ 4] << 0; k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; - JEMALLOC_FALLTHROUGH - case 4: k1 ^= tail[ 3] << 24; JEMALLOC_FALLTHROUGH - case 3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH - case 2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH + JEMALLOC_FALLTHROUGH; + case 4: k1 ^= (uint32_t) tail[ 3] << 24; JEMALLOC_FALLTHROUGH; + case 3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH; + case 2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH; case 1: k1 ^= tail[ 0] << 0; k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; break; @@ -261,24 +261,25 @@ hash_x64_128(const void *key, const int len, const uint32_t seed, uint64_t k2 = 0; switch (len & 15) { - case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH - case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH - case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH - case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH - case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH - case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; JEMALLOC_FALLTHROUGH + case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH; + case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH; + case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH; + case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH; + case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH; + case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; JEMALLOC_FALLTHROUGH; case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; - JEMALLOC_FALLTHROUGH - case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH - case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH - case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; JEMALLOC_FALLTHROUGH - case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; JEMALLOC_FALLTHROUGH - case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH - case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH - case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; JEMALLOC_FALLTHROUGH + JEMALLOC_FALLTHROUGH; + case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH; + case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH; + case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; JEMALLOC_FALLTHROUGH; + case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; JEMALLOC_FALLTHROUGH; + case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH; + case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH; + case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; JEMALLOC_FALLTHROUGH; case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; + break; } } diff --git a/contrib/jemalloc/include/jemalloc/internal/hpa.h b/contrib/jemalloc/include/jemalloc/internal/hpa.h new file mode 100644 index 00000000000000..f3562853e80233 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/hpa.h @@ -0,0 +1,182 @@ +#ifndef JEMALLOC_INTERNAL_HPA_H +#define JEMALLOC_INTERNAL_HPA_H + +#include "jemalloc/internal/exp_grow.h" +#include "jemalloc/internal/hpa_hooks.h" +#include "jemalloc/internal/hpa_opts.h" +#include "jemalloc/internal/pai.h" +#include "jemalloc/internal/psset.h" + +typedef struct hpa_central_s hpa_central_t; +struct hpa_central_s { + /* + * The mutex guarding most of the 
operations on the central data + * structure. + */ + malloc_mutex_t mtx; + /* + * Guards expansion of eden. We separate this from the regular mutex so + * that cheaper operations can still continue while we're doing the OS + * call. + */ + malloc_mutex_t grow_mtx; + /* + * Either NULL (if empty), or some integer multiple of a + * hugepage-aligned number of hugepages. We carve them off one at a + * time to satisfy new pageslab requests. + * + * Guarded by grow_mtx. + */ + void *eden; + size_t eden_len; + /* Source for metadata. */ + base_t *base; + /* Number of grow operations done on this hpa_central_t. */ + uint64_t age_counter; + + /* The HPA hooks. */ + hpa_hooks_t hooks; +}; + +typedef struct hpa_shard_nonderived_stats_s hpa_shard_nonderived_stats_t; +struct hpa_shard_nonderived_stats_s { + /* + * The number of times we've purged within a hugepage. + * + * Guarded by mtx. + */ + uint64_t npurge_passes; + /* + * The number of individual purge calls we perform (which should always + * be bigger than npurge_passes, since each pass purges at least one + * extent within a hugepage). + * + * Guarded by mtx. + */ + uint64_t npurges; + + /* + * The number of times we've hugified a pageslab. + * + * Guarded by mtx. + */ + uint64_t nhugifies; + /* + * The number of times we've dehugified a pageslab. + * + * Guarded by mtx. + */ + uint64_t ndehugifies; +}; + +/* Completely derived; only used by CTL. */ +typedef struct hpa_shard_stats_s hpa_shard_stats_t; +struct hpa_shard_stats_s { + psset_stats_t psset_stats; + hpa_shard_nonderived_stats_t nonderived_stats; +}; + +typedef struct hpa_shard_s hpa_shard_t; +struct hpa_shard_s { + /* + * pai must be the first member; we cast from a pointer to it to a + * pointer to the hpa_shard_t. + */ + pai_t pai; + + /* The central allocator we get our hugepages from. */ + hpa_central_t *central; + /* Protects most of this shard's state. */ + malloc_mutex_t mtx; + /* + * Guards the shard's access to the central allocator (preventing + * multiple threads operating on this shard from accessing the central + * allocator). + */ + malloc_mutex_t grow_mtx; + /* The base metadata allocator. */ + base_t *base; + + /* + * This edata cache is the one we use when allocating a small extent + * from a pageslab. The pageslab itself comes from the centralized + * allocator, and so will use its edata_cache. + */ + edata_cache_fast_t ecf; + + psset_t psset; + + /* + * How many grow operations have occurred. + * + * Guarded by grow_mtx. + */ + uint64_t age_counter; + + /* The arena ind we're associated with. */ + unsigned ind; + + /* + * Our emap. This is just a cache of the emap pointer in the associated + * hpa_central. + */ + emap_t *emap; + + /* The configuration choices for this hpa shard. */ + hpa_shard_opts_t opts; + + /* + * How many pages have we started but not yet finished purging in this + * hpa shard. + */ + size_t npending_purge; + + /* + * Those stats which are copied directly into the CTL-centric hpa shard + * stats. + */ + hpa_shard_nonderived_stats_t stats; + + /* + * Last time we performed purge on this shard. + */ + nstime_t last_purge; +}; + +/* + * Whether or not the HPA can be used given the current configuration. This is + * not necessarily a guarantee that it backs its allocations by hugepages, + * just that it can function properly given the system it's running on.
+ */ +bool hpa_supported(); +bool hpa_central_init(hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks); +bool hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap, + base_t *base, edata_cache_t *edata_cache, unsigned ind, + const hpa_shard_opts_t *opts); + +void hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src); +void hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard, + hpa_shard_stats_t *dst); + +/* + * Notify the shard that we won't use it for allocations much longer. Due to + * the possibility of races, we don't actually prevent allocations; just flush + * and disable the embedded edata_cache_small. + */ +void hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard); +void hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard); + +void hpa_shard_set_deferral_allowed(tsdn_t *tsdn, hpa_shard_t *shard, + bool deferral_allowed); +void hpa_shard_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard); + +/* + * We share the fork ordering with the PA and arena prefork handling; that's why + * these are 3 and 4 rather than 0 and 1. + */ +void hpa_shard_prefork3(tsdn_t *tsdn, hpa_shard_t *shard); +void hpa_shard_prefork4(tsdn_t *tsdn, hpa_shard_t *shard); +void hpa_shard_postfork_parent(tsdn_t *tsdn, hpa_shard_t *shard); +void hpa_shard_postfork_child(tsdn_t *tsdn, hpa_shard_t *shard); + +#endif /* JEMALLOC_INTERNAL_HPA_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/hpa_hooks.h b/contrib/jemalloc/include/jemalloc/internal/hpa_hooks.h new file mode 100644 index 00000000000000..4ea221cb0b42fa --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/hpa_hooks.h @@ -0,0 +1,17 @@ +#ifndef JEMALLOC_INTERNAL_HPA_HOOKS_H +#define JEMALLOC_INTERNAL_HPA_HOOKS_H + +typedef struct hpa_hooks_s hpa_hooks_t; +struct hpa_hooks_s { + void *(*map)(size_t size); + void (*unmap)(void *ptr, size_t size); + void (*purge)(void *ptr, size_t size); + void (*hugify)(void *ptr, size_t size); + void (*dehugify)(void *ptr, size_t size); + void (*curtime)(nstime_t *r_time, bool first_reading); + uint64_t (*ms_since)(nstime_t *r_time); +}; + +extern hpa_hooks_t hpa_hooks_default; + +#endif /* JEMALLOC_INTERNAL_HPA_HOOKS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/hpa_opts.h b/contrib/jemalloc/include/jemalloc/internal/hpa_opts.h new file mode 100644 index 00000000000000..ee84fea137570a --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/hpa_opts.h @@ -0,0 +1,74 @@ +#ifndef JEMALLOC_INTERNAL_HPA_OPTS_H +#define JEMALLOC_INTERNAL_HPA_OPTS_H + +#include "jemalloc/internal/fxp.h" + +/* + * This file is morally part of hpa.h, but is split out for header-ordering + * reasons. + */ + +typedef struct hpa_shard_opts_s hpa_shard_opts_t; +struct hpa_shard_opts_s { + /* + * The largest size we'll allocate out of the shard. For those + * allocations refused, the caller (in practice, the PA module) will + * fall back to the more general (for now) PAC, which can always handle + * any allocation request. + */ + size_t slab_max_alloc; + + /* + * When the number of active bytes in a hugepage is >= + * hugification_threshold, we force hugify it. + */ + size_t hugification_threshold; + + /* + * The HPA purges whenever the number of pages exceeds dirty_mult * + * active_pages. This may be set to (fxp_t)-1 to disable purging. + */ + fxp_t dirty_mult; + + /* + * Whether or not the PAI methods are allowed to defer work to a + * subsequent hpa_shard_do_deferred_work() call. Practically, this + * corresponds to background threads being enabled. 
We track this + * ourselves for encapsulation purposes. + */ + bool deferral_allowed; + + /* + * How long a hugepage has to be a hugification candidate before it will + * actually get hugified. + */ + uint64_t hugify_delay_ms; + + /* + * Minimum amount of time between purges. + */ + uint64_t min_purge_interval_ms; +}; + +#define HPA_SHARD_OPTS_DEFAULT { \ + /* slab_max_alloc */ \ + 64 * 1024, \ + /* hugification_threshold */ \ + HUGEPAGE * 95 / 100, \ + /* dirty_mult */ \ + FXP_INIT_PERCENT(25), \ + /* \ + * deferral_allowed \ + * \ + * Really, this is always set by the arena during creation \ + * or by an hpa_shard_set_deferral_allowed call, so the value \ + * we put here doesn't matter. \ + */ \ + false, \ + /* hugify_delay_ms */ \ + 10 * 1000, \ + /* min_purge_interval_ms */ \ + 5 * 1000 \ +} + +#endif /* JEMALLOC_INTERNAL_HPA_OPTS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/hpdata.h b/contrib/jemalloc/include/jemalloc/internal/hpdata.h new file mode 100644 index 00000000000000..1fb534db016aa4 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/hpdata.h @@ -0,0 +1,413 @@ +#ifndef JEMALLOC_INTERNAL_HPDATA_H +#define JEMALLOC_INTERNAL_HPDATA_H + +#include "jemalloc/internal/fb.h" +#include "jemalloc/internal/ph.h" +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/typed_list.h" + +/* + * The metadata representation we use for extents in hugepages. While the PAC + * uses the edata_t to represent both active and inactive extents, the HP only + * uses the edata_t for active ones; instead, inactive extent state is tracked + * within hpdata associated with the enclosing hugepage-sized, hugepage-aligned + * region of virtual address space. + * + * An hpdata need not be "truly" backed by a hugepage (which is not necessarily + * an observable property of any given region of address space). It's just + * hugepage-sized and hugepage-aligned; it's *potentially* huge. + */ +typedef struct hpdata_s hpdata_t; +ph_structs(hpdata_age_heap, hpdata_t); +struct hpdata_s { + /* + * We likewise follow the edata convention of mangling names and forcing + * the use of accessors -- this lets us add some consistency checks on + * access. + */ + + /* + * The address of the hugepage in question. This can't be named h_addr, + * since that conflicts with a macro defined in Windows headers. + */ + void *h_address; + /* Its age (measured in psset operations). */ + uint64_t h_age; + /* Whether or not we think the hugepage is mapped that way by the OS. */ + bool h_huge; + + /* + * For some properties, we keep parallel sets of bools; h_foo_allowed + * and h_in_psset_foo_container. This is a decoupling mechanism to + * avoid bothering the hpa (which manages policies) from the psset + * (which is the mechanism used to enforce those policies). This allows + * all the container management logic to live in one place, without the + * HPA needing to know or care how that happens. + */ + + /* + * Whether or not the hpdata is allowed to be used to serve allocations, + * and whether or not the psset is currently tracking it as such. + */ + bool h_alloc_allowed; + bool h_in_psset_alloc_container; + + /* + * The same, but with purging. There's no corresponding + * h_in_psset_purge_container, because the psset (currently) always + * removes hpdatas from their containers during updates (to implement + * LRU for purging). + */ + bool h_purge_allowed; + + /* And with hugifying. */ + bool h_hugify_allowed; + /* When we became a hugification candidate. 
*/ + nstime_t h_time_hugify_allowed; + bool h_in_psset_hugify_container; + + /* Whether or not a purge or hugify is currently happening. */ + bool h_mid_purge; + bool h_mid_hugify; + + /* + * Whether or not the hpdata is being updated in the psset (i.e. if + * there has been a psset_update_begin call issued without a matching + * psset_update_end call). Eventually this will expand to other types + * of updates. + */ + bool h_updating; + + /* Whether or not the hpdata is in a psset. */ + bool h_in_psset; + + union { + /* When nonempty (and also nonfull), used by the psset bins. */ + hpdata_age_heap_link_t age_link; + /* + * When empty (or not corresponding to any hugepage), list + * linkage. + */ + ql_elm(hpdata_t) ql_link_empty; + }; + + /* + * Linkage for the psset to track candidates for purging and hugifying. + */ + ql_elm(hpdata_t) ql_link_purge; + ql_elm(hpdata_t) ql_link_hugify; + + /* The length of the largest contiguous sequence of inactive pages. */ + size_t h_longest_free_range; + + /* Number of active pages. */ + size_t h_nactive; + + /* A bitmap with bits set in the active pages. */ + fb_group_t active_pages[FB_NGROUPS(HUGEPAGE_PAGES)]; + + /* + * Number of dirty or active pages, and a bitmap tracking them. One + * way to think of this is as which pages are dirty from the OS's + * perspective. + */ + size_t h_ntouched; + + /* The touched pages (using the same definition as above). */ + fb_group_t touched_pages[FB_NGROUPS(HUGEPAGE_PAGES)]; +}; + +TYPED_LIST(hpdata_empty_list, hpdata_t, ql_link_empty) +TYPED_LIST(hpdata_purge_list, hpdata_t, ql_link_purge) +TYPED_LIST(hpdata_hugify_list, hpdata_t, ql_link_hugify) + +ph_proto(, hpdata_age_heap, hpdata_t); + +static inline void * +hpdata_addr_get(const hpdata_t *hpdata) { + return hpdata->h_address; +} + +static inline void +hpdata_addr_set(hpdata_t *hpdata, void *addr) { + assert(HUGEPAGE_ADDR2BASE(addr) == addr); + hpdata->h_address = addr; +} + +static inline uint64_t +hpdata_age_get(const hpdata_t *hpdata) { + return hpdata->h_age; +} + +static inline void +hpdata_age_set(hpdata_t *hpdata, uint64_t age) { + hpdata->h_age = age; +} + +static inline bool +hpdata_huge_get(const hpdata_t *hpdata) { + return hpdata->h_huge; +} + +static inline bool +hpdata_alloc_allowed_get(const hpdata_t *hpdata) { + return hpdata->h_alloc_allowed; +} + +static inline void +hpdata_alloc_allowed_set(hpdata_t *hpdata, bool alloc_allowed) { + hpdata->h_alloc_allowed = alloc_allowed; +} + +static inline bool +hpdata_in_psset_alloc_container_get(const hpdata_t *hpdata) { + return hpdata->h_in_psset_alloc_container; +} + +static inline void +hpdata_in_psset_alloc_container_set(hpdata_t *hpdata, bool in_container) { + assert(in_container != hpdata->h_in_psset_alloc_container); + hpdata->h_in_psset_alloc_container = in_container; +} + +static inline bool +hpdata_purge_allowed_get(const hpdata_t *hpdata) { + return hpdata->h_purge_allowed; +} + +static inline void +hpdata_purge_allowed_set(hpdata_t *hpdata, bool purge_allowed) { + assert(purge_allowed == false || !hpdata->h_mid_purge); + hpdata->h_purge_allowed = purge_allowed; +} + +static inline bool +hpdata_hugify_allowed_get(const hpdata_t *hpdata) { + return hpdata->h_hugify_allowed; +} + +static inline void +hpdata_allow_hugify(hpdata_t *hpdata, nstime_t now) { + assert(!hpdata->h_mid_hugify); + hpdata->h_hugify_allowed = true; + hpdata->h_time_hugify_allowed = now; +} + +static inline nstime_t +hpdata_time_hugify_allowed(hpdata_t *hpdata) { + return hpdata->h_time_hugify_allowed; +} + +static inline 
void +hpdata_disallow_hugify(hpdata_t *hpdata) { + hpdata->h_hugify_allowed = false; +} + +static inline bool +hpdata_in_psset_hugify_container_get(const hpdata_t *hpdata) { + return hpdata->h_in_psset_hugify_container; +} + +static inline void +hpdata_in_psset_hugify_container_set(hpdata_t *hpdata, bool in_container) { + assert(in_container != hpdata->h_in_psset_hugify_container); + hpdata->h_in_psset_hugify_container = in_container; +} + +static inline bool +hpdata_mid_purge_get(const hpdata_t *hpdata) { + return hpdata->h_mid_purge; +} + +static inline void +hpdata_mid_purge_set(hpdata_t *hpdata, bool mid_purge) { + assert(mid_purge != hpdata->h_mid_purge); + hpdata->h_mid_purge = mid_purge; +} + +static inline bool +hpdata_mid_hugify_get(const hpdata_t *hpdata) { + return hpdata->h_mid_hugify; +} + +static inline void +hpdata_mid_hugify_set(hpdata_t *hpdata, bool mid_hugify) { + assert(mid_hugify != hpdata->h_mid_hugify); + hpdata->h_mid_hugify = mid_hugify; +} + +static inline bool +hpdata_changing_state_get(const hpdata_t *hpdata) { + return hpdata->h_mid_purge || hpdata->h_mid_hugify; +} + + +static inline bool +hpdata_updating_get(const hpdata_t *hpdata) { + return hpdata->h_updating; +} + +static inline void +hpdata_updating_set(hpdata_t *hpdata, bool updating) { + assert(updating != hpdata->h_updating); + hpdata->h_updating = updating; +} + +static inline bool +hpdata_in_psset_get(const hpdata_t *hpdata) { + return hpdata->h_in_psset; +} + +static inline void +hpdata_in_psset_set(hpdata_t *hpdata, bool in_psset) { + assert(in_psset != hpdata->h_in_psset); + hpdata->h_in_psset = in_psset; +} + +static inline size_t +hpdata_longest_free_range_get(const hpdata_t *hpdata) { + return hpdata->h_longest_free_range; +} + +static inline void +hpdata_longest_free_range_set(hpdata_t *hpdata, size_t longest_free_range) { + assert(longest_free_range <= HUGEPAGE_PAGES); + hpdata->h_longest_free_range = longest_free_range; +} + +static inline size_t +hpdata_nactive_get(hpdata_t *hpdata) { + return hpdata->h_nactive; +} + +static inline size_t +hpdata_ntouched_get(hpdata_t *hpdata) { + return hpdata->h_ntouched; +} + +static inline size_t +hpdata_ndirty_get(hpdata_t *hpdata) { + return hpdata->h_ntouched - hpdata->h_nactive; +} + +static inline size_t +hpdata_nretained_get(hpdata_t *hpdata) { + return HUGEPAGE_PAGES - hpdata->h_ntouched; +} + +static inline void +hpdata_assert_empty(hpdata_t *hpdata) { + assert(fb_empty(hpdata->active_pages, HUGEPAGE_PAGES)); + assert(hpdata->h_nactive == 0); +} + +/* + * Only used in tests, and in hpdata_assert_consistent, below. Verifies some + * consistency properties of the hpdata (e.g. that cached counts of page stats + * match computed ones). 
+ */ +static inline bool +hpdata_consistent(hpdata_t *hpdata) { + if (fb_urange_longest(hpdata->active_pages, HUGEPAGE_PAGES) + != hpdata_longest_free_range_get(hpdata)) { + return false; + } + if (fb_scount(hpdata->active_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES) + != hpdata->h_nactive) { + return false; + } + if (fb_scount(hpdata->touched_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES) + != hpdata->h_ntouched) { + return false; + } + if (hpdata->h_ntouched < hpdata->h_nactive) { + return false; + } + if (hpdata->h_huge && hpdata->h_ntouched != HUGEPAGE_PAGES) { + return false; + } + if (hpdata_changing_state_get(hpdata) + && ((hpdata->h_purge_allowed) || hpdata->h_hugify_allowed)) { + return false; + } + if (hpdata_hugify_allowed_get(hpdata) + != hpdata_in_psset_hugify_container_get(hpdata)) { + return false; + } + return true; +} + +static inline void +hpdata_assert_consistent(hpdata_t *hpdata) { + assert(hpdata_consistent(hpdata)); +} + +static inline bool +hpdata_empty(hpdata_t *hpdata) { + return hpdata->h_nactive == 0; +} + +static inline bool +hpdata_full(hpdata_t *hpdata) { + return hpdata->h_nactive == HUGEPAGE_PAGES; +} + +void hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age); + +/* + * Given an hpdata which can serve an allocation request, pick and reserve an + * offset within that allocation. + */ +void *hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz); +void hpdata_unreserve(hpdata_t *hpdata, void *begin, size_t sz); + +/* + * The hpdata_purge_state_t allows grabbing the metadata required to purge + * subranges of a hugepage while holding a lock, dropping the lock during the + * actual purging, and reacquiring it to update the metadata again. + */ +typedef struct hpdata_purge_state_s hpdata_purge_state_t; +struct hpdata_purge_state_s { + size_t npurged; + size_t ndirty_to_purge; + fb_group_t to_purge[FB_NGROUPS(HUGEPAGE_PAGES)]; + size_t next_purge_search_begin; +}; + +/* + * Initializes purge state. The access to hpdata must be externally + * synchronized with other hpdata_* calls. + * + * You can tell whether or not a thread is purging or hugifying a given hpdata + * via hpdata_changing_state_get(hpdata). Racing hugification or purging + * operations aren't allowed. + * + * Once you begin purging, you have to follow through and call hpdata_purge_next + * until you're done, and then call hpdata_purge_end. Allocating out of an + * hpdata undergoing purging is not allowed. + * + * Returns the number of dirty pages that will be purged. + */ +size_t hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state); + +/* + * If there are more extents to purge, sets *r_purge_addr and *r_purge_size to + * the address and size of the next range to purge, and returns true. + * Otherwise, returns false to indicate that we're done. + * + * This requires exclusive access to the purge state, but *not* to the hpdata. + * In particular, unreserve calls are allowed while purging (i.e. you can dalloc + * into one part of the hpdata while purging a different part). + */ +bool hpdata_purge_next(hpdata_t *hpdata, hpdata_purge_state_t *purge_state, + void **r_purge_addr, size_t *r_purge_size); +/* + * Updates the hpdata metadata after all purging is done. Needs external + * synchronization.
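Taken together, the purge entry points are driven in a begin/next/end sequence. A sketch of a hypothetical caller (illustrative only; the real callers in the HPA are more involved, and the purge callback below merely stands in for whatever hook actually releases the pages):

```c
/*
 * Illustrative driver: purge every dirty range in one hugepage. The caller
 * is assumed to provide the synchronization described above.
 */
static void
purge_one_hugepage(hpdata_t *ps, void (*purge)(void *addr, size_t size)) {
	hpdata_purge_state_t purge_state;
	size_t ndirty = hpdata_purge_begin(ps, &purge_state);

	void *purge_addr;
	size_t purge_size;
	while (hpdata_purge_next(ps, &purge_state, &purge_addr, &purge_size)) {
		purge(purge_addr, purge_size);	/* e.g. an madvise()-style hook */
	}
	hpdata_purge_end(ps, &purge_state);
	(void)ndirty;	/* could be fed into purge statistics */
}
```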
+ */ +void hpdata_purge_end(hpdata_t *hpdata, hpdata_purge_state_t *purge_state); + +void hpdata_hugify(hpdata_t *hpdata); +void hpdata_dehugify(hpdata_t *hpdata); + +#endif /* JEMALLOC_INTERNAL_HPDATA_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/inspect.h b/contrib/jemalloc/include/jemalloc/internal/inspect.h new file mode 100644 index 00000000000000..65fef51dfa8191 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/inspect.h @@ -0,0 +1,40 @@ +#ifndef JEMALLOC_INTERNAL_INSPECT_H +#define JEMALLOC_INTERNAL_INSPECT_H + +/* + * This module contains the heap introspection capabilities. For now they are + * exposed purely through mallctl APIs in the experimental namespace, but this + * may change over time. + */ + +/* + * The following two structs are for experimental purposes. See + * experimental_utilization_query_ctl and + * experimental_utilization_batch_query_ctl in src/ctl.c. + */ +typedef struct inspect_extent_util_stats_s inspect_extent_util_stats_t; +struct inspect_extent_util_stats_s { + size_t nfree; + size_t nregs; + size_t size; +}; + +typedef struct inspect_extent_util_stats_verbose_s + inspect_extent_util_stats_verbose_t; + +struct inspect_extent_util_stats_verbose_s { + void *slabcur_addr; + size_t nfree; + size_t nregs; + size_t size; + size_t bin_nfree; + size_t bin_nregs; +}; + +void inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr, + size_t *nfree, size_t *nregs, size_t *size); +void inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr, + size_t *nfree, size_t *nregs, size_t *size, + size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr); + +#endif /* JEMALLOC_INTERNAL_INSPECT_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h index a0e4f5af0124b1..3cbed72b0c7a9c 100644 --- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h +++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h @@ -8,6 +8,7 @@ #ifdef _WIN32 # include # include "msvc_compat/windows_extra.h" +# include "msvc_compat/strings.h" # ifdef _WIN64 # if LG_VADDR <= 32 # error Generate the headers using x64 vcargs @@ -34,8 +35,12 @@ # include # endif # include -# ifdef __FreeBSD__ +# if defined(__FreeBSD__) || defined(__DragonFly__) # include +# include +# if defined(__FreeBSD__) +# define cpu_set_t cpuset_t +# endif # endif # include # ifdef JEMALLOC_OS_UNFAIR_LOCK @@ -94,4 +99,13 @@ isblank(int c) { #endif #include +/* + * The Win32 midl compiler has #define small char; we don't use midl, but + * "small" is a nice identifier to have available when talking about size + * classes. + */ +#ifdef small +# undef small +#endif + #endif /* JEMALLOC_INTERNAL_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h index 1aedb916976bf7..900ae867f321de 100644 --- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h +++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h @@ -45,17 +45,17 @@ #define LG_VADDR 48 /* Defined if C11 atomics are available. */ -#define JEMALLOC_C11_ATOMICS 1 +#define JEMALLOC_C11_ATOMICS /* Defined if GCC __atomic atomics are available. */ -#define JEMALLOC_GCC_ATOMIC_ATOMICS 1 +#define JEMALLOC_GCC_ATOMIC_ATOMICS /* and the 8-bit variant support. 
*/ -#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1 +#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS /* Defined if GCC __sync atomics are available. */ -#define JEMALLOC_GCC_SYNC_ATOMICS 1 +#define JEMALLOC_GCC_SYNC_ATOMICS /* and the 8-bit variant support. */ -#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1 +#define JEMALLOC_GCC_U8_SYNC_ATOMICS /* * Defined if __builtin_clz() and __builtin_clzl() are available. @@ -73,7 +73,7 @@ /* * Defined if secure_getenv(3) is available. */ -/* #undef JEMALLOC_HAVE_SECURE_GETENV */ +#define JEMALLOC_HAVE_SECURE_GETENV /* * Defined if issetugid(2) is available. @@ -84,23 +84,34 @@ #define JEMALLOC_HAVE_PTHREAD_ATFORK /* Defined if pthread_setname_np(3) is available. */ -/* #undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP */ +#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP + +/* Defined if pthread_getname_np(3) is available. */ +#define JEMALLOC_HAVE_PTHREAD_GETNAME_NP + +/* Defined if pthread_get_name_np(3) is available. */ +#define JEMALLOC_HAVE_PTHREAD_GET_NAME_NP /* * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. */ -/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */ +#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE /* * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. */ -#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1 +#define JEMALLOC_HAVE_CLOCK_MONOTONIC /* * Defined if mach_absolute_time() is available. */ /* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */ +/* + * Defined if clock_gettime(CLOCK_REALTIME, ...) is available. + */ +#define JEMALLOC_HAVE_CLOCK_REALTIME + /* * Defined if _malloc_thread_cleanup() exists. At least in the case of * FreeBSD, pthread_key_create() allocates, which if used during malloc @@ -122,7 +133,7 @@ * _pthread_mutex_init_calloc_cb(), in which case the function is used in order * to avoid recursive allocation during mutex initialization. */ -#define JEMALLOC_MUTEX_INIT_CB 1 +#define JEMALLOC_MUTEX_INIT_CB /* Non-empty if the tls_model attribute is supported. */ #define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) @@ -163,6 +174,9 @@ /* Support utrace(2)-based tracing. */ #define JEMALLOC_UTRACE +/* Support utrace(2)-based tracing (label based signature). */ +/* #undef JEMALLOC_UTRACE_LABEL */ + /* Support optional abort() on OOM. */ #define JEMALLOC_XMALLOC @@ -178,6 +192,9 @@ /* One page is 2^LG_PAGE bytes. */ #define LG_PAGE 12 +/* Maximum number of regions in a slab. */ +/* #undef CONFIG_LG_SLAB_MAXREGS */ + /* * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the * system does not explicitly support huge pages; system calls that require @@ -291,17 +308,46 @@ */ /* #undef JEMALLOC_MADVISE_DONTDUMP */ +/* + * Defined if MADV_[NO]CORE is supported as an argument to madvise. + */ +#define JEMALLOC_MADVISE_NOCORE + +/* Defined if mprotect(2) is available. */ +#define JEMALLOC_HAVE_MPROTECT + /* * Defined if transparent huge pages (THPs) are supported via the * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled. */ /* #undef JEMALLOC_THP */ +/* Defined if posix_madvise is available. */ +/* #undef JEMALLOC_HAVE_POSIX_MADVISE */ + +/* + * Method for purging unused pages using posix_madvise. 
+ * + * posix_madvise(..., POSIX_MADV_DONTNEED) + */ +/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED */ +/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS */ + +/* + * Defined if memcntl page admin call is supported + */ +/* #undef JEMALLOC_HAVE_MEMCNTL */ + +/* + * Defined if malloc_size is supported + */ +/* #undef JEMALLOC_HAVE_MALLOC_SIZE */ + /* Define if operating system has alloca.h header. */ /* #undef JEMALLOC_HAS_ALLOCA_H */ /* C99 restrict keyword supported. */ -#define JEMALLOC_HAS_RESTRICT 1 +#define JEMALLOC_HAS_RESTRICT /* For use by hash code. */ /* #undef JEMALLOC_BIG_ENDIAN */ @@ -334,15 +380,15 @@ #define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP /* GNU specific sched_getcpu support */ -/* #undef JEMALLOC_HAVE_SCHED_GETCPU */ +#define JEMALLOC_HAVE_SCHED_GETCPU /* GNU specific sched_setaffinity support */ -/* #undef JEMALLOC_HAVE_SCHED_SETAFFINITY */ +#define JEMALLOC_HAVE_SCHED_SETAFFINITY /* * If defined, all the features necessary for background threads are present. */ -#define JEMALLOC_BACKGROUND_THREAD 1 +#define JEMALLOC_BACKGROUND_THREAD /* * If defined, jemalloc symbols are not exported (doesn't work when @@ -354,7 +400,7 @@ #define JEMALLOC_CONFIG_MALLOC_CONF "abort_conf:false" /* If defined, jemalloc takes the malloc/free/etc. symbol names. */ -#define JEMALLOC_IS_MALLOC 1 +#define JEMALLOC_IS_MALLOC /* * Defined if strerror_r returns char * if _GNU_SOURCE is defined. @@ -364,4 +410,19 @@ /* Performs additional safety checks when defined. */ /* #undef JEMALLOC_OPT_SAFETY_CHECKS */ +/* Is C++ support being built? */ +#define JEMALLOC_ENABLE_CXX + +/* Performs additional size checks when defined. */ +/* #undef JEMALLOC_OPT_SIZE_CHECKS */ + +/* Allows sampled junk and stash for checking use-after-free when defined. */ +/* #undef JEMALLOC_UAF_DETECTION */ + +/* Darwin VM_MAKE_TAG support */ +/* #undef JEMALLOC_HAVE_VM_MAKE_TAG */ + +/* If defined, realloc(ptr, 0) defaults to "free" instead of "alloc". */ +/* #undef JEMALLOC_ZERO_REALLOC_DEFAULT_FREE */ + #endif /* JEMALLOC_INTERNAL_DEFS_H_ */ diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h index d291170beefa83..fc834c67373d18 100644 --- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h +++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h @@ -2,7 +2,10 @@ #define JEMALLOC_INTERNAL_EXTERNS_H #include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/hpa_opts.h" +#include "jemalloc/internal/sec_opts.h" #include "jemalloc/internal/tsd_types.h" +#include "jemalloc/internal/nstime.h" /* TSD checks this to set thread local slow state accordingly. */ extern bool malloc_slow; @@ -10,14 +13,30 @@ extern bool malloc_slow; /* Run-time options. 
*/ extern bool opt_abort; extern bool opt_abort_conf; +extern bool opt_trust_madvise; extern bool opt_confirm_conf; +extern bool opt_hpa; +extern hpa_shard_opts_t opt_hpa_opts; +extern sec_opts_t opt_hpa_sec_opts; + extern const char *opt_junk; extern bool opt_junk_alloc; extern bool opt_junk_free; +extern void (*junk_free_callback)(void *ptr, size_t size); +extern void (*junk_alloc_callback)(void *ptr, size_t size); extern bool opt_utrace; extern bool opt_xmalloc; +extern bool opt_experimental_infallible_new; extern bool opt_zero; extern unsigned opt_narenas; +extern zero_realloc_action_t opt_zero_realloc_action; +extern malloc_init_t malloc_init_state; +extern const char *zero_realloc_mode_names[]; +extern atomic_zu_t zero_realloc_count; +extern bool opt_cache_oblivious; + +/* Escape free-fastpath when ptr & mask == 0 (for sanitization purpose). */ +extern uintptr_t san_cache_bin_nonfast_mask; /* Number of CPUs. */ extern unsigned ncpus; @@ -41,17 +60,16 @@ void *bootstrap_calloc(size_t num, size_t size); void bootstrap_free(void *ptr); void arena_set(unsigned ind, arena_t *arena); unsigned narenas_total_get(void); -arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks); -arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind); +arena_t *arena_init(tsdn_t *tsdn, unsigned ind, const arena_config_t *config); arena_t *arena_choose_hard(tsd_t *tsd, bool internal); -void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind); +void arena_migrate(tsd_t *tsd, arena_t *oldarena, arena_t *newarena); void iarena_cleanup(tsd_t *tsd); void arena_cleanup(tsd_t *tsd); -void arenas_tdata_cleanup(tsd_t *tsd); +size_t batch_alloc(void **ptrs, size_t num, size_t size, int flags); void jemalloc_prefork(void); void jemalloc_postfork_parent(void); void jemalloc_postfork_child(void); -bool malloc_initialized(void); void je_sdallocx_noflags(void *ptr, size_t size); +void *malloc_default(size_t size); #endif /* JEMALLOC_INTERNAL_EXTERNS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h index 437eaa4079399f..751c112ff4c057 100644 --- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h +++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h @@ -10,7 +10,7 @@ * structs, externs, and inlines), and included each header file multiple times * in this file, picking out the portion we want on each pass using the * following #defines: - * JEMALLOC_H_TYPES : Preprocessor-defined constants and psuedo-opaque data + * JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data * types. * JEMALLOC_H_STRUCTS : Data structures. * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes. 
@@ -40,8 +40,6 @@ /* TYPES */ /******************************************************************************/ -#include "jemalloc/internal/extent_types.h" -#include "jemalloc/internal/base_types.h" #include "jemalloc/internal/arena_types.h" #include "jemalloc/internal/tcache_types.h" #include "jemalloc/internal/prof_types.h" @@ -50,11 +48,8 @@ /* STRUCTS */ /******************************************************************************/ -#include "jemalloc/internal/arena_structs_a.h" -#include "jemalloc/internal/extent_structs.h" -#include "jemalloc/internal/base_structs.h" #include "jemalloc/internal/prof_structs.h" -#include "jemalloc/internal/arena_structs_b.h" +#include "jemalloc/internal/arena_structs.h" #include "jemalloc/internal/tcache_structs.h" #include "jemalloc/internal/background_thread_structs.h" @@ -63,8 +58,6 @@ /******************************************************************************/ #include "jemalloc/internal/jemalloc_internal_externs.h" -#include "jemalloc/internal/extent_externs.h" -#include "jemalloc/internal/base_externs.h" #include "jemalloc/internal/arena_externs.h" #include "jemalloc/internal/large_externs.h" #include "jemalloc/internal/tcache_externs.h" @@ -76,19 +69,16 @@ /******************************************************************************/ #include "jemalloc/internal/jemalloc_internal_inlines_a.h" -#include "jemalloc/internal/base_inlines.h" /* * Include portions of arena code interleaved with tcache code in order to * resolve circular dependencies. */ -#include "jemalloc/internal/prof_inlines_a.h" #include "jemalloc/internal/arena_inlines_a.h" -#include "jemalloc/internal/extent_inlines.h" #include "jemalloc/internal/jemalloc_internal_inlines_b.h" #include "jemalloc/internal/tcache_inlines.h" #include "jemalloc/internal/arena_inlines_b.h" #include "jemalloc/internal/jemalloc_internal_inlines_c.h" -#include "jemalloc/internal/prof_inlines_b.h" +#include "jemalloc/internal/prof_inlines.h" #include "jemalloc/internal/background_thread_inlines.h" #endif /* JEMALLOC_INTERNAL_INCLUDES_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h index ddde9b4e63e671..9e27cc3012fde1 100644 --- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h +++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h @@ -56,31 +56,6 @@ percpu_arena_ind_limit(percpu_arena_mode_t mode) { } } -static inline arena_tdata_t * -arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) { - arena_tdata_t *tdata; - arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); - - if (unlikely(arenas_tdata == NULL)) { - /* arenas_tdata hasn't been initialized yet. */ - return arena_tdata_get_hard(tsd, ind); - } - if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) { - /* - * ind is invalid, cache is old (too small), or tdata to be - * initialized. - */ - return (refresh_if_missing ? 
arena_tdata_get_hard(tsd, ind) : - NULL); - } - - tdata = &arenas_tdata[ind]; - if (likely(tdata != NULL) || !refresh_if_missing) { - return tdata; - } - return arena_tdata_get_hard(tsd, ind); -} - static inline arena_t * arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) { arena_t *ret; @@ -90,36 +65,12 @@ arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) { ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE); if (unlikely(ret == NULL)) { if (init_if_missing) { - ret = arena_init(tsdn, ind, - (extent_hooks_t *)&extent_hooks_default); + ret = arena_init(tsdn, ind, &arena_config_default); } } return ret; } -static inline ticker_t * -decay_ticker_get(tsd_t *tsd, unsigned ind) { - arena_tdata_t *tdata; - - tdata = arena_tdata_get(tsd, ind, true); - if (unlikely(tdata == NULL)) { - return NULL; - } - return &tdata->decay_ticker; -} - -JEMALLOC_ALWAYS_INLINE cache_bin_t * -tcache_small_bin_get(tcache_t *tcache, szind_t binind) { - assert(binind < SC_NBINS); - return &tcache->bins_small[binind]; -} - -JEMALLOC_ALWAYS_INLINE cache_bin_t * -tcache_large_bin_get(tcache_t *tcache, szind_t binind) { - assert(binind >= SC_NBINS &&binind < nhbins); - return &tcache->bins_large[binind - SC_NBINS]; -} - JEMALLOC_ALWAYS_INLINE bool tcache_available(tsd_t *tsd) { /* @@ -129,9 +80,9 @@ tcache_available(tsd_t *tsd) { */ if (likely(tsd_tcache_enabled_get(tsd))) { /* Associated arena == NULL implies tcache init in progress. */ - assert(tsd_tcachep_get(tsd)->arena == NULL || - tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail != - NULL); + if (config_debug && tsd_tcache_slowp_get(tsd)->arena != NULL) { + tcache_assert_initialized(tsd_tcachep_get(tsd)); + } return true; } @@ -147,28 +98,25 @@ tcache_get(tsd_t *tsd) { return tsd_tcachep_get(tsd); } +JEMALLOC_ALWAYS_INLINE tcache_slow_t * +tcache_slow_get(tsd_t *tsd) { + if (!tcache_available(tsd)) { + return NULL; + } + + return tsd_tcache_slowp_get(tsd); +} + static inline void pre_reentrancy(tsd_t *tsd, arena_t *arena) { /* arena is the current context. Reentry from a0 is not allowed. */ assert(arena != arena_get(tsd_tsdn(tsd), 0, false)); - - bool fast = tsd_fast(tsd); - assert(tsd_reentrancy_level_get(tsd) < INT8_MAX); - ++*tsd_reentrancy_levelp_get(tsd); - if (fast) { - /* Prepare slow path for reentrancy. 
*/ - tsd_slow_update(tsd); - assert(tsd_state_get(tsd) == tsd_state_nominal_slow); - } + tsd_pre_reentrancy_raw(tsd); } static inline void post_reentrancy(tsd_t *tsd) { - int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd); - assert(*reentrancy_level > 0); - if (--*reentrancy_level == 0) { - tsd_slow_update(tsd); - } + tsd_post_reentrancy_raw(tsd); } #endif /* JEMALLOC_INTERNAL_INLINES_A_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h index 70d6e57885708d..152f8a039569a0 100644 --- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h +++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h @@ -1,7 +1,31 @@ #ifndef JEMALLOC_INTERNAL_INLINES_B_H #define JEMALLOC_INTERNAL_INLINES_B_H -#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/extent.h" + +static inline void +percpu_arena_update(tsd_t *tsd, unsigned cpu) { + assert(have_percpu_arena); + arena_t *oldarena = tsd_arena_get(tsd); + assert(oldarena != NULL); + unsigned oldind = arena_ind_get(oldarena); + + if (oldind != cpu) { + unsigned newind = cpu; + arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true); + assert(newarena != NULL); + + /* Set new arena/tcache associations. */ + arena_migrate(tsd, oldarena, newarena); + tcache_t *tcache = tcache_get(tsd); + if (tcache != NULL) { + tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd); + tcache_arena_reassociate(tsd_tsdn(tsd), tcache_slow, + tcache, newarena); + } + } +} + /* Choose an arena based on a per-thread value. */ static inline arena_t * @@ -22,18 +46,19 @@ arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) { ret = arena_choose_hard(tsd, internal); assert(ret); if (tcache_available(tsd)) { - tcache_t *tcache = tcache_get(tsd); - if (tcache->arena != NULL) { - /* See comments in tcache_data_init().*/ - assert(tcache->arena == + tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd); + tcache_t *tcache = tsd_tcachep_get(tsd); + if (tcache_slow->arena != NULL) { + /* See comments in tsd_tcache_data_init().*/ + assert(tcache_slow->arena == arena_get(tsd_tsdn(tsd), 0, false)); - if (tcache->arena != ret) { + if (tcache_slow->arena != ret) { tcache_arena_reassociate(tsd_tsdn(tsd), - tcache, ret); + tcache_slow, tcache, ret); } } else { - tcache_arena_associate(tsd_tsdn(tsd), tcache, - ret); + tcache_arena_associate(tsd_tsdn(tsd), + tcache_slow, tcache, ret); } } } @@ -75,13 +100,4 @@ arena_is_auto(arena_t *arena) { return (arena_ind_get(arena) < manual_arena_base); } -JEMALLOC_ALWAYS_INLINE extent_t * -iealloc(tsdn_t *tsdn, const void *ptr) { - rtree_ctx_t rtree_ctx_fallback; - rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); - - return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, - (uintptr_t)ptr, true); -} - #endif /* JEMALLOC_INTERNAL_INLINES_B_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h index cdb10eb21f73ed..b0868b7d616b3d 100644 --- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h +++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h @@ -3,7 +3,9 @@ #include "jemalloc/internal/hook.h" #include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/log.h" #include "jemalloc/internal/sz.h" +#include "jemalloc/internal/thread_event.h" #include "jemalloc/internal/witness.h" 
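The pre_reentrancy()/post_reentrancy() pair above is used to bracket calls that may re-enter the allocator, such as user-supplied hooks. A minimal sketch of the pattern (hypothetical caller, not code from this patch):

```c
/*
 * Hypothetical example: run a user-supplied callback that may itself
 * allocate, without letting it hit the allocator fast paths.
 */
static void
call_user_hook(tsd_t *tsd, arena_t *arena, void (*user_hook)(void)) {
	pre_reentrancy(tsd, arena);	/* bumps the level, forces slow paths */
	user_hook();			/* may call malloc/free internally */
	post_reentrancy(tsd);		/* restores fast paths at level 0 */
}
```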
/* @@ -101,8 +103,8 @@ ivsalloc(tsdn_t *tsdn, const void *ptr) { } JEMALLOC_ALWAYS_INLINE void -idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx, - bool is_internal, bool slow_path) { +idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, + emap_alloc_ctx_t *alloc_ctx, bool is_internal, bool slow_path) { assert(ptr != NULL); assert(!is_internal || tcache == NULL); assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr))); @@ -125,7 +127,7 @@ idalloc(tsd_t *tsd, void *ptr) { JEMALLOC_ALWAYS_INLINE void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, - alloc_ctx_t *alloc_ctx, bool slow_path) { + emap_alloc_ctx_t *alloc_ctx, bool slow_path) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path); @@ -219,4 +221,120 @@ ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, newsize); } +JEMALLOC_ALWAYS_INLINE void +fastpath_success_finish(tsd_t *tsd, uint64_t allocated_after, + cache_bin_t *bin, void *ret) { + thread_allocated_set(tsd, allocated_after); + if (config_stats) { + bin->tstats.nrequests++; + } + + LOG("core.malloc.exit", "result: %p", ret); +} + +JEMALLOC_ALWAYS_INLINE bool +malloc_initialized(void) { + return (malloc_init_state == malloc_init_initialized); +} + +/* + * malloc() fastpath. Included here so that we can inline it into operator new; + * function call overhead there is non-negligible as a fraction of total CPU in + * allocation-heavy C++ programs. We take the fallback alloc to allow malloc + * (which can return NULL) to differ in its behavior from operator new (which + * can't). It matches the signature of malloc / operator new so that we can + * tail-call the fallback allocator, allowing us to avoid setting up the call + * frame in the common case. + * + * Fastpath assumes size <= SC_LOOKUP_MAXCLASS, and that we hit + * tcache. If either of these is false, we tail-call to the slowpath, + * malloc_default(). Tail-calling is used to avoid any caller-saved + * registers. + * + * fastpath supports ticker and profiling, both of which will also + * tail-call to the slowpath if they fire. + */ +JEMALLOC_ALWAYS_INLINE void * +imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) { + LOG("core.malloc.entry", "size: %zu", size); + if (tsd_get_allocates() && unlikely(!malloc_initialized())) { + return fallback_alloc(size); + } + + tsd_t *tsd = tsd_get(false); + if (unlikely((size > SC_LOOKUP_MAXCLASS) || tsd == NULL)) { + return fallback_alloc(size); + } + /* + * The code below till the branch checking the next_event threshold may + * execute before malloc_init(), in which case the threshold is 0 to + * trigger slow path and initialization. + * + * Note that when uninitialized, only the fast-path variants of the sz / + * tsd facilities may be called. + */ + szind_t ind; + /* + * The thread_allocated counter in tsd serves as a general purpose + * accumulator for bytes of allocation to trigger different types of + * events. usize is always needed to advance thread_allocated, though + * it's not always needed in the core allocation logic. + */ + size_t usize; + sz_size2index_usize_fastpath(size, &ind, &usize); + /* Fast path relies on size being a bin. 
*/ + assert(ind < SC_NBINS); + assert((SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS) && + (size <= SC_SMALL_MAXCLASS)); + + uint64_t allocated, threshold; + te_malloc_fastpath_ctx(tsd, &allocated, &threshold); + uint64_t allocated_after = allocated + usize; + /* + * The ind and usize might be uninitialized (or partially) before + * malloc_init(). The assertions check for: 1) full correctness (usize + * & ind) when initialized; and 2) guaranteed slow-path (threshold == 0) + * when !initialized. + */ + if (!malloc_initialized()) { + assert(threshold == 0); + } else { + assert(ind == sz_size2index(size)); + assert(usize > 0 && usize == sz_index2size(ind)); + } + /* + * Check for events and tsd non-nominal (fast_threshold will be set to + * 0) in a single branch. + */ + if (unlikely(allocated_after >= threshold)) { + return fallback_alloc(size); + } + assert(tsd_fast(tsd)); + + tcache_t *tcache = tsd_tcachep_get(tsd); + assert(tcache == tcache_get(tsd)); + cache_bin_t *bin = &tcache->bins[ind]; + bool tcache_success; + void *ret; + + /* + * We split up the code this way so that redundant low-water + * computation doesn't happen on the (more common) case in which we + * don't touch the low water mark. The compiler won't do this + * duplication on its own. + */ + ret = cache_bin_alloc_easy(bin, &tcache_success); + if (tcache_success) { + fastpath_success_finish(tsd, allocated_after, bin, ret); + return ret; + } + ret = cache_bin_alloc(bin, &tcache_success); + if (tcache_success) { + fastpath_success_finish(tsd, allocated_after, bin, ret); + return ret; + } + + return fallback_alloc(size); +} + #endif /* JEMALLOC_INTERNAL_INLINES_C_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h index d8ea06f6d069d1..e97b5f90730cff 100644 --- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h +++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h @@ -4,7 +4,11 @@ #ifdef JEMALLOC_DEBUG # define JEMALLOC_ALWAYS_INLINE static inline #else -# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline +# ifdef _MSC_VER +# define JEMALLOC_ALWAYS_INLINE static __forceinline +# else +# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline +# endif #endif #ifdef _MSC_VER # define inline _inline @@ -40,13 +44,6 @@ #define JEMALLOC_VA_ARGS_HEAD(head, ...) head #define JEMALLOC_VA_ARGS_TAIL(head, ...) __VA_ARGS__ -#if (defined(__GNUC__) || defined(__GNUG__)) && !defined(__clang__) \ - && defined(JEMALLOC_HAVE_ATTR) && (__GNUC__ >= 7) -#define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough); -#else -#define JEMALLOC_FALLTHROUGH /* falls through */ -#endif - /* Diagnostic suppression macros */ #if defined(_MSC_VER) && !defined(__clang__) # define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push)) diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h index e296c5a7e847b7..62c2b59c71c20a 100644 --- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h +++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h @@ -3,15 +3,31 @@ #include "jemalloc/internal/quantum.h" -/* Page size index type. */ -typedef unsigned pszind_t; - -/* Size class index type. */ -typedef unsigned szind_t; - /* Processor / core id type. */ typedef int malloc_cpuid_t; +/* When realloc(non-null-ptr, 0) is called, what happens? 
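The intended consumer of imalloc_fastpath() above is the public allocation entry point, which passes its out-of-line slow path as the fallback. Roughly (a simplified sketch; export and attribute macros omitted):

```c
/*
 * Simplified sketch: the exported malloc tail-calls malloc_default()
 * (declared in jemalloc_internal_externs.h) whenever the fast path bails.
 */
void *
je_malloc(size_t size) {
	return imalloc_fastpath(size, &malloc_default);
}
```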
*/ +enum zero_realloc_action_e { + /* Realloc(ptr, 0) is free(ptr); return malloc(0); */ + zero_realloc_action_alloc = 0, + /* Realloc(ptr, 0) is free(ptr); */ + zero_realloc_action_free = 1, + /* Realloc(ptr, 0) aborts. */ + zero_realloc_action_abort = 2 +}; +typedef enum zero_realloc_action_e zero_realloc_action_t; + +/* Signature of write callback. */ +typedef void (write_cb_t)(void *, const char *); + +enum malloc_init_e { + malloc_init_uninitialized = 3, + malloc_init_a0_initialized = 2, + malloc_init_recursible = 1, + malloc_init_initialized = 0 /* Common case --> jnz. */ +}; +typedef enum malloc_init_e malloc_init_t; + /* * Flags bits: * diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_preamble.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_preamble.h index d1dcc50d5a3b18..33d509e4591ee0 100644 --- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_preamble.h +++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_preamble.h @@ -4,8 +4,14 @@ #include "jemalloc_internal_defs.h" #include "jemalloc/internal/jemalloc_internal_decls.h" -#ifdef JEMALLOC_UTRACE +#if defined(JEMALLOC_UTRACE) || defined(JEMALLOC_UTRACE_LABEL) #include +# if defined(JEMALLOC_UTRACE) +# define UTRACE_CALL(p, l) utrace(p, l) +# else +# define UTRACE_CALL(p, l) utrace("jemalloc_process", p, l) +# define JEMALLOC_UTRACE +# endif #endif #include "un-namespace.h" @@ -177,6 +183,35 @@ static const bool config_opt_safety_checks = #endif ; +/* + * Extra debugging of sized deallocations too onerous to be included in the + * general safety checks. + */ +static const bool config_opt_size_checks = +#if defined(JEMALLOC_OPT_SIZE_CHECKS) || defined(JEMALLOC_DEBUG) + true +#else + false +#endif + ; + +static const bool config_uaf_detection = +#if defined(JEMALLOC_UAF_DETECTION) || defined(JEMALLOC_DEBUG) + true +#else + false +#endif + ; + +/* Whether or not the C++ extensions are enabled. */ +static const bool config_enable_cxx = +#ifdef JEMALLOC_ENABLE_CXX + true +#else + false +#endif +; + #if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU) /* Currently percpu_arena depends on sched_getcpu. 
*/ #define JEMALLOC_PERCPU_ARENA @@ -206,5 +241,20 @@ static const bool have_background_thread = false #endif ; +static const bool config_high_res_timer = +#ifdef JEMALLOC_HAVE_CLOCK_REALTIME + true +#else + false +#endif + ; + +static const bool have_memcntl = +#ifdef JEMALLOC_HAVE_MEMCNTL + true +#else + false +#endif + ; #endif /* JEMALLOC_PREAMBLE_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/large_externs.h b/contrib/jemalloc/include/jemalloc/internal/large_externs.h index a05019e8a54207..8e09122dfb7b82 100644 --- a/contrib/jemalloc/include/jemalloc/internal/large_externs.h +++ b/contrib/jemalloc/include/jemalloc/internal/large_externs.h @@ -6,27 +6,19 @@ void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero); void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero); -bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, +bool large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min, size_t usize_max, bool zero); void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize, size_t alignment, bool zero, tcache_t *tcache, hook_ralloc_args_t *hook_args); -typedef void (large_dalloc_junk_t)(void *, size_t); -extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk; - -typedef void (large_dalloc_maybe_junk_t)(void *, size_t); -extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk; - -void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent); -void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent); -void large_dalloc(tsdn_t *tsdn, extent_t *extent); -size_t large_salloc(tsdn_t *tsdn, const extent_t *extent); -prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent); -void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx); -void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent); - -nstime_t large_prof_alloc_time_get(const extent_t *extent); -void large_prof_alloc_time_set(extent_t *extent, nstime_t time); +void large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata); +void large_dalloc_finish(tsdn_t *tsdn, edata_t *edata); +void large_dalloc(tsdn_t *tsdn, edata_t *edata); +size_t large_salloc(tsdn_t *tsdn, const edata_t *edata); +void large_prof_info_get(tsd_t *tsd, edata_t *edata, prof_info_t *prof_info, + bool reset_recent); +void large_prof_tctx_reset(edata_t *edata); +void large_prof_info_set(edata_t *edata, prof_tctx_t *tctx, size_t size); #endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/lockedint.h b/contrib/jemalloc/include/jemalloc/internal/lockedint.h new file mode 100644 index 00000000000000..d020ebec1c4bba --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/lockedint.h @@ -0,0 +1,204 @@ +#ifndef JEMALLOC_INTERNAL_LOCKEDINT_H +#define JEMALLOC_INTERNAL_LOCKEDINT_H + +/* + * In those architectures that support 64-bit atomics, we use atomic updates for + * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize + * externally. + */ + +typedef struct locked_u64_s locked_u64_t; +#ifdef JEMALLOC_ATOMIC_U64 +struct locked_u64_s { + atomic_u64_t val; +}; +#else +/* Must hold the associated mutex. 
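config_opt_size_checks, config_uaf_detection, config_enable_cxx, config_high_res_timer, and have_memcntl above all follow the same idiom: a static const bool derived from preprocessor defines, so feature checks can be ordinary if statements and the compiler discards the dead branch. A small self-contained illustration follows; DEMO_OPT_SIZE_CHECKS is a hypothetical define standing in for the real configure output.

```
/*
 * Self-contained illustration of the compile-time bool idiom used by the
 * config_* flags above. DEMO_OPT_SIZE_CHECKS stands in for whatever the
 * build would define (e.g. JEMALLOC_OPT_SIZE_CHECKS or JEMALLOC_DEBUG in
 * the real headers).
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_OPT_SIZE_CHECKS 1

static const bool demo_config_opt_size_checks =
#if defined(DEMO_OPT_SIZE_CHECKS)
    true
#else
    false
#endif
    ;

int
main(void) {
	/*
	 * An ordinary `if` instead of #ifdef: the compiler sees a constant
	 * condition and discards the dead branch, so disabled checks cost
	 * nothing at run time while keeping both branches compiled.
	 */
	if (demo_config_opt_size_checks) {
		puts("extra size checks enabled");
	} else {
		puts("extra size checks disabled");
	}
	return 0;
}
```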
*/ +struct locked_u64_s { + uint64_t val; +}; +#endif + +typedef struct locked_zu_s locked_zu_t; +struct locked_zu_s { + atomic_zu_t val; +}; + +#ifndef JEMALLOC_ATOMIC_U64 +# define LOCKEDINT_MTX_DECLARE(name) malloc_mutex_t name; +# define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) \ + malloc_mutex_init(&(mu), name, rank, rank_mode) +# define LOCKEDINT_MTX(mtx) (&(mtx)) +# define LOCKEDINT_MTX_LOCK(tsdn, mu) malloc_mutex_lock(tsdn, &(mu)) +# define LOCKEDINT_MTX_UNLOCK(tsdn, mu) malloc_mutex_unlock(tsdn, &(mu)) +# define LOCKEDINT_MTX_PREFORK(tsdn, mu) malloc_mutex_prefork(tsdn, &(mu)) +# define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu) \ + malloc_mutex_postfork_parent(tsdn, &(mu)) +# define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu) \ + malloc_mutex_postfork_child(tsdn, &(mu)) +#else +# define LOCKEDINT_MTX_DECLARE(name) +# define LOCKEDINT_MTX(mtx) NULL +# define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) false +# define LOCKEDINT_MTX_LOCK(tsdn, mu) +# define LOCKEDINT_MTX_UNLOCK(tsdn, mu) +# define LOCKEDINT_MTX_PREFORK(tsdn, mu) +# define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu) +# define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu) +#endif + +#ifdef JEMALLOC_ATOMIC_U64 +# define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) assert((mtx) == NULL) +#else +# define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) \ + malloc_mutex_assert_owner(tsdn, (mtx)) +#endif + +static inline uint64_t +locked_read_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p) { + LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx); +#ifdef JEMALLOC_ATOMIC_U64 + return atomic_load_u64(&p->val, ATOMIC_RELAXED); +#else + return p->val; +#endif +} + +static inline void +locked_inc_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p, + uint64_t x) { + LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx); +#ifdef JEMALLOC_ATOMIC_U64 + atomic_fetch_add_u64(&p->val, x, ATOMIC_RELAXED); +#else + p->val += x; +#endif +} + +static inline void +locked_dec_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p, + uint64_t x) { + LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx); +#ifdef JEMALLOC_ATOMIC_U64 + uint64_t r = atomic_fetch_sub_u64(&p->val, x, ATOMIC_RELAXED); + assert(r - x <= r); +#else + p->val -= x; + assert(p->val + x >= p->val); +#endif +} + +/* Increment and take modulus. Returns whether the modulo made any change. */ +static inline bool +locked_inc_mod_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p, + const uint64_t x, const uint64_t modulus) { + LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx); + uint64_t before, after; + bool overflow; +#ifdef JEMALLOC_ATOMIC_U64 + before = atomic_load_u64(&p->val, ATOMIC_RELAXED); + do { + after = before + x; + assert(after >= before); + overflow = (after >= modulus); + if (overflow) { + after %= modulus; + } + } while (!atomic_compare_exchange_weak_u64(&p->val, &before, after, + ATOMIC_RELAXED, ATOMIC_RELAXED)); +#else + before = p->val; + after = before + x; + overflow = (after >= modulus); + if (overflow) { + after %= modulus; + } + p->val = after; +#endif + return overflow; +} + +/* + * Non-atomically sets *dst += src. *dst needs external synchronization. + * This lets us avoid the cost of a fetch_add when its unnecessary (note that + * the types here are atomic). 
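The locked_u64_t helpers above provide one API with two implementations: a relaxed atomic where 64-bit atomics exist, and a plain integer guarded by an external mutex where they do not. The simplified standalone model below mirrors that split with demo_* names; the hard-coded DEMO_HAVE_ATOMIC_U64 switch stands in for the JEMALLOC_ATOMIC_U64 configure result.

```
/* Simplified standalone model of the locked_u64_t split (demo types only). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_HAVE_ATOMIC_U64 1	/* pretend configure detected 64-bit atomics */

#if DEMO_HAVE_ATOMIC_U64
typedef struct { _Atomic uint64_t val; } demo_locked_u64_t;

static inline void
demo_locked_inc_u64(pthread_mutex_t *mtx, demo_locked_u64_t *p, uint64_t x) {
	(void)mtx;	/* no lock needed; a relaxed RMW suffices for a counter */
	atomic_fetch_add_explicit(&p->val, x, memory_order_relaxed);
}

static inline uint64_t
demo_locked_read_u64(pthread_mutex_t *mtx, demo_locked_u64_t *p) {
	(void)mtx;
	return atomic_load_explicit(&p->val, memory_order_relaxed);
}
#else
typedef struct { uint64_t val; } demo_locked_u64_t;

static inline void
demo_locked_inc_u64(pthread_mutex_t *mtx, demo_locked_u64_t *p, uint64_t x) {
	(void)mtx;	/* caller must already hold mtx, as asserted upstream */
	p->val += x;
}

static inline uint64_t
demo_locked_read_u64(pthread_mutex_t *mtx, demo_locked_u64_t *p) {
	(void)mtx;	/* caller must already hold mtx */
	return p->val;
}
#endif

int
main(void) {
	pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
	demo_locked_u64_t c = {0};
	demo_locked_inc_u64(&mtx, &c, 3);
	printf("%llu\n",
	    (unsigned long long)demo_locked_read_u64(&mtx, &c));
	return 0;
}
```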
+ */ +static inline void +locked_inc_u64_unsynchronized(locked_u64_t *dst, uint64_t src) { +#ifdef JEMALLOC_ATOMIC_U64 + uint64_t cur_dst = atomic_load_u64(&dst->val, ATOMIC_RELAXED); + atomic_store_u64(&dst->val, src + cur_dst, ATOMIC_RELAXED); +#else + dst->val += src; +#endif +} + +static inline uint64_t +locked_read_u64_unsynchronized(locked_u64_t *p) { +#ifdef JEMALLOC_ATOMIC_U64 + return atomic_load_u64(&p->val, ATOMIC_RELAXED); +#else + return p->val; +#endif +} + +static inline void +locked_init_u64_unsynchronized(locked_u64_t *p, uint64_t x) { +#ifdef JEMALLOC_ATOMIC_U64 + atomic_store_u64(&p->val, x, ATOMIC_RELAXED); +#else + p->val = x; +#endif +} + +static inline size_t +locked_read_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p) { + LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx); +#ifdef JEMALLOC_ATOMIC_U64 + return atomic_load_zu(&p->val, ATOMIC_RELAXED); +#else + return atomic_load_zu(&p->val, ATOMIC_RELAXED); +#endif +} + +static inline void +locked_inc_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p, + size_t x) { + LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx); +#ifdef JEMALLOC_ATOMIC_U64 + atomic_fetch_add_zu(&p->val, x, ATOMIC_RELAXED); +#else + size_t cur = atomic_load_zu(&p->val, ATOMIC_RELAXED); + atomic_store_zu(&p->val, cur + x, ATOMIC_RELAXED); +#endif +} + +static inline void +locked_dec_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p, + size_t x) { + LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx); +#ifdef JEMALLOC_ATOMIC_U64 + size_t r = atomic_fetch_sub_zu(&p->val, x, ATOMIC_RELAXED); + assert(r - x <= r); +#else + size_t cur = atomic_load_zu(&p->val, ATOMIC_RELAXED); + atomic_store_zu(&p->val, cur - x, ATOMIC_RELAXED); +#endif +} + +/* Like the _u64 variant, needs an externally synchronized *dst. */ +static inline void +locked_inc_zu_unsynchronized(locked_zu_t *dst, size_t src) { + size_t cur_dst = atomic_load_zu(&dst->val, ATOMIC_RELAXED); + atomic_store_zu(&dst->val, src + cur_dst, ATOMIC_RELAXED); +} + +/* + * Unlike the _u64 variant, this is safe to call unconditionally. + */ +static inline size_t +locked_read_atomic_zu(locked_zu_t *p) { + return atomic_load_zu(&p->val, ATOMIC_RELAXED); +} + +#endif /* JEMALLOC_INTERNAL_LOCKEDINT_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/malloc_io.h b/contrib/jemalloc/include/jemalloc/internal/malloc_io.h index 1d1a414e0f0b56..a375bdae084aa0 100644 --- a/contrib/jemalloc/include/jemalloc/internal/malloc_io.h +++ b/contrib/jemalloc/include/jemalloc/internal/malloc_io.h @@ -1,6 +1,8 @@ #ifndef JEMALLOC_INTERNAL_MALLOC_IO_H #define JEMALLOC_INTERNAL_MALLOC_IO_H +#include "jemalloc/internal/jemalloc_internal_types.h" + #ifdef _WIN32 # ifdef _WIN64 # define FMT64_PREFIX "ll" @@ -40,6 +42,7 @@ */ #define MALLOC_PRINTF_BUFSIZE 4096 +write_cb_t wrtmessage; int buferror(int err, char *buf, size_t buflen); uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base); @@ -57,10 +60,10 @@ size_t malloc_snprintf(char *str, size_t size, const char *format, ...) * The caller can set write_cb to null to choose to print with the * je_malloc_message hook. */ -void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, va_list ap); -void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); +void malloc_vcprintf(write_cb_t *write_cb, void *cbopaque, const char *format, + va_list ap); +void malloc_cprintf(write_cb_t *write_cb, void *cbopaque, const char *format, + ...) 
JEMALLOC_FORMAT_PRINTF(3, 4); void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); static inline ssize_t diff --git a/contrib/jemalloc/include/jemalloc/internal/mpsc_queue.h b/contrib/jemalloc/include/jemalloc/internal/mpsc_queue.h new file mode 100644 index 00000000000000..316ea9b16eddb7 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/mpsc_queue.h @@ -0,0 +1,134 @@ +#ifndef JEMALLOC_INTERNAL_MPSC_QUEUE_H +#define JEMALLOC_INTERNAL_MPSC_QUEUE_H + +#include "jemalloc/internal/atomic.h" + +/* + * A concurrent implementation of a multi-producer, single-consumer queue. It + * supports three concurrent operations: + * - Push + * - Push batch + * - Pop batch + * + * These operations are all lock-free. + * + * The implementation is the simple two-stack queue built on a Treiber stack. + * It's not terribly efficient, but this isn't expected to go into anywhere with + * hot code. In fact, we don't really even need queue semantics in any + * anticipated use cases; we could get away with just the stack. But this way + * lets us frame the API in terms of the existing list types, which is a nice + * convenience. We can save on cache misses by introducing our own (parallel) + * single-linked list type here, and dropping FIFO semantics, if we need this to + * get faster. Since we're currently providing queue semantics though, we use + * the prev field in the link rather than the next field for Treiber-stack + * linkage, so that we can preserve order for bash-pushed lists (recall that the + * two-stack tricks reverses orders in the lock-free first stack). + */ + +#define mpsc_queue(a_type) \ +struct { \ + atomic_p_t tail; \ +} + +#define mpsc_queue_proto(a_attr, a_prefix, a_queue_type, a_type, \ + a_list_type) \ +/* Initialize a queue. */ \ +a_attr void \ +a_prefix##new(a_queue_type *queue); \ +/* Insert all items in src into the queue, clearing src. */ \ +a_attr void \ +a_prefix##push_batch(a_queue_type *queue, a_list_type *src); \ +/* Insert node into the queue. */ \ +a_attr void \ +a_prefix##push(a_queue_type *queue, a_type *node); \ +/* \ + * Pop all items in the queue into the list at dst. dst should already \ + * be initialized (and may contain existing items, which then remain \ + * in dst). \ + */ \ +a_attr void \ +a_prefix##pop_batch(a_queue_type *queue, a_list_type *dst); + +#define mpsc_queue_gen(a_attr, a_prefix, a_queue_type, a_type, \ + a_list_type, a_link) \ +a_attr void \ +a_prefix##new(a_queue_type *queue) { \ + atomic_store_p(&queue->tail, NULL, ATOMIC_RELAXED); \ +} \ +a_attr void \ +a_prefix##push_batch(a_queue_type *queue, a_list_type *src) { \ + /* \ + * Reuse the ql list next field as the Treiber stack next \ + * field. \ + */ \ + a_type *first = ql_first(src); \ + a_type *last = ql_last(src, a_link); \ + void* cur_tail = atomic_load_p(&queue->tail, ATOMIC_RELAXED); \ + do { \ + /* \ + * Note that this breaks the queue ring structure; \ + * it's not a ring any more! \ + */ \ + first->a_link.qre_prev = cur_tail; \ + /* \ + * Note: the upcoming CAS doesn't need an atomic; every \ + * push only needs to synchronize with the next pop, \ + * which we get from the release sequence rules. 
\ + */ \ + } while (!atomic_compare_exchange_weak_p(&queue->tail, \ + &cur_tail, last, ATOMIC_RELEASE, ATOMIC_RELAXED)); \ + ql_new(src); \ +} \ +a_attr void \ +a_prefix##push(a_queue_type *queue, a_type *node) { \ + ql_elm_new(node, a_link); \ + a_list_type list; \ + ql_new(&list); \ + ql_head_insert(&list, node, a_link); \ + a_prefix##push_batch(queue, &list); \ +} \ +a_attr void \ +a_prefix##pop_batch(a_queue_type *queue, a_list_type *dst) { \ + a_type *tail = atomic_load_p(&queue->tail, ATOMIC_RELAXED); \ + if (tail == NULL) { \ + /* \ + * In the common special case where there are no \ + * pending elements, bail early without a costly RMW. \ + */ \ + return; \ + } \ + tail = atomic_exchange_p(&queue->tail, NULL, ATOMIC_ACQUIRE); \ + /* \ + * It's a single-consumer queue, so if cur started non-NULL, \ + * it'd better stay non-NULL. \ + */ \ + assert(tail != NULL); \ + /* \ + * We iterate through the stack and both fix up the link \ + * structure (stack insertion broke the list requirement that \ + * the list be circularly linked). It's just as efficient at \ + * this point to make the queue a "real" queue, so do that as \ + * well. \ + * If this ever gets to be a hot spot, we can omit this fixup \ + * and make the queue a bag (i.e. not necessarily ordered), but \ + * that would mean jettisoning the existing list API as the \ + * batch pushing/popping interface. \ + */ \ + a_list_type reversed; \ + ql_new(&reversed); \ + while (tail != NULL) { \ + /* \ + * Pop an item off the stack, prepend it onto the list \ + * (reversing the order). Recall that we use the \ + * list prev field as the Treiber stack next field to \ + * preserve order of batch-pushed items when reversed. \ + */ \ + a_type *next = tail->a_link.qre_prev; \ + ql_elm_new(tail, a_link); \ + ql_head_insert(&reversed, tail, a_link); \ + tail = next; \ + } \ + ql_concat(dst, &reversed, a_link); \ +} + +#endif /* JEMALLOC_INTERNAL_MPSC_QUEUE_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/mutex.h b/contrib/jemalloc/include/jemalloc/internal/mutex.h index 94af161836492c..8468e7bcb864f6 100644 --- a/contrib/jemalloc/include/jemalloc/internal/mutex.h +++ b/contrib/jemalloc/include/jemalloc/internal/mutex.h @@ -6,6 +6,8 @@ #include "jemalloc/internal/tsd.h" #include "jemalloc/internal/witness.h" +extern int64_t opt_mutex_max_spin; + typedef enum { /* Can only acquire one mutex of a given witness rank at a time. */ malloc_mutex_rank_exclusive, @@ -43,7 +45,7 @@ struct malloc_mutex_s { #else pthread_mutex_t lock; #endif - /* + /* * Hint flag to avoid exclusive cache line contention * during spin waiting */ @@ -67,12 +69,6 @@ struct malloc_mutex_s { #endif }; -/* - * Based on benchmark results, a fixed spin with this amount of retries works - * well for our critical sections. - */ -#define MALLOC_MUTEX_MAX_SPIN 250 - #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 # define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock) @@ -243,22 +239,25 @@ malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) { witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); } -/* Copy the prof data from mutex for processing. */ static inline void -malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data, - malloc_mutex_t *mutex) { - mutex_prof_data_t *source = &mutex->prof_data; - /* Can only read holding the mutex. 
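The mpsc_queue macros above implement the two-stack trick: producers CAS new nodes onto a Treiber stack, and the single consumer exchanges the whole stack out and reverses it to recover FIFO order. Below is a compact standalone sketch of that idea with C11 atomics; the demo_* types and the single-threaded driver are illustrative only, not the generated queue code.

```
/* Standalone sketch of a Treiber-stack based MPSC queue (demo types). */
#include <stdatomic.h>
#include <stdio.h>

typedef struct demo_node_s {
	struct demo_node_s *next;
	int value;
} demo_node_t;

typedef struct { _Atomic(demo_node_t *) tail; } demo_mpsc_t;

/* Producers: lock-free push via a CAS loop. */
static void
demo_push(demo_mpsc_t *q, demo_node_t *node) {
	demo_node_t *cur = atomic_load_explicit(&q->tail, memory_order_relaxed);
	do {
		node->next = cur;	/* link onto the current stack top */
	} while (!atomic_compare_exchange_weak_explicit(&q->tail, &cur, node,
	    memory_order_release, memory_order_relaxed));
}

/* Single consumer: grab everything at once, then reverse into FIFO order. */
static demo_node_t *
demo_pop_all(demo_mpsc_t *q) {
	demo_node_t *stack = atomic_exchange_explicit(&q->tail, NULL,
	    memory_order_acquire);
	demo_node_t *fifo = NULL;
	while (stack != NULL) {
		demo_node_t *next = stack->next;
		stack->next = fifo;
		fifo = stack;
		stack = next;
	}
	return fifo;
}

int
main(void) {
	demo_mpsc_t q = { NULL };
	demo_node_t n[3] = {{NULL, 1}, {NULL, 2}, {NULL, 3}};
	for (int i = 0; i < 3; i++) {
		demo_push(&q, &n[i]);
	}
	for (demo_node_t *it = demo_pop_all(&q); it != NULL; it = it->next) {
		printf("%d ", it->value);	/* prints 1 2 3 */
	}
	printf("\n");
	return 0;
}
```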
*/ - malloc_mutex_assert_owner(tsdn, mutex); - +malloc_mutex_prof_copy(mutex_prof_data_t *dst, mutex_prof_data_t *source) { /* * Not *really* allowed (we shouldn't be doing non-atomic loads of * atomic data), but the mutex protection makes this safe, and writing * a member-for-member copy is tedious for this situation. */ - *data = *source; + *dst = *source; /* n_wait_thds is not reported (modified w/o locking). */ - atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED); + atomic_store_u32(&dst->n_waiting_thds, 0, ATOMIC_RELAXED); +} + +/* Copy the prof data from mutex for processing. */ +static inline void +malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data, + malloc_mutex_t *mutex) { + /* Can only read holding the mutex. */ + malloc_mutex_assert_owner(tsdn, mutex); + malloc_mutex_prof_copy(data, &mutex->prof_data); } static inline void @@ -283,4 +282,36 @@ malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data, data->n_lock_ops += source->n_lock_ops; } +/* Compare the prof data and update to the maximum. */ +static inline void +malloc_mutex_prof_max_update(tsdn_t *tsdn, mutex_prof_data_t *data, + malloc_mutex_t *mutex) { + mutex_prof_data_t *source = &mutex->prof_data; + /* Can only read holding the mutex. */ + malloc_mutex_assert_owner(tsdn, mutex); + + if (nstime_compare(&source->tot_wait_time, &data->tot_wait_time) > 0) { + nstime_copy(&data->tot_wait_time, &source->tot_wait_time); + } + if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) { + nstime_copy(&data->max_wait_time, &source->max_wait_time); + } + if (source->n_wait_times > data->n_wait_times) { + data->n_wait_times = source->n_wait_times; + } + if (source->n_spin_acquired > data->n_spin_acquired) { + data->n_spin_acquired = source->n_spin_acquired; + } + if (source->max_n_thds > data->max_n_thds) { + data->max_n_thds = source->max_n_thds; + } + if (source->n_owner_switches > data->n_owner_switches) { + data->n_owner_switches = source->n_owner_switches; + } + if (source->n_lock_ops > data->n_lock_ops) { + data->n_lock_ops = source->n_lock_ops; + } + /* n_wait_thds is not reported. */ +} + #endif /* JEMALLOC_INTERNAL_MUTEX_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/mutex_pool.h b/contrib/jemalloc/include/jemalloc/internal/mutex_pool.h deleted file mode 100644 index 726cece90bc721..00000000000000 --- a/contrib/jemalloc/include/jemalloc/internal/mutex_pool.h +++ /dev/null @@ -1,94 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_H -#define JEMALLOC_INTERNAL_MUTEX_POOL_H - -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/witness.h" - -/* We do mod reductions by this value, so it should be kept a power of 2. */ -#define MUTEX_POOL_SIZE 256 - -typedef struct mutex_pool_s mutex_pool_t; -struct mutex_pool_s { - malloc_mutex_t mutexes[MUTEX_POOL_SIZE]; -}; - -bool mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank); - -/* Internal helper - not meant to be called outside this module. */ -static inline malloc_mutex_t * -mutex_pool_mutex(mutex_pool_t *pool, uintptr_t key) { - size_t hash_result[2]; - hash(&key, sizeof(key), 0xd50dcc1b, hash_result); - return &pool->mutexes[hash_result[0] % MUTEX_POOL_SIZE]; -} - -static inline void -mutex_pool_assert_not_held(tsdn_t *tsdn, mutex_pool_t *pool) { - for (int i = 0; i < MUTEX_POOL_SIZE; i++) { - malloc_mutex_assert_not_owner(tsdn, &pool->mutexes[i]); - } -} - -/* - * Note that a mutex pool doesn't work exactly the way an embdedded mutex would. 
- * You're not allowed to acquire mutexes in the pool one at a time. You have to - * acquire all the mutexes you'll need in a single function call, and then - * release them all in a single function call. - */ - -static inline void -mutex_pool_lock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) { - mutex_pool_assert_not_held(tsdn, pool); - - malloc_mutex_t *mutex = mutex_pool_mutex(pool, key); - malloc_mutex_lock(tsdn, mutex); -} - -static inline void -mutex_pool_unlock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) { - malloc_mutex_t *mutex = mutex_pool_mutex(pool, key); - malloc_mutex_unlock(tsdn, mutex); - - mutex_pool_assert_not_held(tsdn, pool); -} - -static inline void -mutex_pool_lock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1, - uintptr_t key2) { - mutex_pool_assert_not_held(tsdn, pool); - - malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1); - malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2); - if ((uintptr_t)mutex1 < (uintptr_t)mutex2) { - malloc_mutex_lock(tsdn, mutex1); - malloc_mutex_lock(tsdn, mutex2); - } else if ((uintptr_t)mutex1 == (uintptr_t)mutex2) { - malloc_mutex_lock(tsdn, mutex1); - } else { - malloc_mutex_lock(tsdn, mutex2); - malloc_mutex_lock(tsdn, mutex1); - } -} - -static inline void -mutex_pool_unlock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1, - uintptr_t key2) { - malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1); - malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2); - if (mutex1 == mutex2) { - malloc_mutex_unlock(tsdn, mutex1); - } else { - malloc_mutex_unlock(tsdn, mutex1); - malloc_mutex_unlock(tsdn, mutex2); - } - - mutex_pool_assert_not_held(tsdn, pool); -} - -static inline void -mutex_pool_assert_owner(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) { - malloc_mutex_assert_owner(tsdn, mutex_pool_mutex(pool, key)); -} - -#endif /* JEMALLOC_INTERNAL_MUTEX_POOL_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/mutex_prof.h b/contrib/jemalloc/include/jemalloc/internal/mutex_prof.h index 2cb8fb0cbf7ba1..4a526a5aeb86bb 100644 --- a/contrib/jemalloc/include/jemalloc/internal/mutex_prof.h +++ b/contrib/jemalloc/include/jemalloc/internal/mutex_prof.h @@ -7,8 +7,14 @@ #define MUTEX_PROF_GLOBAL_MUTEXES \ OP(background_thread) \ + OP(max_per_bg_thd) \ OP(ctl) \ - OP(prof) + OP(prof) \ + OP(prof_thds_data) \ + OP(prof_dump) \ + OP(prof_recent_alloc) \ + OP(prof_recent_dump) \ + OP(prof_stats) typedef enum { #define OP(mtx) global_prof_mutex_##mtx, @@ -26,7 +32,10 @@ typedef enum { OP(decay_dirty) \ OP(decay_muzzy) \ OP(base) \ - OP(tcache_list) + OP(tcache_list) \ + OP(hpa_shard) \ + OP(hpa_shard_grow) \ + OP(hpa_sec) typedef enum { #define OP(mtx) arena_prof_mutex_##mtx, diff --git a/contrib/jemalloc/include/jemalloc/internal/nstime.h b/contrib/jemalloc/include/jemalloc/internal/nstime.h index 17c177c7f4b38d..486e5ccacc73ed 100644 --- a/contrib/jemalloc/include/jemalloc/internal/nstime.h +++ b/contrib/jemalloc/include/jemalloc/internal/nstime.h @@ -3,12 +3,23 @@ /* Maximum supported number of seconds (~584 years). */ #define NSTIME_SEC_MAX KQU(18446744072) -#define NSTIME_ZERO_INITIALIZER {0} + +#define NSTIME_MAGIC ((uint32_t)0xb8a9ce37) +#ifdef JEMALLOC_DEBUG +# define NSTIME_ZERO_INITIALIZER {0, NSTIME_MAGIC} +#else +# define NSTIME_ZERO_INITIALIZER {0} +#endif typedef struct { uint64_t ns; +#ifdef JEMALLOC_DEBUG + uint32_t magic; /* Tracks if initialized. 
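The debug-only magic field added to nstime_t here is an instance of a general pattern: initializers stamp a known value, accessors assert it, and use of an uninitialized timestamp trips an assertion instead of silently reading garbage. A standalone sketch of the pattern with hypothetical demo names:

```
/* Standalone sketch of a debug-only initialization magic, demo names only. */
#include <assert.h>
#include <stdint.h>

#define DEMO_DEBUG 1
#define DEMO_MAGIC ((uint32_t)0xb8a9ce37)

typedef struct {
	uint64_t ns;
#if DEMO_DEBUG
	uint32_t magic;	/* tracks whether the struct was initialized */
#endif
} demo_nstime_t;

static void
demo_nstime_init(demo_nstime_t *t, uint64_t ns) {
	t->ns = ns;
#if DEMO_DEBUG
	t->magic = DEMO_MAGIC;
#endif
}

static uint64_t
demo_nstime_ns(const demo_nstime_t *t) {
#if DEMO_DEBUG
	assert(t->magic == DEMO_MAGIC);	/* catches uninitialized use */
#endif
	return t->ns;
}

int
main(void) {
	demo_nstime_t t;
	demo_nstime_init(&t, 42);
	return demo_nstime_ns(&t) == 42 ? 0 : 1;
}
```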
*/ +#endif } nstime_t; +static const nstime_t nstime_zero = NSTIME_ZERO_INITIALIZER; + void nstime_init(nstime_t *time, uint64_t ns); void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec); uint64_t nstime_ns(const nstime_t *time); @@ -24,11 +35,39 @@ void nstime_isubtract(nstime_t *time, uint64_t subtrahend); void nstime_imultiply(nstime_t *time, uint64_t multiplier); void nstime_idivide(nstime_t *time, uint64_t divisor); uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor); +uint64_t nstime_ns_since(const nstime_t *past); typedef bool (nstime_monotonic_t)(void); extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic; -typedef bool (nstime_update_t)(nstime_t *); +typedef void (nstime_update_t)(nstime_t *); extern nstime_update_t *JET_MUTABLE nstime_update; +typedef void (nstime_prof_update_t)(nstime_t *); +extern nstime_prof_update_t *JET_MUTABLE nstime_prof_update; + +void nstime_init_update(nstime_t *time); +void nstime_prof_init_update(nstime_t *time); + +enum prof_time_res_e { + prof_time_res_default = 0, + prof_time_res_high = 1 +}; +typedef enum prof_time_res_e prof_time_res_t; + +extern prof_time_res_t opt_prof_time_res; +extern const char *prof_time_res_mode_names[]; + +JEMALLOC_ALWAYS_INLINE void +nstime_init_zero(nstime_t *time) { + nstime_copy(time, &nstime_zero); +} + +JEMALLOC_ALWAYS_INLINE bool +nstime_equals_zero(nstime_t *time) { + int diff = nstime_compare(time, &nstime_zero); + assert(diff >= 0); + return diff == 0; +} + #endif /* JEMALLOC_INTERNAL_NSTIME_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/pa.h b/contrib/jemalloc/include/jemalloc/internal/pa.h new file mode 100644 index 00000000000000..4748a05b691e1e --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/pa.h @@ -0,0 +1,243 @@ +#ifndef JEMALLOC_INTERNAL_PA_H +#define JEMALLOC_INTERNAL_PA_H + +#include "jemalloc/internal/base.h" +#include "jemalloc/internal/decay.h" +#include "jemalloc/internal/ecache.h" +#include "jemalloc/internal/edata_cache.h" +#include "jemalloc/internal/emap.h" +#include "jemalloc/internal/hpa.h" +#include "jemalloc/internal/lockedint.h" +#include "jemalloc/internal/pac.h" +#include "jemalloc/internal/pai.h" +#include "jemalloc/internal/sec.h" + +/* + * The page allocator; responsible for acquiring pages of memory for + * allocations. It picks the implementation of the page allocator interface + * (i.e. a pai_t) to handle a given page-level allocation request. For now, the + * only such implementation is the PAC code ("page allocator classic"), but + * others will be coming soon. + */ + +typedef struct pa_central_s pa_central_t; +struct pa_central_s { + hpa_central_t hpa; +}; + +/* + * The stats for a particular pa_shard. Because of the way the ctl module + * handles stats epoch data collection (it has its own arena_stats, and merges + * the stats from each arena into it), this needs to live in the arena_stats_t; + * hence we define it here and let the pa_shard have a pointer (rather than the + * more natural approach of just embedding it in the pa_shard itself). + * + * We follow the arena_stats_t approach of marking the derived fields. These + * are the ones that are not maintained on their own; instead, their values are + * derived during those stats merges. + */ +typedef struct pa_shard_stats_s pa_shard_stats_t; +struct pa_shard_stats_s { + /* Number of edata_t structs allocated by base, but not being used. */ + size_t edata_avail; /* Derived. */ + /* + * Stats specific to the PAC. 
For now, these are the only stats that + * exist, but there will eventually be other page allocators. Things + * like edata_avail make sense in a cross-PA sense, but things like + * npurges don't. + */ + pac_stats_t pac_stats; +}; + +/* + * The local allocator handle. Keeps the state necessary to satisfy page-sized + * allocations. + * + * The contents are mostly internal to the PA module. The key exception is that + * arena decay code is allowed to grab pointers to the dirty and muzzy ecaches + * decay_ts, for a couple of queries, passing them back to a PA function, or + * acquiring decay.mtx and looking at decay.purging. The reasoning is that, + * while PA decides what and how to purge, the arena code decides when and where + * (e.g. on what thread). It's allowed to use the presence of another purger to + * decide. + * (The background thread code also touches some other decay internals, but + * that's not fundamental; its' just an artifact of a partial refactoring, and + * its accesses could be straightforwardly moved inside the decay module). + */ +typedef struct pa_shard_s pa_shard_t; +struct pa_shard_s { + /* The central PA this shard is associated with. */ + pa_central_t *central; + + /* + * Number of pages in active extents. + * + * Synchronization: atomic. + */ + atomic_zu_t nactive; + + /* + * Whether or not we should prefer the hugepage allocator. Atomic since + * it may be concurrently modified by a thread setting extent hooks. + * Note that we still may do HPA operations in this arena; if use_hpa is + * changed from true to false, we'll free back to the hugepage allocator + * for those allocations. + */ + atomic_b_t use_hpa; + + /* + * If we never used the HPA to begin with, it wasn't initialized, and so + * we shouldn't try to e.g. acquire its mutexes during fork. This + * tracks that knowledge. + */ + bool ever_used_hpa; + + /* Allocates from a PAC. */ + pac_t pac; + + /* + * We place a small extent cache in front of the HPA, since we intend + * these configurations to use many fewer arenas, and therefore have a + * higher risk of hot locks. + */ + sec_t hpa_sec; + hpa_shard_t hpa_shard; + + /* The source of edata_t objects. */ + edata_cache_t edata_cache; + + unsigned ind; + + malloc_mutex_t *stats_mtx; + pa_shard_stats_t *stats; + + /* The emap this shard is tied to. */ + emap_t *emap; + + /* The base from which we get the ehooks and allocate metadat. */ + base_t *base; +}; + +static inline bool +pa_shard_dont_decay_muzzy(pa_shard_t *shard) { + return ecache_npages_get(&shard->pac.ecache_muzzy) == 0 && + pac_decay_ms_get(&shard->pac, extent_state_muzzy) <= 0; +} + +static inline ehooks_t * +pa_shard_ehooks_get(pa_shard_t *shard) { + return base_ehooks_get(shard->base); +} + +/* Returns true on error. */ +bool pa_central_init(pa_central_t *central, base_t *base, bool hpa, + hpa_hooks_t *hpa_hooks); + +/* Returns true on error. */ +bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central, + emap_t *emap, base_t *base, unsigned ind, pa_shard_stats_t *stats, + malloc_mutex_t *stats_mtx, nstime_t *cur_time, size_t oversize_threshold, + ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms); + +/* + * This isn't exposed to users; we allow late enablement of the HPA shard so + * that we can boot without worrying about the HPA, then turn it on in a0. 
+ */ +bool pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard, + const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts); + +/* + * We stop using the HPA when custom extent hooks are installed, but still + * redirect deallocations to it. + */ +void pa_shard_disable_hpa(tsdn_t *tsdn, pa_shard_t *shard); + +/* + * This does the PA-specific parts of arena reset (i.e. freeing all active + * allocations). + */ +void pa_shard_reset(tsdn_t *tsdn, pa_shard_t *shard); + +/* + * Destroy all the remaining retained extents. Should only be called after + * decaying all active, dirty, and muzzy extents to the retained state, as the + * last step in destroying the shard. + */ +void pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard); + +/* Gets an edata for the given allocation. */ +edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, + size_t alignment, bool slab, szind_t szind, bool zero, bool guarded, + bool *deferred_work_generated); +/* Returns true on error, in which case nothing changed. */ +bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size, + size_t new_size, szind_t szind, bool zero, bool *deferred_work_generated); +/* + * The same. Sets *generated_dirty to true if we produced new dirty pages, and + * false otherwise. + */ +bool pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size, + size_t new_size, szind_t szind, bool *deferred_work_generated); +/* + * Frees the given edata back to the pa. Sets *generated_dirty if we produced + * new dirty pages (well, we always set it for now; but this need not be the + * case). + * (We could make generated_dirty the return value of course, but this is more + * consistent with the shrink pathway and our error codes here). + */ +void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, + bool *deferred_work_generated); +bool pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state, + ssize_t decay_ms, pac_purge_eagerness_t eagerness); +ssize_t pa_decay_ms_get(pa_shard_t *shard, extent_state_t state); + +/* + * Do deferred work on this PA shard. + * + * Morally, this should do both PAC decay and the HPA deferred work. For now, + * though, the arena, background thread, and PAC modules are tightly interwoven + * in a way that's tricky to extricate, so we only do the HPA-specific parts. + */ +void pa_shard_set_deferral_allowed(tsdn_t *tsdn, pa_shard_t *shard, + bool deferral_allowed); +void pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard); +void pa_shard_try_deferred_work(tsdn_t *tsdn, pa_shard_t *shard); +uint64_t pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard); + +/******************************************************************************/ +/* + * Various bits of "boring" functionality that are still part of this module, + * but that we relegate to pa_extra.c, to keep the core logic in pa.c as + * readable as possible. + */ + +/* + * These fork phases are synchronized with the arena fork phase numbering to + * make it easy to keep straight. That's why there's no prefork1. 
+ */ +void pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard); +void pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard); +void pa_shard_prefork3(tsdn_t *tsdn, pa_shard_t *shard); +void pa_shard_prefork4(tsdn_t *tsdn, pa_shard_t *shard); +void pa_shard_prefork5(tsdn_t *tsdn, pa_shard_t *shard); +void pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard); +void pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard); + +void pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive, + size_t *ndirty, size_t *nmuzzy); + +void pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard, + pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out, + hpa_shard_stats_t *hpa_stats_out, sec_stats_t *sec_stats_out, + size_t *resident); + +/* + * Reads the PA-owned mutex stats into the output stats array, at the + * appropriate positions. Morally, these stats should really live in + * pa_shard_stats_t, but the indices are sort of baked into the various mutex + * prof macros. This would be a good thing to do at some point. + */ +void pa_shard_mtx_stats_read(tsdn_t *tsdn, pa_shard_t *shard, + mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]); + +#endif /* JEMALLOC_INTERNAL_PA_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/pac.h b/contrib/jemalloc/include/jemalloc/internal/pac.h new file mode 100644 index 00000000000000..9122aa0f3454bc --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/pac.h @@ -0,0 +1,178 @@ +#ifndef JEMALLOC_INTERNAL_PAC_H +#define JEMALLOC_INTERNAL_PAC_H + +#include "jemalloc/internal/exp_grow.h" +#include "jemalloc/internal/pai.h" +#include "san_bump.h" + + +/* + * Page allocator classic; an implementation of the PAI interface that: + * - Can be used for arenas with custom extent hooks. + * - Can always satisfy any allocation request (including highly-fragmentary + * ones). + * - Can use efficient OS-level zeroing primitives for demand-filled pages. + */ + +/* How "eager" decay/purging should be. */ +enum pac_purge_eagerness_e { + PAC_PURGE_ALWAYS, + PAC_PURGE_NEVER, + PAC_PURGE_ON_EPOCH_ADVANCE +}; +typedef enum pac_purge_eagerness_e pac_purge_eagerness_t; + +typedef struct pac_decay_stats_s pac_decay_stats_t; +struct pac_decay_stats_s { + /* Total number of purge sweeps. */ + locked_u64_t npurge; + /* Total number of madvise calls made. */ + locked_u64_t nmadvise; + /* Total number of pages purged. */ + locked_u64_t purged; +}; + +typedef struct pac_estats_s pac_estats_t; +struct pac_estats_s { + /* + * Stats for a given index in the range [0, SC_NPSIZES] in the various + * ecache_ts. + * We track both bytes and # of extents: two extents in the same bucket + * may have different sizes if adjacent size classes differ by more than + * a page, so bytes cannot always be derived from # of extents. + */ + size_t ndirty; + size_t dirty_bytes; + size_t nmuzzy; + size_t muzzy_bytes; + size_t nretained; + size_t retained_bytes; +}; + +typedef struct pac_stats_s pac_stats_t; +struct pac_stats_s { + pac_decay_stats_t decay_dirty; + pac_decay_stats_t decay_muzzy; + + /* + * Number of unused virtual memory bytes currently retained. Retained + * bytes are technically mapped (though always decommitted or purged), + * but they are excluded from the mapped statistic (above). + */ + size_t retained; /* Derived. */ + + /* + * Number of bytes currently mapped, excluding retained memory (and any + * base-allocated memory, which is tracked by the arena stats). + * + * We name this "pac_mapped" to avoid confusion with the arena_stats + * "mapped". 
+ */ + atomic_zu_t pac_mapped; + + /* VM space had to be leaked (undocumented). Normally 0. */ + atomic_zu_t abandoned_vm; +}; + +struct pac_s { + /* + * Must be the first member (we convert it to a PAC given only a + * pointer). The handle to the allocation interface. + */ + pai_t pai; + /* + * Collections of extents that were previously allocated. These are + * used when allocating extents, in an attempt to re-use address space. + * + * Synchronization: internal. + */ + ecache_t ecache_dirty; + ecache_t ecache_muzzy; + ecache_t ecache_retained; + + base_t *base; + emap_t *emap; + edata_cache_t *edata_cache; + + /* The grow info for the retained ecache. */ + exp_grow_t exp_grow; + malloc_mutex_t grow_mtx; + + /* Special allocator for guarded frequently reused extents. */ + san_bump_alloc_t sba; + + /* How large extents should be before getting auto-purged. */ + atomic_zu_t oversize_threshold; + + /* + * Decay-based purging state, responsible for scheduling extent state + * transitions. + * + * Synchronization: via the internal mutex. + */ + decay_t decay_dirty; /* dirty --> muzzy */ + decay_t decay_muzzy; /* muzzy --> retained */ + + malloc_mutex_t *stats_mtx; + pac_stats_t *stats; + + /* Extent serial number generator state. */ + atomic_zu_t extent_sn_next; +}; + +bool pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap, + edata_cache_t *edata_cache, nstime_t *cur_time, size_t oversize_threshold, + ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms, pac_stats_t *pac_stats, + malloc_mutex_t *stats_mtx); + +static inline size_t +pac_mapped(pac_t *pac) { + return atomic_load_zu(&pac->stats->pac_mapped, ATOMIC_RELAXED); +} + +static inline ehooks_t * +pac_ehooks_get(pac_t *pac) { + return base_ehooks_get(pac->base); +} + +/* + * All purging functions require holding decay->mtx. This is one of the few + * places external modules are allowed to peek inside pa_shard_t internals. + */ + +/* + * Decays the number of pages currently in the ecache. This might not leave the + * ecache empty if other threads are inserting dirty objects into it + * concurrently with the call. + */ +void pac_decay_all(tsdn_t *tsdn, pac_t *pac, decay_t *decay, + pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay); +/* + * Updates decay settings for the current time, and conditionally purges in + * response (depending on decay_purge_setting). Returns whether or not the + * epoch advanced. + */ +bool pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay, + pac_decay_stats_t *decay_stats, ecache_t *ecache, + pac_purge_eagerness_t eagerness); + +/* + * Gets / sets the maximum amount that we'll grow an arena down the + * grow-retained pathways (unless forced to by an allocaction request). + * + * Set new_limit to NULL if it's just a query, or old_limit to NULL if you don't + * care about the previous value. + * + * Returns true on error (if the new limit is not valid). 
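pac_retain_grow_limit_get_set (declared just below) folds the query and the update of the grow-retained limit into one call, where a NULL old_limit means "don't read" and a NULL new_limit means "don't write". A standalone sketch of that calling convention with hypothetical demo names:

```
/* Standalone sketch of a combined get/set call where NULL skips a direction. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static size_t demo_grow_limit = 1 << 20;

/* Returns true on error (here, rejecting a zero limit); nothing changes then. */
static bool
demo_limit_get_set(size_t *old_limit, size_t *new_limit) {
	if (old_limit != NULL) {
		*old_limit = demo_grow_limit;	/* query side */
	}
	if (new_limit != NULL) {
		if (*new_limit == 0) {
			return true;	/* invalid new value */
		}
		demo_grow_limit = *new_limit;	/* update side */
	}
	return false;
}

int
main(void) {
	size_t old, limit = 1 << 22;
	demo_limit_get_set(&old, NULL);		/* pure query */
	demo_limit_get_set(&old, &limit);	/* read old value and update */
	printf("old=%zu now=%zu\n", old, demo_grow_limit);
	return 0;
}
```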
+ */ +bool pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit, + size_t *new_limit); + +bool pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state, + ssize_t decay_ms, pac_purge_eagerness_t eagerness); +ssize_t pac_decay_ms_get(pac_t *pac, extent_state_t state); + +void pac_reset(tsdn_t *tsdn, pac_t *pac); +void pac_destroy(tsdn_t *tsdn, pac_t *pac); + +#endif /* JEMALLOC_INTERNAL_PAC_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/pages.h b/contrib/jemalloc/include/jemalloc/internal/pages.h index 7dae633afe5891..ad1f606a8a059b 100644 --- a/contrib/jemalloc/include/jemalloc/internal/pages.h +++ b/contrib/jemalloc/include/jemalloc/internal/pages.h @@ -13,10 +13,27 @@ /* Return the smallest pagesize multiple that is >= s. */ #define PAGE_CEILING(s) \ (((s) + PAGE_MASK) & ~PAGE_MASK) +/* Return the largest pagesize multiple that is <=s. */ +#define PAGE_FLOOR(s) \ + ((s) & ~PAGE_MASK) /* Huge page size. LG_HUGEPAGE is determined by the configure script. */ #define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE)) #define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1)) + +#if LG_HUGEPAGE != 0 +# define HUGEPAGE_PAGES (HUGEPAGE / PAGE) +#else +/* + * It's convenient to define arrays (or bitmaps) of HUGEPAGE_PAGES lengths. If + * we can't autodetect the hugepage size, it gets treated as 0, in which case + * we'll trigger a compiler error in those arrays. Avoid this case by ensuring + * that this value is at least 1. (We won't ever run in this degraded state; + * hpa_supported() returns false in this case. + */ +# define HUGEPAGE_PAGES 1 +#endif + /* Return the huge page base address for the huge page containing address a. */ #define HUGEPAGE_ADDR2BASE(a) \ ((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK)) @@ -58,6 +75,18 @@ static const bool pages_can_purge_forced = #endif ; +#if defined(JEMALLOC_HAVE_MADVISE_HUGE) || defined(JEMALLOC_HAVE_MEMCNTL) +# define PAGES_CAN_HUGIFY +#endif + +static const bool pages_can_hugify = +#ifdef PAGES_CAN_HUGIFY + true +#else + false +#endif + ; + typedef enum { thp_mode_default = 0, /* Do not change hugepage settings. */ thp_mode_always = 1, /* Always set MADV_HUGEPAGE. */ @@ -84,5 +113,7 @@ bool pages_dontdump(void *addr, size_t size); bool pages_dodump(void *addr, size_t size); bool pages_boot(void); void pages_set_thp_state (void *ptr, size_t size); +void pages_mark_guards(void *head, void *tail); +void pages_unmark_guards(void *head, void *tail); #endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/pai.h b/contrib/jemalloc/include/jemalloc/internal/pai.h new file mode 100644 index 00000000000000..d978cd7d25ec8d --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/pai.h @@ -0,0 +1,95 @@ +#ifndef JEMALLOC_INTERNAL_PAI_H +#define JEMALLOC_INTERNAL_PAI_H + +/* An interface for page allocation. */ + +typedef struct pai_s pai_t; +struct pai_s { + /* Returns NULL on failure. */ + edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size, + size_t alignment, bool zero, bool guarded, bool frequent_reuse, + bool *deferred_work_generated); + /* + * Returns the number of extents added to the list (which may be fewer + * than requested, in case of OOM). The list should already be + * initialized. The only alignment guarantee is page-alignment, and + * the results are not necessarily zeroed. 
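PAGE_CEILING and the newly added PAGE_FLOOR in pages.h above are the standard power-of-two rounding expressions. The tiny self-contained check below copies the same arithmetic under DEMO_ names, assuming a 4 KiB page purely for the demo:

```
/* Standalone check of power-of-two page rounding, assuming 4 KiB pages. */
#include <assert.h>
#include <stddef.h>

#define DEMO_PAGE		((size_t)4096)
#define DEMO_PAGE_MASK		(DEMO_PAGE - 1)
/* Smallest page multiple >= s. */
#define DEMO_PAGE_CEILING(s)	(((s) + DEMO_PAGE_MASK) & ~DEMO_PAGE_MASK)
/* Largest page multiple <= s. */
#define DEMO_PAGE_FLOOR(s)	((s) & ~DEMO_PAGE_MASK)

int
main(void) {
	assert(DEMO_PAGE_CEILING((size_t)1) == 4096);
	assert(DEMO_PAGE_CEILING((size_t)4096) == 4096);
	assert(DEMO_PAGE_FLOOR((size_t)4097) == 4096);
	assert(DEMO_PAGE_FLOOR((size_t)4095) == 0);
	return 0;
}
```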
+ */ + size_t (*alloc_batch)(tsdn_t *tsdn, pai_t *self, size_t size, + size_t nallocs, edata_list_active_t *results, + bool *deferred_work_generated); + bool (*expand)(tsdn_t *tsdn, pai_t *self, edata_t *edata, + size_t old_size, size_t new_size, bool zero, + bool *deferred_work_generated); + bool (*shrink)(tsdn_t *tsdn, pai_t *self, edata_t *edata, + size_t old_size, size_t new_size, bool *deferred_work_generated); + void (*dalloc)(tsdn_t *tsdn, pai_t *self, edata_t *edata, + bool *deferred_work_generated); + /* This function empties out list as a side-effect of being called. */ + void (*dalloc_batch)(tsdn_t *tsdn, pai_t *self, + edata_list_active_t *list, bool *deferred_work_generated); + uint64_t (*time_until_deferred_work)(tsdn_t *tsdn, pai_t *self); +}; + +/* + * These are just simple convenience functions to avoid having to reference the + * same pai_t twice on every invocation. + */ + +static inline edata_t * +pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, + bool zero, bool guarded, bool frequent_reuse, + bool *deferred_work_generated) { + return self->alloc(tsdn, self, size, alignment, zero, guarded, + frequent_reuse, deferred_work_generated); +} + +static inline size_t +pai_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs, + edata_list_active_t *results, bool *deferred_work_generated) { + return self->alloc_batch(tsdn, self, size, nallocs, results, + deferred_work_generated); +} + +static inline bool +pai_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, + size_t new_size, bool zero, bool *deferred_work_generated) { + return self->expand(tsdn, self, edata, old_size, new_size, zero, + deferred_work_generated); +} + +static inline bool +pai_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, + size_t new_size, bool *deferred_work_generated) { + return self->shrink(tsdn, self, edata, old_size, new_size, + deferred_work_generated); +} + +static inline void +pai_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata, + bool *deferred_work_generated) { + self->dalloc(tsdn, self, edata, deferred_work_generated); +} + +static inline void +pai_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list, + bool *deferred_work_generated) { + self->dalloc_batch(tsdn, self, list, deferred_work_generated); +} + +static inline uint64_t +pai_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) { + return self->time_until_deferred_work(tsdn, self); +} + +/* + * An implementation of batch allocation that simply calls alloc once for + * each item in the list. + */ +size_t pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size, + size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated); +/* Ditto, for dalloc. */ +void pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self, + edata_list_active_t *list, bool *deferred_work_generated); + +#endif /* JEMALLOC_INTERNAL_PAI_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/peak.h b/contrib/jemalloc/include/jemalloc/internal/peak.h new file mode 100644 index 00000000000000..59da3e41b6b78b --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/peak.h @@ -0,0 +1,37 @@ +#ifndef JEMALLOC_INTERNAL_PEAK_H +#define JEMALLOC_INTERNAL_PEAK_H + +typedef struct peak_s peak_t; +struct peak_s { + /* The highest recorded peak value, after adjustment (see below). */ + uint64_t cur_max; + /* + * The difference between alloc and dalloc at the last set_zero call; + * this lets us cancel out the appropriate amount of excess. 
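pai_t above expresses the page-allocator interface as a struct of function pointers, with thin static inline wrappers so call sites never repeat the self pointer or name a concrete backend. A minimal standalone sketch of the same pattern follows; demo_pai_t and the malloc-backed backend are illustrative, not part of jemalloc.

```
/* Standalone sketch of an interface as a struct of function pointers in C. */
#include <stdlib.h>

typedef struct demo_pai_s demo_pai_t;
struct demo_pai_s {
	void *(*alloc)(demo_pai_t *self, size_t size);
	void (*dalloc)(demo_pai_t *self, void *ptr);
};

/* Convenience wrappers, mirroring the pai_alloc/pai_dalloc style above. */
static inline void *
demo_pai_alloc(demo_pai_t *self, size_t size) {
	return self->alloc(self, size);
}

static inline void
demo_pai_dalloc(demo_pai_t *self, void *ptr) {
	self->dalloc(self, ptr);
}

/* One trivial backend; a real backend would embed demo_pai_t as its first
 * member so the interface pointer can be converted back to the backend. */
static void *
demo_malloc_alloc(demo_pai_t *self, size_t size) {
	(void)self;
	return malloc(size);
}

static void
demo_malloc_dalloc(demo_pai_t *self, void *ptr) {
	(void)self;
	free(ptr);
}

int
main(void) {
	demo_pai_t backend = { demo_malloc_alloc, demo_malloc_dalloc };
	void *p = demo_pai_alloc(&backend, 64);
	demo_pai_dalloc(&backend, p);
	return 0;
}
```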
+ */ + uint64_t adjustment; +}; + +#define PEAK_INITIALIZER {0, 0} + +static inline uint64_t +peak_max(peak_t *peak) { + return peak->cur_max; +} + +static inline void +peak_update(peak_t *peak, uint64_t alloc, uint64_t dalloc) { + int64_t candidate_max = (int64_t)(alloc - dalloc - peak->adjustment); + if (candidate_max > (int64_t)peak->cur_max) { + peak->cur_max = candidate_max; + } +} + +/* Resets the counter to zero; all peaks are now relative to this point. */ +static inline void +peak_set_zero(peak_t *peak, uint64_t alloc, uint64_t dalloc) { + peak->cur_max = 0; + peak->adjustment = alloc - dalloc; +} + +#endif /* JEMALLOC_INTERNAL_PEAK_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/peak_event.h b/contrib/jemalloc/include/jemalloc/internal/peak_event.h new file mode 100644 index 00000000000000..b808ce0432993e --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/peak_event.h @@ -0,0 +1,24 @@ +#ifndef JEMALLOC_INTERNAL_PEAK_EVENT_H +#define JEMALLOC_INTERNAL_PEAK_EVENT_H + +/* + * While peak.h contains the simple helper struct that tracks state, this + * contains the allocator tie-ins (and knows about tsd, the event module, etc.). + */ + +/* Update the peak with current tsd state. */ +void peak_event_update(tsd_t *tsd); +/* Set current state to zero. */ +void peak_event_zero(tsd_t *tsd); +uint64_t peak_event_max(tsd_t *tsd); + +/* Manual hooks. */ +/* The activity-triggered hooks. */ +uint64_t peak_alloc_new_event_wait(tsd_t *tsd); +uint64_t peak_alloc_postponed_event_wait(tsd_t *tsd); +void peak_alloc_event_handler(tsd_t *tsd, uint64_t elapsed); +uint64_t peak_dalloc_new_event_wait(tsd_t *tsd); +uint64_t peak_dalloc_postponed_event_wait(tsd_t *tsd); +void peak_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed); + +#endif /* JEMALLOC_INTERNAL_PEAK_EVENT_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/ph.h b/contrib/jemalloc/include/jemalloc/internal/ph.h index 84d6778a906e7b..5f091c5fbb0c15 100644 --- a/contrib/jemalloc/include/jemalloc/internal/ph.h +++ b/contrib/jemalloc/include/jemalloc/internal/ph.h @@ -1,3 +1,6 @@ +#ifndef JEMALLOC_INTERNAL_PH_H +#define JEMALLOC_INTERNAL_PH_H + /* * A Pairing Heap implementation. * @@ -10,382 +13,508 @@ * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf * ******************************************************************************* + * + * We include a non-obvious optimization: + * - First, we introduce a new pop-and-link operation; pop the two most + * recently-inserted items off the aux-list, link them, and push the resulting + * heap. + * - We maintain a count of the number of insertions since the last time we + * merged the aux-list (i.e. via first() or remove_first()). After N inserts, + * we do ffs(N) pop-and-link operations. + * + * One way to think of this is that we're progressively building up a tree in + * the aux-list, rather than a linked-list (think of the series of merges that + * will be performed as the aux-count grows). + * + * There's a couple reasons we benefit from this: + * - Ordinarily, after N insertions, the aux-list is of size N. With our + * strategy, it's of size O(log(N)). So we decrease the worst-case time of + * first() calls, and reduce the average cost of remove_min calls. Since + * these almost always occur while holding a lock, we practically reduce the + * frequency of unusually long hold times. + * - This moves the bulk of the work of merging the aux-list onto the threads + * that are inserting into the heap. 
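peak_update and peak_set_zero above keep a high-water mark of net allocation relative to the last reset, using the stored adjustment to cancel history from before the reset. The short standalone check below reuses that logic under demo_* names to show how the adjustment works:

```
/* Standalone check of the peak/adjustment bookkeeping shown above. */
#include <assert.h>
#include <stdint.h>

typedef struct { uint64_t cur_max; uint64_t adjustment; } demo_peak_t;

static void
demo_peak_update(demo_peak_t *peak, uint64_t alloc, uint64_t dalloc) {
	int64_t candidate = (int64_t)(alloc - dalloc - peak->adjustment);
	if (candidate > (int64_t)peak->cur_max) {
		peak->cur_max = (uint64_t)candidate;
	}
}

static void
demo_peak_set_zero(demo_peak_t *peak, uint64_t alloc, uint64_t dalloc) {
	peak->cur_max = 0;
	peak->adjustment = alloc - dalloc;	/* later peaks relative to here */
}

int
main(void) {
	demo_peak_t peak = {0, 0};
	demo_peak_update(&peak, 1000, 200);	/* net 800 live bytes */
	assert(peak.cur_max == 800);
	demo_peak_set_zero(&peak, 1000, 900);	/* reset at net 100 */
	demo_peak_update(&peak, 1300, 900);	/* net 400, i.e. +300 since reset */
	assert(peak.cur_max == 300);
	return 0;
}
```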
In some common scenarios, insertions + * happen in bulk, from a single thread (think tcache flushing; we potentially + * move many slabs from slabs_full to slabs_nonfull). All the nodes in this + * case are in the inserting threads cache, and linking them is very cheap + * (cache misses dominate linking cost). Without this optimization, linking + * happens on the next call to remove_first. Since that remove_first call + * likely happens on a different thread (or at least, after the cache has + * gotten cold if done on the same thread), deferring linking trades cheap + * link operations now for expensive ones later. + * + * The ffs trick keeps amortized insert cost at constant time. Similar + * strategies based on periodically sorting the list after a batch of operations + * perform worse than this in practice, even with various fancy tricks; they + * all took amortized complexity of an insert from O(1) to O(log(n)). */ -#ifndef PH_H_ -#define PH_H_ +typedef int (*ph_cmp_t)(void *, void *); /* Node structure. */ -#define phn(a_type) \ -struct { \ - a_type *phn_prev; \ - a_type *phn_next; \ - a_type *phn_lchild; \ +typedef struct phn_link_s phn_link_t; +struct phn_link_s { + void *prev; + void *next; + void *lchild; +}; + +typedef struct ph_s ph_t; +struct ph_s { + void *root; + /* + * Inserts done since the last aux-list merge. This is not necessarily + * the size of the aux-list, since it's possible that removals have + * happened since, and we don't track whether or not those removals are + * from the aux list. + */ + size_t auxcount; +}; + +JEMALLOC_ALWAYS_INLINE phn_link_t * +phn_link_get(void *phn, size_t offset) { + return (phn_link_t *)(((uintptr_t)phn) + offset); } -/* Root structure. */ -#define ph(a_type) \ -struct { \ - a_type *ph_root; \ +JEMALLOC_ALWAYS_INLINE void +phn_link_init(void *phn, size_t offset) { + phn_link_get(phn, offset)->prev = NULL; + phn_link_get(phn, offset)->next = NULL; + phn_link_get(phn, offset)->lchild = NULL; } -/* Internal utility macros. 
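The rewritten pairing heap below is no longer generated per type by macros; it manipulates void pointers plus the byte offset of an embedded phn_link_t, as in phn_link_get. A minimal standalone sketch of that offset-based access using offsetof; the demo_* types are hypothetical:

```
/* Standalone sketch of reaching an embedded link through a byte offset. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct { void *prev, *next, *lchild; } demo_link_t;

/* A user type with the link embedded somewhere inside it. */
typedef struct {
	int key;
	demo_link_t link;
} demo_node_t;

/* Generic code sees only (void *node, size_t offset), not demo_node_t. */
static inline demo_link_t *
demo_link_get(void *node, size_t offset) {
	return (demo_link_t *)((uintptr_t)node + offset);
}

int
main(void) {
	demo_node_t a = { 41, { NULL, NULL, NULL } };
	demo_node_t b = { 42, { NULL, NULL, NULL } };
	size_t off = offsetof(demo_node_t, link);
	/* Link a -> b without the generic code knowing the node type. */
	demo_link_get(&a, off)->next = &b;
	assert(((demo_node_t *)demo_link_get(&a, off)->next)->key == 42);
	return 0;
}
```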
*/ -#define phn_lchild_get(a_type, a_field, a_phn) \ - (a_phn->a_field.phn_lchild) -#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \ - a_phn->a_field.phn_lchild = a_lchild; \ -} while (0) - -#define phn_next_get(a_type, a_field, a_phn) \ - (a_phn->a_field.phn_next) -#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \ - a_phn->a_field.phn_prev = a_prev; \ -} while (0) - -#define phn_prev_get(a_type, a_field, a_phn) \ - (a_phn->a_field.phn_prev) -#define phn_next_set(a_type, a_field, a_phn, a_next) do { \ - a_phn->a_field.phn_next = a_next; \ -} while (0) - -#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \ - a_type *phn0child; \ - \ - assert(a_phn0 != NULL); \ - assert(a_phn1 != NULL); \ - assert(a_cmp(a_phn0, a_phn1) <= 0); \ - \ - phn_prev_set(a_type, a_field, a_phn1, a_phn0); \ - phn0child = phn_lchild_get(a_type, a_field, a_phn0); \ - phn_next_set(a_type, a_field, a_phn1, phn0child); \ - if (phn0child != NULL) { \ - phn_prev_set(a_type, a_field, phn0child, a_phn1); \ - } \ - phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \ -} while (0) - -#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \ - if (a_phn0 == NULL) { \ - r_phn = a_phn1; \ - } else if (a_phn1 == NULL) { \ - r_phn = a_phn0; \ - } else if (a_cmp(a_phn0, a_phn1) < 0) { \ - phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \ - a_cmp); \ - r_phn = a_phn0; \ - } else { \ - phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \ - a_cmp); \ - r_phn = a_phn1; \ - } \ -} while (0) +/* Internal utility helpers. */ +JEMALLOC_ALWAYS_INLINE void * +phn_lchild_get(void *phn, size_t offset) { + return phn_link_get(phn, offset)->lchild; +} + +JEMALLOC_ALWAYS_INLINE void +phn_lchild_set(void *phn, void *lchild, size_t offset) { + phn_link_get(phn, offset)->lchild = lchild; +} + +JEMALLOC_ALWAYS_INLINE void * +phn_next_get(void *phn, size_t offset) { + return phn_link_get(phn, offset)->next; +} + +JEMALLOC_ALWAYS_INLINE void +phn_next_set(void *phn, void *next, size_t offset) { + phn_link_get(phn, offset)->next = next; +} + +JEMALLOC_ALWAYS_INLINE void * +phn_prev_get(void *phn, size_t offset) { + return phn_link_get(phn, offset)->prev; +} + +JEMALLOC_ALWAYS_INLINE void +phn_prev_set(void *phn, void *prev, size_t offset) { + phn_link_get(phn, offset)->prev = prev; +} + +JEMALLOC_ALWAYS_INLINE void +phn_merge_ordered(void *phn0, void *phn1, size_t offset, + ph_cmp_t cmp) { + void *phn0child; + + assert(phn0 != NULL); + assert(phn1 != NULL); + assert(cmp(phn0, phn1) <= 0); + + phn_prev_set(phn1, phn0, offset); + phn0child = phn_lchild_get(phn0, offset); + phn_next_set(phn1, phn0child, offset); + if (phn0child != NULL) { + phn_prev_set(phn0child, phn1, offset); + } + phn_lchild_set(phn0, phn1, offset); +} + +JEMALLOC_ALWAYS_INLINE void * +phn_merge(void *phn0, void *phn1, size_t offset, ph_cmp_t cmp) { + void *result; + if (phn0 == NULL) { + result = phn1; + } else if (phn1 == NULL) { + result = phn0; + } else if (cmp(phn0, phn1) < 0) { + phn_merge_ordered(phn0, phn1, offset, cmp); + result = phn0; + } else { + phn_merge_ordered(phn1, phn0, offset, cmp); + result = phn1; + } + return result; +} + +JEMALLOC_ALWAYS_INLINE void * +phn_merge_siblings(void *phn, size_t offset, ph_cmp_t cmp) { + void *head = NULL; + void *tail = NULL; + void *phn0 = phn; + void *phn1 = phn_next_get(phn0, offset); + + /* + * Multipass merge, wherein the first two elements of a FIFO + * are repeatedly merged, and each result is appended to the + * singly linked FIFO, until the FIFO contains only a single 
+ * element. We start with a sibling list but no reference to + * its tail, so we do a single pass over the sibling list to + * populate the FIFO. + */ + if (phn1 != NULL) { + void *phnrest = phn_next_get(phn1, offset); + if (phnrest != NULL) { + phn_prev_set(phnrest, NULL, offset); + } + phn_prev_set(phn0, NULL, offset); + phn_next_set(phn0, NULL, offset); + phn_prev_set(phn1, NULL, offset); + phn_next_set(phn1, NULL, offset); + phn0 = phn_merge(phn0, phn1, offset, cmp); + head = tail = phn0; + phn0 = phnrest; + while (phn0 != NULL) { + phn1 = phn_next_get(phn0, offset); + if (phn1 != NULL) { + phnrest = phn_next_get(phn1, offset); + if (phnrest != NULL) { + phn_prev_set(phnrest, NULL, offset); + } + phn_prev_set(phn0, NULL, offset); + phn_next_set(phn0, NULL, offset); + phn_prev_set(phn1, NULL, offset); + phn_next_set(phn1, NULL, offset); + phn0 = phn_merge(phn0, phn1, offset, cmp); + phn_next_set(tail, phn0, offset); + tail = phn0; + phn0 = phnrest; + } else { + phn_next_set(tail, phn0, offset); + tail = phn0; + phn0 = NULL; + } + } + phn0 = head; + phn1 = phn_next_get(phn0, offset); + if (phn1 != NULL) { + while (true) { + head = phn_next_get(phn1, offset); + assert(phn_prev_get(phn0, offset) == NULL); + phn_next_set(phn0, NULL, offset); + assert(phn_prev_get(phn1, offset) == NULL); + phn_next_set(phn1, NULL, offset); + phn0 = phn_merge(phn0, phn1, offset, cmp); + if (head == NULL) { + break; + } + phn_next_set(tail, phn0, offset); + tail = phn0; + phn0 = head; + phn1 = phn_next_get(phn0, offset); + } + } + } + return phn0; +} + +JEMALLOC_ALWAYS_INLINE void +ph_merge_aux(ph_t *ph, size_t offset, ph_cmp_t cmp) { + ph->auxcount = 0; + void *phn = phn_next_get(ph->root, offset); + if (phn != NULL) { + phn_prev_set(ph->root, NULL, offset); + phn_next_set(ph->root, NULL, offset); + phn_prev_set(phn, NULL, offset); + phn = phn_merge_siblings(phn, offset, cmp); + assert(phn_next_get(phn, offset) == NULL); + ph->root = phn_merge(ph->root, phn, offset, cmp); + } +} + +JEMALLOC_ALWAYS_INLINE void * +ph_merge_children(void *phn, size_t offset, ph_cmp_t cmp) { + void *result; + void *lchild = phn_lchild_get(phn, offset); + if (lchild == NULL) { + result = NULL; + } else { + result = phn_merge_siblings(lchild, offset, cmp); + } + return result; +} + +JEMALLOC_ALWAYS_INLINE void +ph_new(ph_t *ph) { + ph->root = NULL; + ph->auxcount = 0; +} + +JEMALLOC_ALWAYS_INLINE bool +ph_empty(ph_t *ph) { + return ph->root == NULL; +} + +JEMALLOC_ALWAYS_INLINE void * +ph_first(ph_t *ph, size_t offset, ph_cmp_t cmp) { + if (ph->root == NULL) { + return NULL; + } + ph_merge_aux(ph, offset, cmp); + return ph->root; +} + +JEMALLOC_ALWAYS_INLINE void * +ph_any(ph_t *ph, size_t offset) { + if (ph->root == NULL) { + return NULL; + } + void *aux = phn_next_get(ph->root, offset); + if (aux != NULL) { + return aux; + } + return ph->root; +} + +/* Returns true if we should stop trying to merge. 
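ph_insert below schedules ffs(auxcount - 1) pop-and-link attempts after each aux-list insertion, which is what keeps the aux list around O(log N) as described in the header comment. The small standalone trace below shows how that merge budget grows with the insert count; demo_ffs_zu is a portable stand-in for the internal ffs_zu helper.

```
/* Standalone trace of the ffs-based pop-and-link schedule described above. */
#include <stddef.h>
#include <stdio.h>

/* Index (1-based) of the lowest set bit; 0 if no bit is set. */
static unsigned
demo_ffs_zu(size_t x) {
	unsigned i = 0;
	while (x != 0) {
		i++;
		if (x & 1) {
			return i;
		}
		x >>= 1;
	}
	return 0;
}

int
main(void) {
	/* After the Nth aux insert, attempt ffs(N - 1) pop-and-link merges. */
	for (size_t auxcount = 2; auxcount <= 9; auxcount++) {
		printf("insert #%zu -> up to %u merge attempts\n", auxcount,
		    demo_ffs_zu(auxcount - 1));
	}
	return 0;
}
```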
*/ +JEMALLOC_ALWAYS_INLINE bool +ph_try_aux_merge_pair(ph_t *ph, size_t offset, ph_cmp_t cmp) { + assert(ph->root != NULL); + void *phn0 = phn_next_get(ph->root, offset); + if (phn0 == NULL) { + return true; + } + void *phn1 = phn_next_get(phn0, offset); + if (phn1 == NULL) { + return true; + } + void *next_phn1 = phn_next_get(phn1, offset); + phn_next_set(phn0, NULL, offset); + phn_prev_set(phn0, NULL, offset); + phn_next_set(phn1, NULL, offset); + phn_prev_set(phn1, NULL, offset); + phn0 = phn_merge(phn0, phn1, offset, cmp); + phn_next_set(phn0, next_phn1, offset); + if (next_phn1 != NULL) { + phn_prev_set(next_phn1, phn0, offset); + } + phn_next_set(ph->root, phn0, offset); + phn_prev_set(phn0, ph->root, offset); + return next_phn1 == NULL; +} + +JEMALLOC_ALWAYS_INLINE void +ph_insert(ph_t *ph, void *phn, size_t offset, ph_cmp_t cmp) { + phn_link_init(phn, offset); -#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \ - a_type *head = NULL; \ - a_type *tail = NULL; \ - a_type *phn0 = a_phn; \ - a_type *phn1 = phn_next_get(a_type, a_field, phn0); \ + /* + * Treat the root as an aux list during insertion, and lazily merge + * during a_prefix##remove_first(). For elements that are inserted, + * then removed via a_prefix##remove() before the aux list is ever + * processed, this makes insert/remove constant-time, whereas eager + * merging would make insert O(log n). + */ + if (ph->root == NULL) { + ph->root = phn; + } else { + /* + * As a special case, check to see if we can replace the root. + * This is practically common in some important cases, and lets + * us defer some insertions (hopefully, until the point where + * some of the items in the aux list have been removed, savings + * us from linking them at all). + */ + if (cmp(phn, ph->root) < 0) { + phn_lchild_set(phn, ph->root, offset); + phn_prev_set(ph->root, phn, offset); + ph->root = phn; + ph->auxcount = 0; + return; + } + ph->auxcount++; + phn_next_set(phn, phn_next_get(ph->root, offset), offset); + if (phn_next_get(ph->root, offset) != NULL) { + phn_prev_set(phn_next_get(ph->root, offset), phn, + offset); + } + phn_prev_set(phn, ph->root, offset); + phn_next_set(ph->root, phn, offset); + } + if (ph->auxcount > 1) { + unsigned nmerges = ffs_zu(ph->auxcount - 1); + bool done = false; + for (unsigned i = 0; i < nmerges && !done; i++) { + done = ph_try_aux_merge_pair(ph, offset, cmp); + } + } +} + +JEMALLOC_ALWAYS_INLINE void * +ph_remove_first(ph_t *ph, size_t offset, ph_cmp_t cmp) { + void *ret; + + if (ph->root == NULL) { + return NULL; + } + ph_merge_aux(ph, offset, cmp); + ret = ph->root; + ph->root = ph_merge_children(ph->root, offset, cmp); + + return ret; + +} + +JEMALLOC_ALWAYS_INLINE void +ph_remove(ph_t *ph, void *phn, size_t offset, ph_cmp_t cmp) { + void *replace; + void *parent; + + if (ph->root == phn) { + /* + * We can delete from aux list without merging it, but we need + * to merge if we are dealing with the root node and it has + * children. + */ + if (phn_lchild_get(phn, offset) == NULL) { + ph->root = phn_next_get(phn, offset); + if (ph->root != NULL) { + phn_prev_set(ph->root, NULL, offset); + } + return; + } + ph_merge_aux(ph, offset, cmp); + if (ph->root == phn) { + ph->root = ph_merge_children(ph->root, offset, cmp); + return; + } + } + + /* Get parent (if phn is leftmost child) before mutating. */ + if ((parent = phn_prev_get(phn, offset)) != NULL) { + if (phn_lchild_get(parent, offset) != phn) { + parent = NULL; + } + } + /* Find a possible replacement node, and link to parent. 
*/ + replace = ph_merge_children(phn, offset, cmp); + /* Set next/prev for sibling linked list. */ + if (replace != NULL) { + if (parent != NULL) { + phn_prev_set(replace, parent, offset); + phn_lchild_set(parent, replace, offset); + } else { + phn_prev_set(replace, phn_prev_get(phn, offset), + offset); + if (phn_prev_get(phn, offset) != NULL) { + phn_next_set(phn_prev_get(phn, offset), replace, + offset); + } + } + phn_next_set(replace, phn_next_get(phn, offset), offset); + if (phn_next_get(phn, offset) != NULL) { + phn_prev_set(phn_next_get(phn, offset), replace, + offset); + } + } else { + if (parent != NULL) { + void *next = phn_next_get(phn, offset); + phn_lchild_set(parent, next, offset); + if (next != NULL) { + phn_prev_set(next, parent, offset); + } + } else { + assert(phn_prev_get(phn, offset) != NULL); + phn_next_set( + phn_prev_get(phn, offset), + phn_next_get(phn, offset), offset); + } + if (phn_next_get(phn, offset) != NULL) { + phn_prev_set( + phn_next_get(phn, offset), + phn_prev_get(phn, offset), offset); + } + } +} + +#define ph_structs(a_prefix, a_type) \ +typedef struct { \ + phn_link_t link; \ +} a_prefix##_link_t; \ \ - /* \ - * Multipass merge, wherein the first two elements of a FIFO \ - * are repeatedly merged, and each result is appended to the \ - * singly linked FIFO, until the FIFO contains only a single \ - * element. We start with a sibling list but no reference to \ - * its tail, so we do a single pass over the sibling list to \ - * populate the FIFO. \ - */ \ - if (phn1 != NULL) { \ - a_type *phnrest = phn_next_get(a_type, a_field, phn1); \ - if (phnrest != NULL) { \ - phn_prev_set(a_type, a_field, phnrest, NULL); \ - } \ - phn_prev_set(a_type, a_field, phn0, NULL); \ - phn_next_set(a_type, a_field, phn0, NULL); \ - phn_prev_set(a_type, a_field, phn1, NULL); \ - phn_next_set(a_type, a_field, phn1, NULL); \ - phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \ - head = tail = phn0; \ - phn0 = phnrest; \ - while (phn0 != NULL) { \ - phn1 = phn_next_get(a_type, a_field, phn0); \ - if (phn1 != NULL) { \ - phnrest = phn_next_get(a_type, a_field, \ - phn1); \ - if (phnrest != NULL) { \ - phn_prev_set(a_type, a_field, \ - phnrest, NULL); \ - } \ - phn_prev_set(a_type, a_field, phn0, \ - NULL); \ - phn_next_set(a_type, a_field, phn0, \ - NULL); \ - phn_prev_set(a_type, a_field, phn1, \ - NULL); \ - phn_next_set(a_type, a_field, phn1, \ - NULL); \ - phn_merge(a_type, a_field, phn0, phn1, \ - a_cmp, phn0); \ - phn_next_set(a_type, a_field, tail, \ - phn0); \ - tail = phn0; \ - phn0 = phnrest; \ - } else { \ - phn_next_set(a_type, a_field, tail, \ - phn0); \ - tail = phn0; \ - phn0 = NULL; \ - } \ - } \ - phn0 = head; \ - phn1 = phn_next_get(a_type, a_field, phn0); \ - if (phn1 != NULL) { \ - while (true) { \ - head = phn_next_get(a_type, a_field, \ - phn1); \ - assert(phn_prev_get(a_type, a_field, \ - phn0) == NULL); \ - phn_next_set(a_type, a_field, phn0, \ - NULL); \ - assert(phn_prev_get(a_type, a_field, \ - phn1) == NULL); \ - phn_next_set(a_type, a_field, phn1, \ - NULL); \ - phn_merge(a_type, a_field, phn0, phn1, \ - a_cmp, phn0); \ - if (head == NULL) { \ - break; \ - } \ - phn_next_set(a_type, a_field, tail, \ - phn0); \ - tail = phn0; \ - phn0 = head; \ - phn1 = phn_next_get(a_type, a_field, \ - phn0); \ - } \ - } \ - } \ - r_phn = phn0; \ -} while (0) - -#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \ - a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \ - if (phn != NULL) { \ - phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \ - 
phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \ - phn_prev_set(a_type, a_field, phn, NULL); \ - ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \ - assert(phn_next_get(a_type, a_field, phn) == NULL); \ - phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \ - a_ph->ph_root); \ - } \ -} while (0) - -#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \ - a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \ - if (lchild == NULL) { \ - r_phn = NULL; \ - } else { \ - ph_merge_siblings(a_type, a_field, lchild, a_cmp, \ - r_phn); \ - } \ -} while (0) +typedef struct { \ + ph_t ph; \ +} a_prefix##_t; /* * The ph_proto() macro generates function prototypes that correspond to the * functions generated by an equivalently parameterized call to ph_gen(). */ -#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \ -a_attr void a_prefix##new(a_ph_type *ph); \ -a_attr bool a_prefix##empty(a_ph_type *ph); \ -a_attr a_type *a_prefix##first(a_ph_type *ph); \ -a_attr a_type *a_prefix##any(a_ph_type *ph); \ -a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \ -a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \ -a_attr a_type *a_prefix##remove_any(a_ph_type *ph); \ -a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn); +#define ph_proto(a_attr, a_prefix, a_type) \ + \ +a_attr void a_prefix##_new(a_prefix##_t *ph); \ +a_attr bool a_prefix##_empty(a_prefix##_t *ph); \ +a_attr a_type *a_prefix##_first(a_prefix##_t *ph); \ +a_attr a_type *a_prefix##_any(a_prefix##_t *ph); \ +a_attr void a_prefix##_insert(a_prefix##_t *ph, a_type *phn); \ +a_attr a_type *a_prefix##_remove_first(a_prefix##_t *ph); \ +a_attr void a_prefix##_remove(a_prefix##_t *ph, a_type *phn); \ +a_attr a_type *a_prefix##_remove_any(a_prefix##_t *ph); -/* - * The ph_gen() macro generates a type-specific pairing heap implementation, - * based on the above cpp macros. - */ -#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \ +/* The ph_gen() macro generates a type-specific pairing heap implementation. */ +#define ph_gen(a_attr, a_prefix, a_type, a_field, a_cmp) \ +JEMALLOC_ALWAYS_INLINE int \ +a_prefix##_ph_cmp(void *a, void *b) { \ + return a_cmp((a_type *)a, (a_type *)b); \ +} \ + \ a_attr void \ -a_prefix##new(a_ph_type *ph) { \ - memset(ph, 0, sizeof(ph(a_type))); \ +a_prefix##_new(a_prefix##_t *ph) { \ + ph_new(&ph->ph); \ } \ + \ a_attr bool \ -a_prefix##empty(a_ph_type *ph) { \ - return (ph->ph_root == NULL); \ +a_prefix##_empty(a_prefix##_t *ph) { \ + return ph_empty(&ph->ph); \ } \ + \ a_attr a_type * \ -a_prefix##first(a_ph_type *ph) { \ - if (ph->ph_root == NULL) { \ - return NULL; \ - } \ - ph_merge_aux(a_type, a_field, ph, a_cmp); \ - return ph->ph_root; \ +a_prefix##_first(a_prefix##_t *ph) { \ + return ph_first(&ph->ph, offsetof(a_type, a_field), \ + &a_prefix##_ph_cmp); \ } \ + \ a_attr a_type * \ -a_prefix##any(a_ph_type *ph) { \ - if (ph->ph_root == NULL) { \ - return NULL; \ - } \ - a_type *aux = phn_next_get(a_type, a_field, ph->ph_root); \ - if (aux != NULL) { \ - return aux; \ - } \ - return ph->ph_root; \ +a_prefix##_any(a_prefix##_t *ph) { \ + return ph_any(&ph->ph, offsetof(a_type, a_field)); \ } \ -a_attr void \ -a_prefix##insert(a_ph_type *ph, a_type *phn) { \ - memset(&phn->a_field, 0, sizeof(phn(a_type))); \ \ - /* \ - * Treat the root as an aux list during insertion, and lazily \ - * merge during a_prefix##remove_first(). 
For elements that \ - * are inserted, then removed via a_prefix##remove() before the \ - * aux list is ever processed, this makes insert/remove \ - * constant-time, whereas eager merging would make insert \ - * O(log n). \ - */ \ - if (ph->ph_root == NULL) { \ - ph->ph_root = phn; \ - } else { \ - phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \ - a_field, ph->ph_root)); \ - if (phn_next_get(a_type, a_field, ph->ph_root) != \ - NULL) { \ - phn_prev_set(a_type, a_field, \ - phn_next_get(a_type, a_field, ph->ph_root), \ - phn); \ - } \ - phn_prev_set(a_type, a_field, phn, ph->ph_root); \ - phn_next_set(a_type, a_field, ph->ph_root, phn); \ - } \ +a_attr void \ +a_prefix##_insert(a_prefix##_t *ph, a_type *phn) { \ + ph_insert(&ph->ph, phn, offsetof(a_type, a_field), \ + a_prefix##_ph_cmp); \ } \ -a_attr a_type * \ -a_prefix##remove_first(a_ph_type *ph) { \ - a_type *ret; \ \ - if (ph->ph_root == NULL) { \ - return NULL; \ - } \ - ph_merge_aux(a_type, a_field, ph, a_cmp); \ - \ - ret = ph->ph_root; \ - \ - ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \ - ph->ph_root); \ +a_attr a_type * \ +a_prefix##_remove_first(a_prefix##_t *ph) { \ + return ph_remove_first(&ph->ph, offsetof(a_type, a_field), \ + a_prefix##_ph_cmp); \ +} \ \ - return ret; \ +a_attr void \ +a_prefix##_remove(a_prefix##_t *ph, a_type *phn) { \ + ph_remove(&ph->ph, phn, offsetof(a_type, a_field), \ + a_prefix##_ph_cmp); \ } \ + \ a_attr a_type * \ -a_prefix##remove_any(a_ph_type *ph) { \ - /* \ - * Remove the most recently inserted aux list element, or the \ - * root if the aux list is empty. This has the effect of \ - * behaving as a LIFO (and insertion/removal is therefore \ - * constant-time) if a_prefix##[remove_]first() are never \ - * called. \ - */ \ - if (ph->ph_root == NULL) { \ - return NULL; \ - } \ - a_type *ret = phn_next_get(a_type, a_field, ph->ph_root); \ +a_prefix##_remove_any(a_prefix##_t *ph) { \ + a_type *ret = a_prefix##_any(ph); \ if (ret != NULL) { \ - a_type *aux = phn_next_get(a_type, a_field, ret); \ - phn_next_set(a_type, a_field, ph->ph_root, aux); \ - if (aux != NULL) { \ - phn_prev_set(a_type, a_field, aux, \ - ph->ph_root); \ - } \ - return ret; \ + a_prefix##_remove(ph, ret); \ } \ - ret = ph->ph_root; \ - ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \ - ph->ph_root); \ return ret; \ -} \ -a_attr void \ -a_prefix##remove(a_ph_type *ph, a_type *phn) { \ - a_type *replace, *parent; \ - \ - if (ph->ph_root == phn) { \ - /* \ - * We can delete from aux list without merging it, but \ - * we need to merge if we are dealing with the root \ - * node and it has children. \ - */ \ - if (phn_lchild_get(a_type, a_field, phn) == NULL) { \ - ph->ph_root = phn_next_get(a_type, a_field, \ - phn); \ - if (ph->ph_root != NULL) { \ - phn_prev_set(a_type, a_field, \ - ph->ph_root, NULL); \ - } \ - return; \ - } \ - ph_merge_aux(a_type, a_field, ph, a_cmp); \ - if (ph->ph_root == phn) { \ - ph_merge_children(a_type, a_field, ph->ph_root, \ - a_cmp, ph->ph_root); \ - return; \ - } \ - } \ - \ - /* Get parent (if phn is leftmost child) before mutating. */ \ - if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \ - if (phn_lchild_get(a_type, a_field, parent) != phn) { \ - parent = NULL; \ - } \ - } \ - /* Find a possible replacement node, and link to parent. */ \ - ph_merge_children(a_type, a_field, phn, a_cmp, replace); \ - /* Set next/prev for sibling linked list. 
*/ \ - if (replace != NULL) { \ - if (parent != NULL) { \ - phn_prev_set(a_type, a_field, replace, parent); \ - phn_lchild_set(a_type, a_field, parent, \ - replace); \ - } else { \ - phn_prev_set(a_type, a_field, replace, \ - phn_prev_get(a_type, a_field, phn)); \ - if (phn_prev_get(a_type, a_field, phn) != \ - NULL) { \ - phn_next_set(a_type, a_field, \ - phn_prev_get(a_type, a_field, phn), \ - replace); \ - } \ - } \ - phn_next_set(a_type, a_field, replace, \ - phn_next_get(a_type, a_field, phn)); \ - if (phn_next_get(a_type, a_field, phn) != NULL) { \ - phn_prev_set(a_type, a_field, \ - phn_next_get(a_type, a_field, phn), \ - replace); \ - } \ - } else { \ - if (parent != NULL) { \ - a_type *next = phn_next_get(a_type, a_field, \ - phn); \ - phn_lchild_set(a_type, a_field, parent, next); \ - if (next != NULL) { \ - phn_prev_set(a_type, a_field, next, \ - parent); \ - } \ - } else { \ - assert(phn_prev_get(a_type, a_field, phn) != \ - NULL); \ - phn_next_set(a_type, a_field, \ - phn_prev_get(a_type, a_field, phn), \ - phn_next_get(a_type, a_field, phn)); \ - } \ - if (phn_next_get(a_type, a_field, phn) != NULL) { \ - phn_prev_set(a_type, a_field, \ - phn_next_get(a_type, a_field, phn), \ - phn_prev_get(a_type, a_field, phn)); \ - } \ - } \ } -#endif /* PH_H_ */ +#endif /* JEMALLOC_INTERNAL_PH_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/private_namespace.h b/contrib/jemalloc/include/jemalloc/internal/private_namespace.h index a448ad63fa54e4..63861033590cd6 100644 --- a/contrib/jemalloc/include/jemalloc/internal/private_namespace.h +++ b/contrib/jemalloc/include/jemalloc/internal/private_namespace.h @@ -5,10 +5,9 @@ #define arena_init JEMALLOC_N(arena_init) #define arena_migrate JEMALLOC_N(arena_migrate) #define arena_set JEMALLOC_N(arena_set) -#define arena_tdata_get_hard JEMALLOC_N(arena_tdata_get_hard) #define arenas JEMALLOC_N(arenas) #define arenas_lock JEMALLOC_N(arenas_lock) -#define arenas_tdata_cleanup JEMALLOC_N(arenas_tdata_cleanup) +#define batch_alloc JEMALLOC_N(batch_alloc) #define bootstrap_calloc JEMALLOC_N(bootstrap_calloc) #define bootstrap_free JEMALLOC_N(bootstrap_free) #define bootstrap_malloc JEMALLOC_N(bootstrap_malloc) @@ -16,8 +15,10 @@ #define iarena_cleanup JEMALLOC_N(iarena_cleanup) #define je_sdallocx_noflags JEMALLOC_N(je_sdallocx_noflags) #define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child) +#define junk_alloc_callback JEMALLOC_N(junk_alloc_callback) +#define junk_free_callback JEMALLOC_N(junk_free_callback) #define malloc_default JEMALLOC_N(malloc_default) -#define malloc_initialized JEMALLOC_N(malloc_initialized) +#define malloc_init_state JEMALLOC_N(malloc_init_state) #define malloc_slow JEMALLOC_N(malloc_slow) #define manual_arena_base JEMALLOC_N(manual_arena_base) #define narenas_auto JEMALLOC_N(narenas_auto) @@ -25,49 +26,64 @@ #define ncpus JEMALLOC_N(ncpus) #define opt_abort JEMALLOC_N(opt_abort) #define opt_abort_conf JEMALLOC_N(opt_abort_conf) +#define opt_cache_oblivious JEMALLOC_N(opt_cache_oblivious) #define opt_confirm_conf JEMALLOC_N(opt_confirm_conf) +#define opt_experimental_infallible_new JEMALLOC_N(opt_experimental_infallible_new) +#define opt_hpa JEMALLOC_N(opt_hpa) +#define opt_hpa_opts JEMALLOC_N(opt_hpa_opts) +#define opt_hpa_sec_opts JEMALLOC_N(opt_hpa_sec_opts) #define opt_junk JEMALLOC_N(opt_junk) #define opt_junk_alloc JEMALLOC_N(opt_junk_alloc) #define opt_junk_free JEMALLOC_N(opt_junk_free) #define opt_narenas JEMALLOC_N(opt_narenas) +#define opt_narenas_ratio JEMALLOC_N(opt_narenas_ratio) +#define 
opt_trust_madvise JEMALLOC_N(opt_trust_madvise) #define opt_utrace JEMALLOC_N(opt_utrace) #define opt_xmalloc JEMALLOC_N(opt_xmalloc) #define opt_zero JEMALLOC_N(opt_zero) +#define opt_zero_realloc_action JEMALLOC_N(opt_zero_realloc_action) #define sdallocx_default JEMALLOC_N(sdallocx_default) -#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small) +#define zero_realloc_count JEMALLOC_N(zero_realloc_count) +#define zero_realloc_mode_names JEMALLOC_N(zero_realloc_mode_names) #define arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge) -#define arena_bin_choose_lock JEMALLOC_N(arena_bin_choose_lock) +#define arena_bin_choose JEMALLOC_N(arena_bin_choose) +#define arena_bin_offsets JEMALLOC_N(arena_bin_offsets) +#define arena_binind_div_info JEMALLOC_N(arena_binind_div_info) #define arena_boot JEMALLOC_N(arena_boot) +#define arena_cache_bin_fill_small JEMALLOC_N(arena_cache_bin_fill_small) #define arena_choose_huge JEMALLOC_N(arena_choose_huge) -#define arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked) -#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) +#define arena_config_default JEMALLOC_N(arena_config_default) +#define arena_dalloc_bin_locked_handle_newly_empty JEMALLOC_N(arena_dalloc_bin_locked_handle_newly_empty) +#define arena_dalloc_bin_locked_handle_newly_nonempty JEMALLOC_N(arena_dalloc_bin_locked_handle_newly_nonempty) #define arena_dalloc_promoted JEMALLOC_N(arena_dalloc_promoted) #define arena_dalloc_small JEMALLOC_N(arena_dalloc_small) #define arena_decay JEMALLOC_N(arena_decay) +#define arena_decay_ms_get JEMALLOC_N(arena_decay_ms_get) +#define arena_decay_ms_set JEMALLOC_N(arena_decay_ms_set) #define arena_destroy JEMALLOC_N(arena_destroy) #define arena_dirty_decay_ms_default_get JEMALLOC_N(arena_dirty_decay_ms_default_get) #define arena_dirty_decay_ms_default_set JEMALLOC_N(arena_dirty_decay_ms_default_set) -#define arena_dirty_decay_ms_get JEMALLOC_N(arena_dirty_decay_ms_get) -#define arena_dirty_decay_ms_set JEMALLOC_N(arena_dirty_decay_ms_set) +#define arena_do_deferred_work JEMALLOC_N(arena_do_deferred_work) #define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get) #define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set) +#define arena_emap_global JEMALLOC_N(arena_emap_global) #define arena_extent_alloc_large JEMALLOC_N(arena_extent_alloc_large) #define arena_extent_dalloc_large_prep JEMALLOC_N(arena_extent_dalloc_large_prep) #define arena_extent_ralloc_large_expand JEMALLOC_N(arena_extent_ralloc_large_expand) #define arena_extent_ralloc_large_shrink JEMALLOC_N(arena_extent_ralloc_large_shrink) -#define arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next) -#define arena_extents_dirty_dalloc JEMALLOC_N(arena_extents_dirty_dalloc) +#define arena_fill_small_fresh JEMALLOC_N(arena_fill_small_fresh) +#define arena_get_ehooks JEMALLOC_N(arena_get_ehooks) +#define arena_handle_deferred_work JEMALLOC_N(arena_handle_deferred_work) #define arena_init_huge JEMALLOC_N(arena_init_huge) #define arena_is_huge JEMALLOC_N(arena_is_huge) #define arena_malloc_hard JEMALLOC_N(arena_malloc_hard) #define arena_muzzy_decay_ms_default_get JEMALLOC_N(arena_muzzy_decay_ms_default_get) #define arena_muzzy_decay_ms_default_set JEMALLOC_N(arena_muzzy_decay_ms_default_set) -#define arena_muzzy_decay_ms_get JEMALLOC_N(arena_muzzy_decay_ms_get) -#define arena_muzzy_decay_ms_set JEMALLOC_N(arena_muzzy_decay_ms_set) #define arena_new JEMALLOC_N(arena_new) #define arena_nthreads_dec JEMALLOC_N(arena_nthreads_dec) #define arena_nthreads_get 
JEMALLOC_N(arena_nthreads_get) #define arena_nthreads_inc JEMALLOC_N(arena_nthreads_inc) +#define arena_pa_central_global JEMALLOC_N(arena_pa_central_global) #define arena_palloc JEMALLOC_N(arena_palloc) #define arena_postfork_child JEMALLOC_N(arena_postfork_child) #define arena_postfork_parent JEMALLOC_N(arena_postfork_parent) @@ -79,14 +95,15 @@ #define arena_prefork5 JEMALLOC_N(arena_prefork5) #define arena_prefork6 JEMALLOC_N(arena_prefork6) #define arena_prefork7 JEMALLOC_N(arena_prefork7) +#define arena_prefork8 JEMALLOC_N(arena_prefork8) #define arena_prof_promote JEMALLOC_N(arena_prof_promote) #define arena_ralloc JEMALLOC_N(arena_ralloc) #define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move) #define arena_reset JEMALLOC_N(arena_reset) #define arena_retain_grow_limit_get_set JEMALLOC_N(arena_retain_grow_limit_get_set) +#define arena_set_extent_hooks JEMALLOC_N(arena_set_extent_hooks) +#define arena_slab_dalloc JEMALLOC_N(arena_slab_dalloc) #define arena_stats_merge JEMALLOC_N(arena_stats_merge) -#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small) -#define h_steps JEMALLOC_N(h_steps) #define opt_dirty_decay_ms JEMALLOC_N(opt_dirty_decay_ms) #define opt_muzzy_decay_ms JEMALLOC_N(opt_muzzy_decay_ms) #define opt_oversize_threshold JEMALLOC_N(opt_oversize_threshold) @@ -99,13 +116,14 @@ #define background_thread_ctl_init JEMALLOC_N(background_thread_ctl_init) #define background_thread_enabled_state JEMALLOC_N(background_thread_enabled_state) #define background_thread_info JEMALLOC_N(background_thread_info) -#define background_thread_interval_check JEMALLOC_N(background_thread_interval_check) +#define background_thread_is_started JEMALLOC_N(background_thread_is_started) #define background_thread_lock JEMALLOC_N(background_thread_lock) #define background_thread_postfork_child JEMALLOC_N(background_thread_postfork_child) #define background_thread_postfork_parent JEMALLOC_N(background_thread_postfork_parent) #define background_thread_prefork0 JEMALLOC_N(background_thread_prefork0) #define background_thread_prefork1 JEMALLOC_N(background_thread_prefork1) #define background_thread_stats_read JEMALLOC_N(background_thread_stats_read) +#define background_thread_wakeup_early JEMALLOC_N(background_thread_wakeup_early) #define background_threads_disable JEMALLOC_N(background_threads_disable) #define background_threads_enable JEMALLOC_N(background_threads_enable) #define max_background_threads JEMALLOC_N(max_background_threads) @@ -115,10 +133,11 @@ #define pthread_create_wrapper JEMALLOC_N(pthread_create_wrapper) #define b0get JEMALLOC_N(b0get) #define base_alloc JEMALLOC_N(base_alloc) -#define base_alloc_extent JEMALLOC_N(base_alloc_extent) +#define base_alloc_edata JEMALLOC_N(base_alloc_edata) #define base_boot JEMALLOC_N(base_boot) #define base_delete JEMALLOC_N(base_delete) -#define base_extent_hooks_get JEMALLOC_N(base_extent_hooks_get) +#define base_ehooks_get JEMALLOC_N(base_ehooks_get) +#define base_ehooks_get_for_metadata JEMALLOC_N(base_ehooks_get_for_metadata) #define base_extent_hooks_set JEMALLOC_N(base_extent_hooks_set) #define base_new JEMALLOC_N(base_new) #define base_postfork_child JEMALLOC_N(base_postfork_child) @@ -127,17 +146,28 @@ #define base_stats_get JEMALLOC_N(base_stats_get) #define metadata_thp_mode_names JEMALLOC_N(metadata_thp_mode_names) #define opt_metadata_thp JEMALLOC_N(opt_metadata_thp) -#define bin_boot JEMALLOC_N(bin_boot) -#define bin_infos JEMALLOC_N(bin_infos) #define bin_init JEMALLOC_N(bin_init) #define bin_postfork_child 
JEMALLOC_N(bin_postfork_child) #define bin_postfork_parent JEMALLOC_N(bin_postfork_parent) #define bin_prefork JEMALLOC_N(bin_prefork) #define bin_shard_sizes_boot JEMALLOC_N(bin_shard_sizes_boot) #define bin_update_shard_size JEMALLOC_N(bin_update_shard_size) +#define bin_info_boot JEMALLOC_N(bin_info_boot) +#define bin_infos JEMALLOC_N(bin_infos) #define bitmap_info_init JEMALLOC_N(bitmap_info_init) #define bitmap_init JEMALLOC_N(bitmap_init) #define bitmap_size JEMALLOC_N(bitmap_size) +#define buf_writer_cb JEMALLOC_N(buf_writer_cb) +#define buf_writer_flush JEMALLOC_N(buf_writer_flush) +#define buf_writer_init JEMALLOC_N(buf_writer_init) +#define buf_writer_pipe JEMALLOC_N(buf_writer_pipe) +#define buf_writer_terminate JEMALLOC_N(buf_writer_terminate) +#define cache_bin_info_compute_alloc JEMALLOC_N(cache_bin_info_compute_alloc) +#define cache_bin_info_init JEMALLOC_N(cache_bin_info_init) +#define cache_bin_init JEMALLOC_N(cache_bin_init) +#define cache_bin_postincrement JEMALLOC_N(cache_bin_postincrement) +#define cache_bin_preincrement JEMALLOC_N(cache_bin_preincrement) +#define cache_bin_still_zero_initialized JEMALLOC_N(cache_bin_still_zero_initialized) #define ckh_count JEMALLOC_N(ckh_count) #define ckh_delete JEMALLOC_N(ckh_delete) #define ckh_insert JEMALLOC_N(ckh_insert) @@ -149,61 +179,116 @@ #define ckh_search JEMALLOC_N(ckh_search) #define ckh_string_hash JEMALLOC_N(ckh_string_hash) #define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp) +#define counter_accum_init JEMALLOC_N(counter_accum_init) +#define counter_postfork_child JEMALLOC_N(counter_postfork_child) +#define counter_postfork_parent JEMALLOC_N(counter_postfork_parent) +#define counter_prefork JEMALLOC_N(counter_prefork) #define ctl_boot JEMALLOC_N(ctl_boot) #define ctl_bymib JEMALLOC_N(ctl_bymib) +#define ctl_bymibname JEMALLOC_N(ctl_bymibname) #define ctl_byname JEMALLOC_N(ctl_byname) +#define ctl_mibnametomib JEMALLOC_N(ctl_mibnametomib) +#define ctl_mtx_assert_held JEMALLOC_N(ctl_mtx_assert_held) #define ctl_nametomib JEMALLOC_N(ctl_nametomib) #define ctl_postfork_child JEMALLOC_N(ctl_postfork_child) #define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent) #define ctl_prefork JEMALLOC_N(ctl_prefork) +#define decay_deadline_init JEMALLOC_N(decay_deadline_init) +#define decay_init JEMALLOC_N(decay_init) +#define decay_maybe_advance_epoch JEMALLOC_N(decay_maybe_advance_epoch) +#define decay_ms_valid JEMALLOC_N(decay_ms_valid) +#define decay_npages_purge_in JEMALLOC_N(decay_npages_purge_in) +#define decay_ns_until_purge JEMALLOC_N(decay_ns_until_purge) +#define decay_reinit JEMALLOC_N(decay_reinit) #define div_init JEMALLOC_N(div_init) -#define extent_alloc JEMALLOC_N(extent_alloc) +#define ecache_init JEMALLOC_N(ecache_init) +#define ecache_postfork_child JEMALLOC_N(ecache_postfork_child) +#define ecache_postfork_parent JEMALLOC_N(ecache_postfork_parent) +#define ecache_prefork JEMALLOC_N(ecache_prefork) +#define edata_avail_any JEMALLOC_N(edata_avail_any) +#define edata_avail_empty JEMALLOC_N(edata_avail_empty) +#define edata_avail_first JEMALLOC_N(edata_avail_first) +#define edata_avail_insert JEMALLOC_N(edata_avail_insert) +#define edata_avail_new JEMALLOC_N(edata_avail_new) +#define edata_avail_remove JEMALLOC_N(edata_avail_remove) +#define edata_avail_remove_any JEMALLOC_N(edata_avail_remove_any) +#define edata_avail_remove_first JEMALLOC_N(edata_avail_remove_first) +#define edata_heap_any JEMALLOC_N(edata_heap_any) +#define edata_heap_empty JEMALLOC_N(edata_heap_empty) +#define edata_heap_first 
JEMALLOC_N(edata_heap_first) +#define edata_heap_insert JEMALLOC_N(edata_heap_insert) +#define edata_heap_new JEMALLOC_N(edata_heap_new) +#define edata_heap_remove JEMALLOC_N(edata_heap_remove) +#define edata_heap_remove_any JEMALLOC_N(edata_heap_remove_any) +#define edata_heap_remove_first JEMALLOC_N(edata_heap_remove_first) +#define edata_cache_fast_disable JEMALLOC_N(edata_cache_fast_disable) +#define edata_cache_fast_get JEMALLOC_N(edata_cache_fast_get) +#define edata_cache_fast_init JEMALLOC_N(edata_cache_fast_init) +#define edata_cache_fast_put JEMALLOC_N(edata_cache_fast_put) +#define edata_cache_get JEMALLOC_N(edata_cache_get) +#define edata_cache_init JEMALLOC_N(edata_cache_init) +#define edata_cache_postfork_child JEMALLOC_N(edata_cache_postfork_child) +#define edata_cache_postfork_parent JEMALLOC_N(edata_cache_postfork_parent) +#define edata_cache_prefork JEMALLOC_N(edata_cache_prefork) +#define edata_cache_put JEMALLOC_N(edata_cache_put) +#define ehooks_default_alloc_impl JEMALLOC_N(ehooks_default_alloc_impl) +#define ehooks_default_commit_impl JEMALLOC_N(ehooks_default_commit_impl) +#define ehooks_default_dalloc_impl JEMALLOC_N(ehooks_default_dalloc_impl) +#define ehooks_default_decommit_impl JEMALLOC_N(ehooks_default_decommit_impl) +#define ehooks_default_destroy_impl JEMALLOC_N(ehooks_default_destroy_impl) +#define ehooks_default_extent_hooks JEMALLOC_N(ehooks_default_extent_hooks) +#define ehooks_default_guard_impl JEMALLOC_N(ehooks_default_guard_impl) +#define ehooks_default_merge JEMALLOC_N(ehooks_default_merge) +#define ehooks_default_merge_impl JEMALLOC_N(ehooks_default_merge_impl) +#define ehooks_default_purge_forced_impl JEMALLOC_N(ehooks_default_purge_forced_impl) +#define ehooks_default_purge_lazy_impl JEMALLOC_N(ehooks_default_purge_lazy_impl) +#define ehooks_default_split_impl JEMALLOC_N(ehooks_default_split_impl) +#define ehooks_default_unguard_impl JEMALLOC_N(ehooks_default_unguard_impl) +#define ehooks_default_zero_impl JEMALLOC_N(ehooks_default_zero_impl) +#define ehooks_init JEMALLOC_N(ehooks_init) +#define emap_deregister_boundary JEMALLOC_N(emap_deregister_boundary) +#define emap_deregister_interior JEMALLOC_N(emap_deregister_interior) +#define emap_do_assert_mapped JEMALLOC_N(emap_do_assert_mapped) +#define emap_do_assert_not_mapped JEMALLOC_N(emap_do_assert_not_mapped) +#define emap_init JEMALLOC_N(emap_init) +#define emap_merge_commit JEMALLOC_N(emap_merge_commit) +#define emap_merge_prepare JEMALLOC_N(emap_merge_prepare) +#define emap_register_boundary JEMALLOC_N(emap_register_boundary) +#define emap_register_interior JEMALLOC_N(emap_register_interior) +#define emap_release_edata JEMALLOC_N(emap_release_edata) +#define emap_remap JEMALLOC_N(emap_remap) +#define emap_split_commit JEMALLOC_N(emap_split_commit) +#define emap_split_prepare JEMALLOC_N(emap_split_prepare) +#define emap_try_acquire_edata_neighbor JEMALLOC_N(emap_try_acquire_edata_neighbor) +#define emap_try_acquire_edata_neighbor_expand JEMALLOC_N(emap_try_acquire_edata_neighbor_expand) +#define emap_update_edata_state JEMALLOC_N(emap_update_edata_state) +#define eset_fit JEMALLOC_N(eset_fit) +#define eset_init JEMALLOC_N(eset_init) +#define eset_insert JEMALLOC_N(eset_insert) +#define eset_nbytes_get JEMALLOC_N(eset_nbytes_get) +#define eset_nextents_get JEMALLOC_N(eset_nextents_get) +#define eset_npages_get JEMALLOC_N(eset_npages_get) +#define eset_remove JEMALLOC_N(eset_remove) +#define exp_grow_init JEMALLOC_N(exp_grow_init) +#define ecache_alloc JEMALLOC_N(ecache_alloc) +#define 
ecache_alloc_grow JEMALLOC_N(ecache_alloc_grow) +#define ecache_dalloc JEMALLOC_N(ecache_dalloc) +#define ecache_evict JEMALLOC_N(ecache_evict) #define extent_alloc_wrapper JEMALLOC_N(extent_alloc_wrapper) -#define extent_avail_any JEMALLOC_N(extent_avail_any) -#define extent_avail_empty JEMALLOC_N(extent_avail_empty) -#define extent_avail_first JEMALLOC_N(extent_avail_first) -#define extent_avail_insert JEMALLOC_N(extent_avail_insert) -#define extent_avail_new JEMALLOC_N(extent_avail_new) -#define extent_avail_remove JEMALLOC_N(extent_avail_remove) -#define extent_avail_remove_any JEMALLOC_N(extent_avail_remove_any) -#define extent_avail_remove_first JEMALLOC_N(extent_avail_remove_first) #define extent_boot JEMALLOC_N(extent_boot) #define extent_commit_wrapper JEMALLOC_N(extent_commit_wrapper) -#define extent_dalloc JEMALLOC_N(extent_dalloc) +#define extent_commit_zero JEMALLOC_N(extent_commit_zero) #define extent_dalloc_gap JEMALLOC_N(extent_dalloc_gap) #define extent_dalloc_wrapper JEMALLOC_N(extent_dalloc_wrapper) #define extent_decommit_wrapper JEMALLOC_N(extent_decommit_wrapper) #define extent_destroy_wrapper JEMALLOC_N(extent_destroy_wrapper) -#define extent_heap_any JEMALLOC_N(extent_heap_any) -#define extent_heap_empty JEMALLOC_N(extent_heap_empty) -#define extent_heap_first JEMALLOC_N(extent_heap_first) -#define extent_heap_insert JEMALLOC_N(extent_heap_insert) -#define extent_heap_new JEMALLOC_N(extent_heap_new) -#define extent_heap_remove JEMALLOC_N(extent_heap_remove) -#define extent_heap_remove_any JEMALLOC_N(extent_heap_remove_any) -#define extent_heap_remove_first JEMALLOC_N(extent_heap_remove_first) -#define extent_hooks_default JEMALLOC_N(extent_hooks_default) -#define extent_hooks_get JEMALLOC_N(extent_hooks_get) -#define extent_hooks_set JEMALLOC_N(extent_hooks_set) +#define extent_gdump_add JEMALLOC_N(extent_gdump_add) #define extent_merge_wrapper JEMALLOC_N(extent_merge_wrapper) -#define extent_mutex_pool JEMALLOC_N(extent_mutex_pool) #define extent_purge_forced_wrapper JEMALLOC_N(extent_purge_forced_wrapper) #define extent_purge_lazy_wrapper JEMALLOC_N(extent_purge_lazy_wrapper) +#define extent_record JEMALLOC_N(extent_record) +#define extent_sn_next JEMALLOC_N(extent_sn_next) #define extent_split_wrapper JEMALLOC_N(extent_split_wrapper) -#define extent_util_stats_get JEMALLOC_N(extent_util_stats_get) -#define extent_util_stats_verbose_get JEMALLOC_N(extent_util_stats_verbose_get) -#define extents_alloc JEMALLOC_N(extents_alloc) -#define extents_dalloc JEMALLOC_N(extents_dalloc) -#define extents_evict JEMALLOC_N(extents_evict) -#define extents_init JEMALLOC_N(extents_init) -#define extents_nbytes_get JEMALLOC_N(extents_nbytes_get) -#define extents_nextents_get JEMALLOC_N(extents_nextents_get) -#define extents_npages_get JEMALLOC_N(extents_npages_get) -#define extents_postfork_child JEMALLOC_N(extents_postfork_child) -#define extents_postfork_parent JEMALLOC_N(extents_postfork_parent) -#define extents_prefork JEMALLOC_N(extents_prefork) -#define extents_rtree JEMALLOC_N(extents_rtree) -#define extents_state_get JEMALLOC_N(extents_state_get) #define opt_lg_extent_max_active_fit JEMALLOC_N(opt_lg_extent_max_active_fit) #define dss_prec_names JEMALLOC_N(dss_prec_names) #define extent_alloc_dss JEMALLOC_N(extent_alloc_dss) @@ -216,24 +301,66 @@ #define extent_alloc_mmap JEMALLOC_N(extent_alloc_mmap) #define extent_dalloc_mmap JEMALLOC_N(extent_dalloc_mmap) #define opt_retain JEMALLOC_N(opt_retain) +#define fxp_parse JEMALLOC_N(fxp_parse) +#define fxp_print 
JEMALLOC_N(fxp_print) +#define opt_lg_san_uaf_align JEMALLOC_N(opt_lg_san_uaf_align) +#define opt_san_guard_large JEMALLOC_N(opt_san_guard_large) +#define opt_san_guard_small JEMALLOC_N(opt_san_guard_small) +#define san_cache_bin_nonfast_mask JEMALLOC_N(san_cache_bin_nonfast_mask) +#define san_check_stashed_ptrs JEMALLOC_N(san_check_stashed_ptrs) +#define san_guard_pages JEMALLOC_N(san_guard_pages) +#define san_init JEMALLOC_N(san_init) +#define san_unguard_pages JEMALLOC_N(san_unguard_pages) +#define san_unguard_pages_pre_destroy JEMALLOC_N(san_unguard_pages_pre_destroy) +#define tsd_san_init JEMALLOC_N(tsd_san_init) +#define san_bump_alloc JEMALLOC_N(san_bump_alloc) #define hook_boot JEMALLOC_N(hook_boot) #define hook_install JEMALLOC_N(hook_install) #define hook_invoke_alloc JEMALLOC_N(hook_invoke_alloc) #define hook_invoke_dalloc JEMALLOC_N(hook_invoke_dalloc) #define hook_invoke_expand JEMALLOC_N(hook_invoke_expand) #define hook_remove JEMALLOC_N(hook_remove) +#define hpa_central_extract JEMALLOC_N(hpa_central_extract) +#define hpa_central_init JEMALLOC_N(hpa_central_init) +#define hpa_shard_destroy JEMALLOC_N(hpa_shard_destroy) +#define hpa_shard_disable JEMALLOC_N(hpa_shard_disable) +#define hpa_shard_do_deferred_work JEMALLOC_N(hpa_shard_do_deferred_work) +#define hpa_shard_init JEMALLOC_N(hpa_shard_init) +#define hpa_shard_postfork_child JEMALLOC_N(hpa_shard_postfork_child) +#define hpa_shard_postfork_parent JEMALLOC_N(hpa_shard_postfork_parent) +#define hpa_shard_prefork3 JEMALLOC_N(hpa_shard_prefork3) +#define hpa_shard_prefork4 JEMALLOC_N(hpa_shard_prefork4) +#define hpa_shard_set_deferral_allowed JEMALLOC_N(hpa_shard_set_deferral_allowed) +#define hpa_shard_stats_accum JEMALLOC_N(hpa_shard_stats_accum) +#define hpa_shard_stats_merge JEMALLOC_N(hpa_shard_stats_merge) +#define hpa_supported JEMALLOC_N(hpa_supported) +#define hpa_hooks_default JEMALLOC_N(hpa_hooks_default) +#define hpdata_age_heap_any JEMALLOC_N(hpdata_age_heap_any) +#define hpdata_age_heap_empty JEMALLOC_N(hpdata_age_heap_empty) +#define hpdata_age_heap_first JEMALLOC_N(hpdata_age_heap_first) +#define hpdata_age_heap_insert JEMALLOC_N(hpdata_age_heap_insert) +#define hpdata_age_heap_new JEMALLOC_N(hpdata_age_heap_new) +#define hpdata_age_heap_remove JEMALLOC_N(hpdata_age_heap_remove) +#define hpdata_age_heap_remove_any JEMALLOC_N(hpdata_age_heap_remove_any) +#define hpdata_age_heap_remove_first JEMALLOC_N(hpdata_age_heap_remove_first) +#define hpdata_dehugify JEMALLOC_N(hpdata_dehugify) +#define hpdata_hugify JEMALLOC_N(hpdata_hugify) +#define hpdata_init JEMALLOC_N(hpdata_init) +#define hpdata_purge_begin JEMALLOC_N(hpdata_purge_begin) +#define hpdata_purge_end JEMALLOC_N(hpdata_purge_end) +#define hpdata_purge_next JEMALLOC_N(hpdata_purge_next) +#define hpdata_reserve_alloc JEMALLOC_N(hpdata_reserve_alloc) +#define hpdata_unreserve JEMALLOC_N(hpdata_unreserve) +#define inspect_extent_util_stats_get JEMALLOC_N(inspect_extent_util_stats_get) +#define inspect_extent_util_stats_verbose_get JEMALLOC_N(inspect_extent_util_stats_verbose_get) #define large_dalloc JEMALLOC_N(large_dalloc) #define large_dalloc_finish JEMALLOC_N(large_dalloc_finish) -#define large_dalloc_junk JEMALLOC_N(large_dalloc_junk) -#define large_dalloc_maybe_junk JEMALLOC_N(large_dalloc_maybe_junk) -#define large_dalloc_prep_junked_locked JEMALLOC_N(large_dalloc_prep_junked_locked) +#define large_dalloc_prep_locked JEMALLOC_N(large_dalloc_prep_locked) #define large_malloc JEMALLOC_N(large_malloc) #define large_palloc JEMALLOC_N(large_palloc) 
-#define large_prof_alloc_time_get JEMALLOC_N(large_prof_alloc_time_get) -#define large_prof_alloc_time_set JEMALLOC_N(large_prof_alloc_time_set) -#define large_prof_tctx_get JEMALLOC_N(large_prof_tctx_get) +#define large_prof_info_get JEMALLOC_N(large_prof_info_get) +#define large_prof_info_set JEMALLOC_N(large_prof_info_set) #define large_prof_tctx_reset JEMALLOC_N(large_prof_tctx_reset) -#define large_prof_tctx_set JEMALLOC_N(large_prof_tctx_set) #define large_ralloc JEMALLOC_N(large_ralloc) #define large_ralloc_no_move JEMALLOC_N(large_ralloc_no_move) #define large_salloc JEMALLOC_N(large_salloc) @@ -248,6 +375,7 @@ #define malloc_vcprintf JEMALLOC_N(malloc_vcprintf) #define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf) #define malloc_write JEMALLOC_N(malloc_write) +#define wrtmessage JEMALLOC_N(wrtmessage) #define malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot) #define malloc_mutex_init JEMALLOC_N(malloc_mutex_init) #define malloc_mutex_lock_slow JEMALLOC_N(malloc_mutex_lock_slow) @@ -255,7 +383,7 @@ #define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent) #define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork) #define malloc_mutex_prof_data_reset JEMALLOC_N(malloc_mutex_prof_data_reset) -#define mutex_pool_init JEMALLOC_N(mutex_pool_init) +#define opt_mutex_max_spin JEMALLOC_N(opt_mutex_max_spin) #define nstime_add JEMALLOC_N(nstime_add) #define nstime_compare JEMALLOC_N(nstime_compare) #define nstime_copy JEMALLOC_N(nstime_copy) @@ -265,14 +393,56 @@ #define nstime_imultiply JEMALLOC_N(nstime_imultiply) #define nstime_init JEMALLOC_N(nstime_init) #define nstime_init2 JEMALLOC_N(nstime_init2) +#define nstime_init_update JEMALLOC_N(nstime_init_update) #define nstime_isubtract JEMALLOC_N(nstime_isubtract) #define nstime_monotonic JEMALLOC_N(nstime_monotonic) #define nstime_msec JEMALLOC_N(nstime_msec) #define nstime_ns JEMALLOC_N(nstime_ns) +#define nstime_ns_since JEMALLOC_N(nstime_ns_since) #define nstime_nsec JEMALLOC_N(nstime_nsec) +#define nstime_prof_init_update JEMALLOC_N(nstime_prof_init_update) +#define nstime_prof_update JEMALLOC_N(nstime_prof_update) #define nstime_sec JEMALLOC_N(nstime_sec) #define nstime_subtract JEMALLOC_N(nstime_subtract) #define nstime_update JEMALLOC_N(nstime_update) +#define opt_prof_time_res JEMALLOC_N(opt_prof_time_res) +#define prof_time_res_mode_names JEMALLOC_N(prof_time_res_mode_names) +#define pa_alloc JEMALLOC_N(pa_alloc) +#define pa_central_init JEMALLOC_N(pa_central_init) +#define pa_dalloc JEMALLOC_N(pa_dalloc) +#define pa_decay_ms_get JEMALLOC_N(pa_decay_ms_get) +#define pa_decay_ms_set JEMALLOC_N(pa_decay_ms_set) +#define pa_expand JEMALLOC_N(pa_expand) +#define pa_shard_destroy JEMALLOC_N(pa_shard_destroy) +#define pa_shard_disable_hpa JEMALLOC_N(pa_shard_disable_hpa) +#define pa_shard_do_deferred_work JEMALLOC_N(pa_shard_do_deferred_work) +#define pa_shard_enable_hpa JEMALLOC_N(pa_shard_enable_hpa) +#define pa_shard_init JEMALLOC_N(pa_shard_init) +#define pa_shard_reset JEMALLOC_N(pa_shard_reset) +#define pa_shard_retain_grow_limit_get_set JEMALLOC_N(pa_shard_retain_grow_limit_get_set) +#define pa_shard_set_deferral_allowed JEMALLOC_N(pa_shard_set_deferral_allowed) +#define pa_shard_time_until_deferred_work JEMALLOC_N(pa_shard_time_until_deferred_work) +#define pa_shrink JEMALLOC_N(pa_shrink) +#define pa_shard_basic_stats_merge JEMALLOC_N(pa_shard_basic_stats_merge) +#define pa_shard_mtx_stats_read JEMALLOC_N(pa_shard_mtx_stats_read) +#define pa_shard_postfork_child JEMALLOC_N(pa_shard_postfork_child) +#define 
pa_shard_postfork_parent JEMALLOC_N(pa_shard_postfork_parent) +#define pa_shard_prefork0 JEMALLOC_N(pa_shard_prefork0) +#define pa_shard_prefork2 JEMALLOC_N(pa_shard_prefork2) +#define pa_shard_prefork3 JEMALLOC_N(pa_shard_prefork3) +#define pa_shard_prefork4 JEMALLOC_N(pa_shard_prefork4) +#define pa_shard_prefork5 JEMALLOC_N(pa_shard_prefork5) +#define pa_shard_stats_merge JEMALLOC_N(pa_shard_stats_merge) +#define pai_alloc_batch_default JEMALLOC_N(pai_alloc_batch_default) +#define pai_dalloc_batch_default JEMALLOC_N(pai_dalloc_batch_default) +#define pac_decay_all JEMALLOC_N(pac_decay_all) +#define pac_decay_ms_get JEMALLOC_N(pac_decay_ms_get) +#define pac_decay_ms_set JEMALLOC_N(pac_decay_ms_set) +#define pac_destroy JEMALLOC_N(pac_destroy) +#define pac_init JEMALLOC_N(pac_init) +#define pac_maybe_decay_purge JEMALLOC_N(pac_maybe_decay_purge) +#define pac_reset JEMALLOC_N(pac_reset) +#define pac_retain_grow_limit_get_set JEMALLOC_N(pac_retain_grow_limit_get_set) #define init_system_thp_mode JEMALLOC_N(init_system_thp_mode) #define opt_thp JEMALLOC_N(opt_thp) #define pages_boot JEMALLOC_N(pages_boot) @@ -282,14 +452,23 @@ #define pages_dontdump JEMALLOC_N(pages_dontdump) #define pages_huge JEMALLOC_N(pages_huge) #define pages_map JEMALLOC_N(pages_map) +#define pages_mark_guards JEMALLOC_N(pages_mark_guards) #define pages_nohuge JEMALLOC_N(pages_nohuge) #define pages_purge_forced JEMALLOC_N(pages_purge_forced) #define pages_purge_lazy JEMALLOC_N(pages_purge_lazy) #define pages_set_thp_state JEMALLOC_N(pages_set_thp_state) #define pages_unmap JEMALLOC_N(pages_unmap) +#define pages_unmark_guards JEMALLOC_N(pages_unmark_guards) #define thp_mode_names JEMALLOC_N(thp_mode_names) -#define bt2gctx_mtx JEMALLOC_N(bt2gctx_mtx) -#define bt_init JEMALLOC_N(bt_init) +#define peak_alloc_event_handler JEMALLOC_N(peak_alloc_event_handler) +#define peak_alloc_new_event_wait JEMALLOC_N(peak_alloc_new_event_wait) +#define peak_alloc_postponed_event_wait JEMALLOC_N(peak_alloc_postponed_event_wait) +#define peak_dalloc_event_handler JEMALLOC_N(peak_dalloc_event_handler) +#define peak_dalloc_new_event_wait JEMALLOC_N(peak_dalloc_new_event_wait) +#define peak_dalloc_postponed_event_wait JEMALLOC_N(peak_dalloc_postponed_event_wait) +#define peak_event_max JEMALLOC_N(peak_event_max) +#define peak_event_update JEMALLOC_N(peak_event_update) +#define peak_event_zero JEMALLOC_N(peak_event_zero) #define lg_prof_sample JEMALLOC_N(lg_prof_sample) #define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval) #define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample) @@ -299,20 +478,25 @@ #define opt_prof_final JEMALLOC_N(opt_prof_final) #define opt_prof_gdump JEMALLOC_N(opt_prof_gdump) #define opt_prof_leak JEMALLOC_N(opt_prof_leak) -#define opt_prof_log JEMALLOC_N(opt_prof_log) +#define opt_prof_leak_error JEMALLOC_N(opt_prof_leak_error) #define opt_prof_prefix JEMALLOC_N(opt_prof_prefix) +#define opt_prof_sys_thread_name JEMALLOC_N(opt_prof_sys_thread_name) #define opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init) -#define prof_accum_init JEMALLOC_N(prof_accum_init) -#define prof_active JEMALLOC_N(prof_active) +#define opt_prof_unbias JEMALLOC_N(opt_prof_unbias) #define prof_active_get JEMALLOC_N(prof_active_get) #define prof_active_set JEMALLOC_N(prof_active_set) +#define prof_active_state JEMALLOC_N(prof_active_state) #define prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback) -#define prof_backtrace JEMALLOC_N(prof_backtrace) +#define prof_backtrace_hook JEMALLOC_N(prof_backtrace_hook) +#define 
prof_backtrace_hook_get JEMALLOC_N(prof_backtrace_hook_get) +#define prof_backtrace_hook_set JEMALLOC_N(prof_backtrace_hook_set) #define prof_boot0 JEMALLOC_N(prof_boot0) #define prof_boot1 JEMALLOC_N(prof_boot1) #define prof_boot2 JEMALLOC_N(prof_boot2) -#define prof_dump_header JEMALLOC_N(prof_dump_header) -#define prof_dump_open JEMALLOC_N(prof_dump_open) +#define prof_booted JEMALLOC_N(prof_booted) +#define prof_dump_hook JEMALLOC_N(prof_dump_hook) +#define prof_dump_hook_get JEMALLOC_N(prof_dump_hook_get) +#define prof_dump_hook_set JEMALLOC_N(prof_dump_hook_set) #define prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object) #define prof_gdump JEMALLOC_N(prof_gdump) #define prof_gdump_get JEMALLOC_N(prof_gdump_get) @@ -320,18 +504,16 @@ #define prof_gdump_val JEMALLOC_N(prof_gdump_val) #define prof_idump JEMALLOC_N(prof_idump) #define prof_interval JEMALLOC_N(prof_interval) -#define prof_log_start JEMALLOC_N(prof_log_start) -#define prof_log_stop JEMALLOC_N(prof_log_stop) -#define prof_logging_state JEMALLOC_N(prof_logging_state) -#define prof_lookup JEMALLOC_N(prof_lookup) #define prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object) #define prof_mdump JEMALLOC_N(prof_mdump) #define prof_postfork_child JEMALLOC_N(prof_postfork_child) #define prof_postfork_parent JEMALLOC_N(prof_postfork_parent) #define prof_prefork0 JEMALLOC_N(prof_prefork0) #define prof_prefork1 JEMALLOC_N(prof_prefork1) -#define prof_reset JEMALLOC_N(prof_reset) -#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update) +#define prof_sample_event_handler JEMALLOC_N(prof_sample_event_handler) +#define prof_sample_new_event_wait JEMALLOC_N(prof_sample_new_event_wait) +#define prof_sample_postponed_event_wait JEMALLOC_N(prof_sample_postponed_event_wait) +#define prof_tctx_create JEMALLOC_N(prof_tctx_create) #define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup) #define prof_tdata_init JEMALLOC_N(prof_tdata_init) #define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit) @@ -341,42 +523,157 @@ #define prof_thread_active_set JEMALLOC_N(prof_thread_active_set) #define prof_thread_name_get JEMALLOC_N(prof_thread_name_get) #define prof_thread_name_set JEMALLOC_N(prof_thread_name_set) +#define bt2gctx_mtx JEMALLOC_N(bt2gctx_mtx) +#define gctx_locks JEMALLOC_N(gctx_locks) +#define prof_bt_count JEMALLOC_N(prof_bt_count) +#define prof_bt_hash JEMALLOC_N(prof_bt_hash) +#define prof_bt_keycomp JEMALLOC_N(prof_bt_keycomp) +#define prof_cnt_all JEMALLOC_N(prof_cnt_all) +#define prof_data_init JEMALLOC_N(prof_data_init) +#define prof_dump_impl JEMALLOC_N(prof_dump_impl) +#define prof_dump_mtx JEMALLOC_N(prof_dump_mtx) +#define prof_lookup JEMALLOC_N(prof_lookup) +#define prof_reset JEMALLOC_N(prof_reset) +#define prof_shifted_unbiased_cnt JEMALLOC_N(prof_shifted_unbiased_cnt) +#define prof_tctx_try_destroy JEMALLOC_N(prof_tctx_try_destroy) +#define prof_tdata_count JEMALLOC_N(prof_tdata_count) +#define prof_tdata_detach JEMALLOC_N(prof_tdata_detach) +#define prof_tdata_init_impl JEMALLOC_N(prof_tdata_init_impl) +#define prof_thread_name_alloc JEMALLOC_N(prof_thread_name_alloc) +#define prof_thread_name_set_impl JEMALLOC_N(prof_thread_name_set_impl) +#define prof_unbias_map_init JEMALLOC_N(prof_unbias_map_init) +#define prof_unbiased_sz JEMALLOC_N(prof_unbiased_sz) +#define tdata_locks JEMALLOC_N(tdata_locks) +#define tdatas_mtx JEMALLOC_N(tdatas_mtx) +#define log_mtx JEMALLOC_N(log_mtx) +#define opt_prof_log JEMALLOC_N(opt_prof_log) +#define prof_log_alloc_count JEMALLOC_N(prof_log_alloc_count) 
+#define prof_log_bt_count JEMALLOC_N(prof_log_bt_count) +#define prof_log_dummy_set JEMALLOC_N(prof_log_dummy_set) +#define prof_log_init JEMALLOC_N(prof_log_init) +#define prof_log_is_logging JEMALLOC_N(prof_log_is_logging) +#define prof_log_rep_check JEMALLOC_N(prof_log_rep_check) +#define prof_log_start JEMALLOC_N(prof_log_start) +#define prof_log_stop JEMALLOC_N(prof_log_stop) +#define prof_log_thr_count JEMALLOC_N(prof_log_thr_count) +#define prof_logging_state JEMALLOC_N(prof_logging_state) +#define prof_try_log JEMALLOC_N(prof_try_log) +#define edata_prof_recent_alloc_get_no_lock_test JEMALLOC_N(edata_prof_recent_alloc_get_no_lock_test) +#define edata_prof_recent_alloc_init JEMALLOC_N(edata_prof_recent_alloc_init) +#define opt_prof_recent_alloc_max JEMALLOC_N(opt_prof_recent_alloc_max) +#define prof_recent_alloc JEMALLOC_N(prof_recent_alloc) +#define prof_recent_alloc_dump JEMALLOC_N(prof_recent_alloc_dump) +#define prof_recent_alloc_edata_get_no_lock_test JEMALLOC_N(prof_recent_alloc_edata_get_no_lock_test) +#define prof_recent_alloc_list JEMALLOC_N(prof_recent_alloc_list) +#define prof_recent_alloc_max_ctl_read JEMALLOC_N(prof_recent_alloc_max_ctl_read) +#define prof_recent_alloc_max_ctl_write JEMALLOC_N(prof_recent_alloc_max_ctl_write) +#define prof_recent_alloc_mtx JEMALLOC_N(prof_recent_alloc_mtx) +#define prof_recent_alloc_prepare JEMALLOC_N(prof_recent_alloc_prepare) +#define prof_recent_alloc_reset JEMALLOC_N(prof_recent_alloc_reset) +#define prof_recent_dump_mtx JEMALLOC_N(prof_recent_dump_mtx) +#define prof_recent_init JEMALLOC_N(prof_recent_init) +#define opt_prof_stats JEMALLOC_N(opt_prof_stats) +#define prof_stats_dec JEMALLOC_N(prof_stats_dec) +#define prof_stats_get_accum JEMALLOC_N(prof_stats_get_accum) +#define prof_stats_get_live JEMALLOC_N(prof_stats_get_live) +#define prof_stats_inc JEMALLOC_N(prof_stats_inc) +#define prof_stats_mtx JEMALLOC_N(prof_stats_mtx) +#define bt_init JEMALLOC_N(bt_init) +#define prof_backtrace JEMALLOC_N(prof_backtrace) +#define prof_base JEMALLOC_N(prof_base) +#define prof_do_mock JEMALLOC_N(prof_do_mock) +#define prof_dump_filename_mtx JEMALLOC_N(prof_dump_filename_mtx) +#define prof_dump_open_file JEMALLOC_N(prof_dump_open_file) +#define prof_dump_open_maps JEMALLOC_N(prof_dump_open_maps) +#define prof_dump_write_file JEMALLOC_N(prof_dump_write_file) +#define prof_fdump_impl JEMALLOC_N(prof_fdump_impl) +#define prof_gdump_impl JEMALLOC_N(prof_gdump_impl) +#define prof_get_default_filename JEMALLOC_N(prof_get_default_filename) +#define prof_getpid JEMALLOC_N(prof_getpid) +#define prof_hooks_init JEMALLOC_N(prof_hooks_init) +#define prof_idump_impl JEMALLOC_N(prof_idump_impl) +#define prof_mdump_impl JEMALLOC_N(prof_mdump_impl) +#define prof_prefix_set JEMALLOC_N(prof_prefix_set) +#define prof_sys_thread_name_fetch JEMALLOC_N(prof_sys_thread_name_fetch) +#define prof_sys_thread_name_read JEMALLOC_N(prof_sys_thread_name_read) +#define prof_unwind_init JEMALLOC_N(prof_unwind_init) +#define psset_init JEMALLOC_N(psset_init) +#define psset_insert JEMALLOC_N(psset_insert) +#define psset_pick_alloc JEMALLOC_N(psset_pick_alloc) +#define psset_pick_hugify JEMALLOC_N(psset_pick_hugify) +#define psset_pick_purge JEMALLOC_N(psset_pick_purge) +#define psset_remove JEMALLOC_N(psset_remove) +#define psset_stats_accum JEMALLOC_N(psset_stats_accum) +#define psset_update_begin JEMALLOC_N(psset_update_begin) +#define psset_update_end JEMALLOC_N(psset_update_end) #define rtree_ctx_data_init JEMALLOC_N(rtree_ctx_data_init) -#define rtree_leaf_alloc 
JEMALLOC_N(rtree_leaf_alloc) -#define rtree_leaf_dalloc JEMALLOC_N(rtree_leaf_dalloc) #define rtree_leaf_elm_lookup_hard JEMALLOC_N(rtree_leaf_elm_lookup_hard) #define rtree_new JEMALLOC_N(rtree_new) -#define rtree_node_alloc JEMALLOC_N(rtree_node_alloc) -#define rtree_node_dalloc JEMALLOC_N(rtree_node_dalloc) #define safety_check_fail JEMALLOC_N(safety_check_fail) +#define safety_check_fail_sized_dealloc JEMALLOC_N(safety_check_fail_sized_dealloc) #define safety_check_set_abort JEMALLOC_N(safety_check_set_abort) +#define reg_size_compute JEMALLOC_N(reg_size_compute) +#define sc_boot JEMALLOC_N(sc_boot) +#define sc_data_init JEMALLOC_N(sc_data_init) +#define sc_data_update_slab_size JEMALLOC_N(sc_data_update_slab_size) +#define sec_disable JEMALLOC_N(sec_disable) +#define sec_flush JEMALLOC_N(sec_flush) +#define sec_init JEMALLOC_N(sec_init) +#define sec_mutex_stats_read JEMALLOC_N(sec_mutex_stats_read) +#define sec_postfork_child JEMALLOC_N(sec_postfork_child) +#define sec_postfork_parent JEMALLOC_N(sec_postfork_parent) +#define sec_prefork2 JEMALLOC_N(sec_prefork2) +#define sec_stats_merge JEMALLOC_N(sec_stats_merge) #define arena_mutex_names JEMALLOC_N(arena_mutex_names) #define global_mutex_names JEMALLOC_N(global_mutex_names) +#define opt_stats_interval JEMALLOC_N(opt_stats_interval) +#define opt_stats_interval_opts JEMALLOC_N(opt_stats_interval_opts) #define opt_stats_print JEMALLOC_N(opt_stats_print) #define opt_stats_print_opts JEMALLOC_N(opt_stats_print_opts) +#define stats_boot JEMALLOC_N(stats_boot) +#define stats_interval_event_handler JEMALLOC_N(stats_interval_event_handler) +#define stats_interval_new_event_wait JEMALLOC_N(stats_interval_new_event_wait) +#define stats_interval_postponed_event_wait JEMALLOC_N(stats_interval_postponed_event_wait) +#define stats_postfork_child JEMALLOC_N(stats_postfork_child) +#define stats_postfork_parent JEMALLOC_N(stats_postfork_parent) +#define stats_prefork JEMALLOC_N(stats_prefork) #define stats_print JEMALLOC_N(stats_print) -#define sc_boot JEMALLOC_N(sc_boot) -#define sc_data_global JEMALLOC_N(sc_data_global) -#define sc_data_init JEMALLOC_N(sc_data_init) -#define sc_data_update_slab_size JEMALLOC_N(sc_data_update_slab_size) #define sz_boot JEMALLOC_N(sz_boot) #define sz_index2size_tab JEMALLOC_N(sz_index2size_tab) +#define sz_large_pad JEMALLOC_N(sz_large_pad) #define sz_pind2sz_tab JEMALLOC_N(sz_pind2sz_tab) +#define sz_psz_quantize_ceil JEMALLOC_N(sz_psz_quantize_ceil) +#define sz_psz_quantize_floor JEMALLOC_N(sz_psz_quantize_floor) #define sz_size2index_tab JEMALLOC_N(sz_size2index_tab) #define nhbins JEMALLOC_N(nhbins) -#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max) +#define opt_lg_tcache_flush_large_div JEMALLOC_N(opt_lg_tcache_flush_large_div) +#define opt_lg_tcache_flush_small_div JEMALLOC_N(opt_lg_tcache_flush_small_div) +#define opt_lg_tcache_nslots_mul JEMALLOC_N(opt_lg_tcache_nslots_mul) #define opt_tcache JEMALLOC_N(opt_tcache) +#define opt_tcache_gc_delay_bytes JEMALLOC_N(opt_tcache_gc_delay_bytes) +#define opt_tcache_gc_incr_bytes JEMALLOC_N(opt_tcache_gc_incr_bytes) +#define opt_tcache_max JEMALLOC_N(opt_tcache_max) +#define opt_tcache_nslots_large JEMALLOC_N(opt_tcache_nslots_large) +#define opt_tcache_nslots_small_max JEMALLOC_N(opt_tcache_nslots_small_max) +#define opt_tcache_nslots_small_min JEMALLOC_N(opt_tcache_nslots_small_min) #define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard) #define tcache_arena_associate JEMALLOC_N(tcache_arena_associate) #define tcache_arena_reassociate 
JEMALLOC_N(tcache_arena_reassociate) +#define tcache_assert_initialized JEMALLOC_N(tcache_assert_initialized) #define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large) #define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small) +#define tcache_bin_flush_stashed JEMALLOC_N(tcache_bin_flush_stashed) #define tcache_bin_info JEMALLOC_N(tcache_bin_info) #define tcache_boot JEMALLOC_N(tcache_boot) #define tcache_cleanup JEMALLOC_N(tcache_cleanup) #define tcache_create_explicit JEMALLOC_N(tcache_create_explicit) -#define tcache_event_hard JEMALLOC_N(tcache_event_hard) #define tcache_flush JEMALLOC_N(tcache_flush) +#define tcache_gc_dalloc_event_handler JEMALLOC_N(tcache_gc_dalloc_event_handler) +#define tcache_gc_dalloc_new_event_wait JEMALLOC_N(tcache_gc_dalloc_new_event_wait) +#define tcache_gc_dalloc_postponed_event_wait JEMALLOC_N(tcache_gc_dalloc_postponed_event_wait) +#define tcache_gc_event_handler JEMALLOC_N(tcache_gc_event_handler) +#define tcache_gc_new_event_wait JEMALLOC_N(tcache_gc_new_event_wait) +#define tcache_gc_postponed_event_wait JEMALLOC_N(tcache_gc_postponed_event_wait) #define tcache_maxclass JEMALLOC_N(tcache_maxclass) #define tcache_postfork_child JEMALLOC_N(tcache_postfork_child) #define tcache_postfork_parent JEMALLOC_N(tcache_postfork_parent) @@ -391,9 +688,13 @@ #define tsd_tcache_enabled_data_init JEMALLOC_N(tsd_tcache_enabled_data_init) #define test_hooks_arena_new_hook JEMALLOC_N(test_hooks_arena_new_hook) #define test_hooks_libc_hook JEMALLOC_N(test_hooks_libc_hook) +#define te_assert_invariants_debug JEMALLOC_N(te_assert_invariants_debug) +#define te_event_trigger JEMALLOC_N(te_event_trigger) +#define te_recompute_fast_threshold JEMALLOC_N(te_recompute_fast_threshold) +#define tsd_te_init JEMALLOC_N(tsd_te_init) +#define ticker_geom_table JEMALLOC_N(ticker_geom_table) #define malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0) #define malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1) -#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register) #define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc) #define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc) #define tsd_booted JEMALLOC_N(tsd_booted) diff --git a/contrib/jemalloc/include/jemalloc/internal/prng.h b/contrib/jemalloc/include/jemalloc/internal/prng.h index 15cc2d18fa4dee..14542aa12d4651 100644 --- a/contrib/jemalloc/include/jemalloc/internal/prng.h +++ b/contrib/jemalloc/include/jemalloc/internal/prng.h @@ -1,7 +1,6 @@ #ifndef JEMALLOC_INTERNAL_PRNG_H #define JEMALLOC_INTERNAL_PRNG_H -#include "jemalloc/internal/atomic.h" #include "jemalloc/internal/bit_util.h" /* @@ -59,66 +58,38 @@ prng_state_next_zu(size_t state) { /* * The prng_lg_range functions give a uniform int in the half-open range [0, - * 2**lg_range). If atomic is true, they do so safely from multiple threads. - * Multithreaded 64-bit prngs aren't supported. + * 2**lg_range). 
*/ JEMALLOC_ALWAYS_INLINE uint32_t -prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) { - uint32_t ret, state0, state1; - +prng_lg_range_u32(uint32_t *state, unsigned lg_range) { assert(lg_range > 0); assert(lg_range <= 32); - state0 = atomic_load_u32(state, ATOMIC_RELAXED); - - if (atomic) { - do { - state1 = prng_state_next_u32(state0); - } while (!atomic_compare_exchange_weak_u32(state, &state0, - state1, ATOMIC_RELAXED, ATOMIC_RELAXED)); - } else { - state1 = prng_state_next_u32(state0); - atomic_store_u32(state, state1, ATOMIC_RELAXED); - } - ret = state1 >> (32 - lg_range); + *state = prng_state_next_u32(*state); + uint32_t ret = *state >> (32 - lg_range); return ret; } JEMALLOC_ALWAYS_INLINE uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range) { - uint64_t ret, state1; - assert(lg_range > 0); assert(lg_range <= 64); - state1 = prng_state_next_u64(*state); - *state = state1; - ret = state1 >> (64 - lg_range); + *state = prng_state_next_u64(*state); + uint64_t ret = *state >> (64 - lg_range); return ret; } JEMALLOC_ALWAYS_INLINE size_t -prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) { - size_t ret, state0, state1; - +prng_lg_range_zu(size_t *state, unsigned lg_range) { assert(lg_range > 0); assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR)); - state0 = atomic_load_zu(state, ATOMIC_RELAXED); - - if (atomic) { - do { - state1 = prng_state_next_zu(state0); - } while (atomic_compare_exchange_weak_zu(state, &state0, - state1, ATOMIC_RELAXED, ATOMIC_RELAXED)); - } else { - state1 = prng_state_next_zu(state0); - atomic_store_zu(state, state1, ATOMIC_RELAXED); - } - ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range); + *state = prng_state_next_zu(*state); + size_t ret = *state >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range); return ret; } @@ -129,18 +100,24 @@ prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) { */ JEMALLOC_ALWAYS_INLINE uint32_t -prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) { - uint32_t ret; - unsigned lg_range; - - assert(range > 1); +prng_range_u32(uint32_t *state, uint32_t range) { + assert(range != 0); + /* + * If range were 1, lg_range would be 0, so the shift in + * prng_lg_range_u32 would be a shift of a 32-bit variable by 32 bits, + * which is UB. Just handle this case as a one-off. + */ + if (range == 1) { + return 0; + } /* Compute the ceiling of lg(range). */ - lg_range = ffs_u32(pow2_ceil_u32(range)) - 1; + unsigned lg_range = ffs_u32(pow2_ceil_u32(range)); /* Generate a result in [0..range) via repeated trial. */ + uint32_t ret; do { - ret = prng_lg_range_u32(state, lg_range, atomic); + ret = prng_lg_range_u32(state, lg_range); } while (ret >= range); return ret; @@ -148,15 +125,18 @@ prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) { JEMALLOC_ALWAYS_INLINE uint64_t prng_range_u64(uint64_t *state, uint64_t range) { - uint64_t ret; - unsigned lg_range; + assert(range != 0); - assert(range > 1); + /* See the note in prng_range_u32. */ + if (range == 1) { + return 0; + } /* Compute the ceiling of lg(range). */ - lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; + unsigned lg_range = ffs_u64(pow2_ceil_u64(range)); /* Generate a result in [0..range) via repeated trial. 
*/ + uint64_t ret; do { ret = prng_lg_range_u64(state, lg_range); } while (ret >= range); @@ -165,18 +145,21 @@ prng_range_u64(uint64_t *state, uint64_t range) { } JEMALLOC_ALWAYS_INLINE size_t -prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) { - size_t ret; - unsigned lg_range; +prng_range_zu(size_t *state, size_t range) { + assert(range != 0); - assert(range > 1); + /* See the note in prng_range_u32. */ + if (range == 1) { + return 0; + } /* Compute the ceiling of lg(range). */ - lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; + unsigned lg_range = ffs_u64(pow2_ceil_u64(range)); /* Generate a result in [0..range) via repeated trial. */ + size_t ret; do { - ret = prng_lg_range_zu(state, lg_range, atomic); + ret = prng_lg_range_zu(state, lg_range); } while (ret >= range); return ret; diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_data.h b/contrib/jemalloc/include/jemalloc/internal/prof_data.h new file mode 100644 index 00000000000000..4c8e22c76f20e7 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/prof_data.h @@ -0,0 +1,37 @@ +#ifndef JEMALLOC_INTERNAL_PROF_DATA_H +#define JEMALLOC_INTERNAL_PROF_DATA_H + +#include "jemalloc/internal/mutex.h" + +extern malloc_mutex_t bt2gctx_mtx; +extern malloc_mutex_t tdatas_mtx; +extern malloc_mutex_t prof_dump_mtx; + +extern malloc_mutex_t *gctx_locks; +extern malloc_mutex_t *tdata_locks; + +extern size_t prof_unbiased_sz[PROF_SC_NSIZES]; +extern size_t prof_shifted_unbiased_cnt[PROF_SC_NSIZES]; + +void prof_bt_hash(const void *key, size_t r_hash[2]); +bool prof_bt_keycomp(const void *k1, const void *k2); + +bool prof_data_init(tsd_t *tsd); +prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt); +char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name); +int prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name); +void prof_unbias_map_init(); +void prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque, + prof_tdata_t *tdata, bool leakcheck); +prof_tdata_t * prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, + uint64_t thr_discrim, char *thread_name, bool active); +void prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata); +void prof_reset(tsd_t *tsd, size_t lg_sample); +void prof_tctx_try_destroy(tsd_t *tsd, prof_tctx_t *tctx); + +/* Used in unit tests. */ +size_t prof_tdata_count(void); +size_t prof_bt_count(void); +void prof_cnt_all(prof_cnt_t *cnt_all); + +#endif /* JEMALLOC_INTERNAL_PROF_DATA_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_externs.h b/contrib/jemalloc/include/jemalloc/internal/prof_externs.h index 094f3e170ae7f8..bdff1349aeafc9 100644 --- a/contrib/jemalloc/include/jemalloc/internal/prof_externs.h +++ b/contrib/jemalloc/include/jemalloc/internal/prof_externs.h @@ -2,75 +2,72 @@ #define JEMALLOC_INTERNAL_PROF_EXTERNS_H #include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/prof_hook.h" -extern malloc_mutex_t bt2gctx_mtx; - -extern bool opt_prof; -extern bool opt_prof_active; -extern bool opt_prof_thread_active_init; -extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ -extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ -extern bool opt_prof_gdump; /* High-water memory dumping. */ -extern bool opt_prof_final; /* Final profile dumping. */ -extern bool opt_prof_leak; /* Dump leak summary at exit. */ -extern bool opt_prof_accum; /* Report cumulative bytes. */ -extern bool opt_prof_log; /* Turn logging on at boot. 
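[Editor's note] For reference, the reworked prng_range_* helpers in the prng.h hunk above drop the atomic code paths entirely: the caller now owns a plain state word, and uniformity over [0, range) still comes from rejection sampling after a power-of-two shift, with range == 1 special-cased to avoid an undefined full-width shift. A minimal stand-alone sketch of that pattern follows; the LCG constants and demo_* names are illustrative assumptions, not jemalloc's internals.

```
/* Illustration only: mimics the non-atomic prng_range_* pattern with a
 * self-contained 64-bit LCG so it compiles outside jemalloc. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static inline uint64_t
demo_state_next_u64(uint64_t state) {
	/* Any full-period 64-bit LCG works for the demo. */
	return state * 6364136223846793005ULL + 1442695040888963407ULL;
}

static inline uint64_t
demo_range_u64(uint64_t *state, uint64_t range) {
	assert(range != 0);
	if (range == 1) {
		return 0;	/* Avoid the shift-by-64 UB case. */
	}
	/* Ceiling of lg(range); range >= 2 here, so range - 1 is nonzero. */
	unsigned lg_range = 64 - __builtin_clzll(range - 1);
	uint64_t ret;
	do {	/* Rejection sampling keeps the result uniform in [0, range). */
		*state = demo_state_next_u64(*state);
		ret = *state >> (64 - lg_range);
	} while (ret >= range);
	return ret;
}

int
main(void) {
	uint64_t state = 42;	/* Caller owns the state; no atomics involved. */
	for (int i = 0; i < 5; i++) {
		printf("%llu\n", (unsigned long long)demo_range_u64(&state, 10));
	}
	return 0;
}
```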
*/ -extern char opt_prof_prefix[ +extern bool opt_prof; +extern bool opt_prof_active; +extern bool opt_prof_thread_active_init; +extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ +extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ +extern bool opt_prof_gdump; /* High-water memory dumping. */ +extern bool opt_prof_final; /* Final profile dumping. */ +extern bool opt_prof_leak; /* Dump leak summary at exit. */ +extern bool opt_prof_leak_error; /* Exit with error code if memory leaked */ +extern bool opt_prof_accum; /* Report cumulative bytes. */ +extern bool opt_prof_log; /* Turn logging on at boot. */ +extern char opt_prof_prefix[ /* Minimize memory bloat for non-prof builds. */ #ifdef JEMALLOC_PROF PATH_MAX + #endif 1]; +extern bool opt_prof_unbias; + +/* For recording recent allocations */ +extern ssize_t opt_prof_recent_alloc_max; + +/* Whether to use thread name provided by the system or by mallctl. */ +extern bool opt_prof_sys_thread_name; + +/* Whether to record per size class counts and request size totals. */ +extern bool opt_prof_stats; /* Accessed via prof_active_[gs]et{_unlocked,}(). */ -extern bool prof_active; +extern bool prof_active_state; /* Accessed via prof_gdump_[gs]et{_unlocked,}(). */ -extern bool prof_gdump_val; +extern bool prof_gdump_val; -/* - * Profile dump interval, measured in bytes allocated. Each arena triggers a - * profile dump when it reaches this threshold. The effect is that the - * interval between profile dumps averages prof_interval, though the actual - * interval between dumps will tend to be sporadic, and the interval will be a - * maximum of approximately (prof_interval * narenas). - */ -extern uint64_t prof_interval; +/* Profile dump interval, measured in bytes allocated. */ +extern uint64_t prof_interval; /* * Initialized as opt_lg_prof_sample, and potentially modified during profiling * resets. 
*/ -extern size_t lg_prof_sample; - -void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated); -void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, - prof_tctx_t *tctx); -void prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize, - prof_tctx_t *tctx); -void bt_init(prof_bt_t *bt, void **vec); -void prof_backtrace(prof_bt_t *bt); -prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt); -#ifdef JEMALLOC_JET -size_t prof_tdata_count(void); -size_t prof_bt_count(void); -#endif -typedef int (prof_dump_open_t)(bool, const char *); -extern prof_dump_open_t *JET_MUTABLE prof_dump_open; - -typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *); -extern prof_dump_header_t *JET_MUTABLE prof_dump_header; -#ifdef JEMALLOC_JET -void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, - uint64_t *accumbytes); -#endif -bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum); +extern size_t lg_prof_sample; + +extern bool prof_booted; + +void prof_backtrace_hook_set(prof_backtrace_hook_t hook); +prof_backtrace_hook_t prof_backtrace_hook_get(); + +void prof_dump_hook_set(prof_dump_hook_t hook); +prof_dump_hook_t prof_dump_hook_get(); + +/* Functions only accessed in prof_inlines.h */ +prof_tdata_t *prof_tdata_init(tsd_t *tsd); +prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); + +void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx); +void prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size, + size_t usize, prof_tctx_t *tctx); +void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_info_t *prof_info); +prof_tctx_t *prof_tctx_create(tsd_t *tsd); void prof_idump(tsdn_t *tsdn); bool prof_mdump(tsd_t *tsd, const char *filename); void prof_gdump(tsdn_t *tsdn); -prof_tdata_t *prof_tdata_init(tsd_t *tsd); -prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); -void prof_reset(tsd_t *tsd, size_t lg_sample); + void prof_tdata_cleanup(tsd_t *tsd); bool prof_active_get(tsdn_t *tsdn); bool prof_active_set(tsdn_t *tsdn, bool active); @@ -84,22 +81,15 @@ bool prof_gdump_get(tsdn_t *tsdn); bool prof_gdump_set(tsdn_t *tsdn, bool active); void prof_boot0(void); void prof_boot1(void); -bool prof_boot2(tsd_t *tsd); +bool prof_boot2(tsd_t *tsd, base_t *base); void prof_prefork0(tsdn_t *tsdn); void prof_prefork1(tsdn_t *tsdn); void prof_postfork_parent(tsdn_t *tsdn); void prof_postfork_child(tsdn_t *tsdn); -void prof_sample_threshold_update(prof_tdata_t *tdata); - -bool prof_log_start(tsdn_t *tsdn, const char *filename); -bool prof_log_stop(tsdn_t *tsdn); -#ifdef JEMALLOC_JET -size_t prof_log_bt_count(void); -size_t prof_log_alloc_count(void); -size_t prof_log_thr_count(void); -bool prof_log_is_logging(void); -bool prof_log_rep_check(void); -void prof_log_dummy_set(bool new_value); -#endif + +/* Only accessed by thread event. 
*/ +uint64_t prof_sample_new_event_wait(tsd_t *tsd); +uint64_t prof_sample_postponed_event_wait(tsd_t *tsd); +void prof_sample_event_handler(tsd_t *tsd, uint64_t elapsed); #endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_hook.h b/contrib/jemalloc/include/jemalloc/internal/prof_hook.h new file mode 100644 index 00000000000000..150d19d3d61cc3 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/prof_hook.h @@ -0,0 +1,21 @@ +#ifndef JEMALLOC_INTERNAL_PROF_HOOK_H +#define JEMALLOC_INTERNAL_PROF_HOOK_H + +/* + * The hooks types of which are declared in this file are experimental and + * undocumented, thus the typedefs are located in an 'internal' header. + */ + +/* + * A hook to mock out backtrace functionality. This can be handy, since it's + * otherwise difficult to guarantee that two allocations are reported as coming + * from the exact same stack trace in the presence of an optimizing compiler. + */ +typedef void (*prof_backtrace_hook_t)(void **, unsigned *, unsigned); + +/* + * A callback hook that notifies about recently dumped heap profile. + */ +typedef void (*prof_dump_hook_t)(const char *filename); + +#endif /* JEMALLOC_INTERNAL_PROF_HOOK_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_inlines.h b/contrib/jemalloc/include/jemalloc/internal/prof_inlines.h new file mode 100644 index 00000000000000..a8e7e7fb663e13 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/prof_inlines.h @@ -0,0 +1,261 @@ +#ifndef JEMALLOC_INTERNAL_PROF_INLINES_H +#define JEMALLOC_INTERNAL_PROF_INLINES_H + +#include "jemalloc/internal/safety_check.h" +#include "jemalloc/internal/sz.h" +#include "jemalloc/internal/thread_event.h" + +JEMALLOC_ALWAYS_INLINE void +prof_active_assert() { + cassert(config_prof); + /* + * If opt_prof is off, then prof_active must always be off, regardless + * of whether prof_active_mtx is in effect or not. + */ + assert(opt_prof || !prof_active_state); +} + +JEMALLOC_ALWAYS_INLINE bool +prof_active_get_unlocked(void) { + prof_active_assert(); + /* + * Even if opt_prof is true, sampling can be temporarily disabled by + * setting prof_active to false. No locking is used when reading + * prof_active in the fast path, so there are no guarantees regarding + * how long it will take for all threads to notice state changes. + */ + return prof_active_state; +} + +JEMALLOC_ALWAYS_INLINE bool +prof_gdump_get_unlocked(void) { + /* + * No locking is used when reading prof_gdump_val in the fast path, so + * there are no guarantees regarding how long it will take for all + * threads to notice state changes. 
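[Editor's note] The prof_backtrace_hook_t type added in prof_hook.h above exists to mock out backtrace collection in tests. A hedged sketch of such a hook follows; only the function-pointer signature and the prof_backtrace_hook_set() entry point come from the headers above, while the parameter meanings (frame vector, frame count, capacity) and the hook body are assumptions for illustration.

```
/* Hypothetical test hook: reports the same fixed "stack" every time, so two
 * sampled allocations are guaranteed to collapse into one backtrace. */
static void
fake_backtrace_hook(void **vec, unsigned *len, unsigned max_len) {
	static int frame_a, frame_b;	/* Stand-in return addresses. */
	unsigned n = 0;
	if (n < max_len) {
		vec[n++] = (void *)&frame_a;
	}
	if (n < max_len) {
		vec[n++] = (void *)&frame_b;
	}
	*len = n;
}

/* Installed through the setter declared in prof_externs.h above:
 *	prof_backtrace_hook_set(fake_backtrace_hook);
 */
```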
+ */ + return prof_gdump_val; +} + +JEMALLOC_ALWAYS_INLINE prof_tdata_t * +prof_tdata_get(tsd_t *tsd, bool create) { + prof_tdata_t *tdata; + + cassert(config_prof); + + tdata = tsd_prof_tdata_get(tsd); + if (create) { + assert(tsd_reentrancy_level_get(tsd) == 0); + if (unlikely(tdata == NULL)) { + if (tsd_nominal(tsd)) { + tdata = prof_tdata_init(tsd); + tsd_prof_tdata_set(tsd, tdata); + } + } else if (unlikely(tdata->expired)) { + tdata = prof_tdata_reinit(tsd, tdata); + tsd_prof_tdata_set(tsd, tdata); + } + assert(tdata == NULL || tdata->attached); + } + + return tdata; +} + +JEMALLOC_ALWAYS_INLINE void +prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx, + prof_info_t *prof_info) { + cassert(config_prof); + assert(ptr != NULL); + assert(prof_info != NULL); + + arena_prof_info_get(tsd, ptr, alloc_ctx, prof_info, false); +} + +JEMALLOC_ALWAYS_INLINE void +prof_info_get_and_reset_recent(tsd_t *tsd, const void *ptr, + emap_alloc_ctx_t *alloc_ctx, prof_info_t *prof_info) { + cassert(config_prof); + assert(ptr != NULL); + assert(prof_info != NULL); + + arena_prof_info_get(tsd, ptr, alloc_ctx, prof_info, true); +} + +JEMALLOC_ALWAYS_INLINE void +prof_tctx_reset(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx) { + cassert(config_prof); + assert(ptr != NULL); + + arena_prof_tctx_reset(tsd, ptr, alloc_ctx); +} + +JEMALLOC_ALWAYS_INLINE void +prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) { + cassert(config_prof); + assert(ptr != NULL); + + arena_prof_tctx_reset_sampled(tsd, ptr); +} + +JEMALLOC_ALWAYS_INLINE void +prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx, size_t size) { + cassert(config_prof); + assert(edata != NULL); + assert((uintptr_t)tctx > (uintptr_t)1U); + + arena_prof_info_set(tsd, edata, tctx, size); +} + +JEMALLOC_ALWAYS_INLINE bool +prof_sample_should_skip(tsd_t *tsd, bool sample_event) { + cassert(config_prof); + + /* Fastpath: no need to load tdata */ + if (likely(!sample_event)) { + return true; + } + + /* + * sample_event is always obtained from the thread event module, and + * whenever it's true, it means that the thread event module has + * already checked the reentrancy level. 
+ */ + assert(tsd_reentrancy_level_get(tsd) == 0); + + prof_tdata_t *tdata = prof_tdata_get(tsd, true); + if (unlikely(tdata == NULL)) { + return true; + } + + return !tdata->active; +} + +JEMALLOC_ALWAYS_INLINE prof_tctx_t * +prof_alloc_prep(tsd_t *tsd, bool prof_active, bool sample_event) { + prof_tctx_t *ret; + + if (!prof_active || + likely(prof_sample_should_skip(tsd, sample_event))) { + ret = (prof_tctx_t *)(uintptr_t)1U; + } else { + ret = prof_tctx_create(tsd); + } + + return ret; +} + +JEMALLOC_ALWAYS_INLINE void +prof_malloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize, + emap_alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) { + cassert(config_prof); + assert(ptr != NULL); + assert(usize == isalloc(tsd_tsdn(tsd), ptr)); + + if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) { + prof_malloc_sample_object(tsd, ptr, size, usize, tctx); + } else { + prof_tctx_reset(tsd, ptr, alloc_ctx); + } +} + +JEMALLOC_ALWAYS_INLINE void +prof_realloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize, + prof_tctx_t *tctx, bool prof_active, const void *old_ptr, size_t old_usize, + prof_info_t *old_prof_info, bool sample_event) { + bool sampled, old_sampled, moved; + + cassert(config_prof); + assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U); + + if (prof_active && ptr != NULL) { + assert(usize == isalloc(tsd_tsdn(tsd), ptr)); + if (prof_sample_should_skip(tsd, sample_event)) { + /* + * Don't sample. The usize passed to prof_alloc_prep() + * was larger than what actually got allocated, so a + * backtrace was captured for this allocation, even + * though its actual usize was insufficient to cross the + * sample threshold. + */ + prof_alloc_rollback(tsd, tctx); + tctx = (prof_tctx_t *)(uintptr_t)1U; + } + } + + sampled = ((uintptr_t)tctx > (uintptr_t)1U); + old_sampled = ((uintptr_t)old_prof_info->alloc_tctx > (uintptr_t)1U); + moved = (ptr != old_ptr); + + if (unlikely(sampled)) { + prof_malloc_sample_object(tsd, ptr, size, usize, tctx); + } else if (moved) { + prof_tctx_reset(tsd, ptr, NULL); + } else if (unlikely(old_sampled)) { + /* + * prof_tctx_reset() would work for the !moved case as well, + * but prof_tctx_reset_sampled() is slightly cheaper, and the + * proper thing to do here in the presence of explicit + * knowledge re: moved state. + */ + prof_tctx_reset_sampled(tsd, ptr); + } else { + prof_info_t prof_info; + prof_info_get(tsd, ptr, NULL, &prof_info); + assert((uintptr_t)prof_info.alloc_tctx == (uintptr_t)1U); + } + + /* + * The prof_free_sampled_object() call must come after the + * prof_malloc_sample_object() call, because tctx and old_tctx may be + * the same, in which case reversing the call order could cause the tctx + * to be prematurely destroyed as a side effect of momentarily zeroed + * counters. + */ + if (unlikely(old_sampled)) { + prof_free_sampled_object(tsd, old_usize, old_prof_info); + } +} + +JEMALLOC_ALWAYS_INLINE size_t +prof_sample_align(size_t orig_align) { + /* + * Enforce page alignment, so that sampled allocations can be identified + * w/o metadata lookup. + */ + assert(opt_prof); + return (opt_cache_oblivious && orig_align < PAGE) ? 
PAGE : + orig_align; +} + +JEMALLOC_ALWAYS_INLINE bool +prof_sample_aligned(const void *ptr) { + return ((uintptr_t)ptr & PAGE_MASK) == 0; +} + +JEMALLOC_ALWAYS_INLINE bool +prof_sampled(tsd_t *tsd, const void *ptr) { + prof_info_t prof_info; + prof_info_get(tsd, ptr, NULL, &prof_info); + bool sampled = (uintptr_t)prof_info.alloc_tctx > (uintptr_t)1U; + if (sampled) { + assert(prof_sample_aligned(ptr)); + } + return sampled; +} + +JEMALLOC_ALWAYS_INLINE void +prof_free(tsd_t *tsd, const void *ptr, size_t usize, + emap_alloc_ctx_t *alloc_ctx) { + prof_info_t prof_info; + prof_info_get_and_reset_recent(tsd, ptr, alloc_ctx, &prof_info); + + cassert(config_prof); + assert(usize == isalloc(tsd_tsdn(tsd), ptr)); + + if (unlikely((uintptr_t)prof_info.alloc_tctx > (uintptr_t)1U)) { + assert(prof_sample_aligned(ptr)); + prof_free_sampled_object(tsd, usize, &prof_info); + } +} + +#endif /* JEMALLOC_INTERNAL_PROF_INLINES_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_inlines_a.h b/contrib/jemalloc/include/jemalloc/internal/prof_inlines_a.h deleted file mode 100644 index 471d9853cf8769..00000000000000 --- a/contrib/jemalloc/include/jemalloc/internal/prof_inlines_a.h +++ /dev/null @@ -1,85 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H -#define JEMALLOC_INTERNAL_PROF_INLINES_A_H - -#include "jemalloc/internal/mutex.h" - -static inline bool -prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, - uint64_t accumbytes) { - cassert(config_prof); - - bool overflow; - uint64_t a0, a1; - - /* - * If the application allocates fast enough (and/or if idump is slow - * enough), extreme overflow here (a1 >= prof_interval * 2) can cause - * idump trigger coalescing. This is an intentional mechanism that - * avoids rate-limiting allocation. - */ -#ifdef JEMALLOC_ATOMIC_U64 - a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED); - do { - a1 = a0 + accumbytes; - assert(a1 >= a0); - overflow = (a1 >= prof_interval); - if (overflow) { - a1 %= prof_interval; - } - } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0, - a1, ATOMIC_RELAXED, ATOMIC_RELAXED)); -#else - malloc_mutex_lock(tsdn, &prof_accum->mtx); - a0 = prof_accum->accumbytes; - a1 = a0 + accumbytes; - overflow = (a1 >= prof_interval); - if (overflow) { - a1 %= prof_interval; - } - prof_accum->accumbytes = a1; - malloc_mutex_unlock(tsdn, &prof_accum->mtx); -#endif - return overflow; -} - -static inline void -prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, - size_t usize) { - cassert(config_prof); - - /* - * Cancel out as much of the excessive prof_accumbytes increase as - * possible without underflowing. Interval-triggered dumps occur - * slightly more often than intended as a result of incomplete - * canceling. - */ - uint64_t a0, a1; -#ifdef JEMALLOC_ATOMIC_U64 - a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED); - do { - a1 = (a0 >= SC_LARGE_MINCLASS - usize) - ? a0 - (SC_LARGE_MINCLASS - usize) : 0; - } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0, - a1, ATOMIC_RELAXED, ATOMIC_RELAXED)); -#else - malloc_mutex_lock(tsdn, &prof_accum->mtx); - a0 = prof_accum->accumbytes; - a1 = (a0 >= SC_LARGE_MINCLASS - usize) - ? a0 - (SC_LARGE_MINCLASS - usize) : 0; - prof_accum->accumbytes = a1; - malloc_mutex_unlock(tsdn, &prof_accum->mtx); -#endif -} - -JEMALLOC_ALWAYS_INLINE bool -prof_active_get_unlocked(void) { - /* - * Even if opt_prof is true, sampling can be temporarily disabled by - * setting prof_active to false. 
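[Editor's note] Stepping back to prof_sample_align()/prof_sample_aligned() in the new prof_inlines.h above: forcing sampled allocations to page alignment is what lets prof_free() and prof_sampled() recognize them with a mask test instead of a metadata lookup. A tiny self-contained illustration of that test follows; the DEMO_* names and the 4 KiB page size are assumptions, not jemalloc's PAGE/PAGE_MASK.

```
#include <stdbool.h>
#include <stdint.h>

#define DEMO_PAGE	((uintptr_t)4096)
#define DEMO_PAGE_MASK	(DEMO_PAGE - 1)

/* A pointer forced to page alignment at sampling time can be identified
 * later with nothing but this mask test. */
static inline bool
demo_sample_aligned(const void *ptr) {
	return ((uintptr_t)ptr & DEMO_PAGE_MASK) == 0;
}
```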
No locking is used when reading - * prof_active in the fast path, so there are no guarantees regarding - * how long it will take for all threads to notice state changes. - */ - return prof_active; -} - -#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_inlines_b.h b/contrib/jemalloc/include/jemalloc/internal/prof_inlines_b.h deleted file mode 100644 index 8ba8a1e1ffe7e9..00000000000000 --- a/contrib/jemalloc/include/jemalloc/internal/prof_inlines_b.h +++ /dev/null @@ -1,250 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H -#define JEMALLOC_INTERNAL_PROF_INLINES_B_H - -#include "jemalloc/internal/safety_check.h" -#include "jemalloc/internal/sz.h" - -JEMALLOC_ALWAYS_INLINE bool -prof_gdump_get_unlocked(void) { - /* - * No locking is used when reading prof_gdump_val in the fast path, so - * there are no guarantees regarding how long it will take for all - * threads to notice state changes. - */ - return prof_gdump_val; -} - -JEMALLOC_ALWAYS_INLINE prof_tdata_t * -prof_tdata_get(tsd_t *tsd, bool create) { - prof_tdata_t *tdata; - - cassert(config_prof); - - tdata = tsd_prof_tdata_get(tsd); - if (create) { - if (unlikely(tdata == NULL)) { - if (tsd_nominal(tsd)) { - tdata = prof_tdata_init(tsd); - tsd_prof_tdata_set(tsd, tdata); - } - } else if (unlikely(tdata->expired)) { - tdata = prof_tdata_reinit(tsd, tdata); - tsd_prof_tdata_set(tsd, tdata); - } - assert(tdata == NULL || tdata->attached); - } - - return tdata; -} - -JEMALLOC_ALWAYS_INLINE prof_tctx_t * -prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) { - cassert(config_prof); - assert(ptr != NULL); - - return arena_prof_tctx_get(tsdn, ptr, alloc_ctx); -} - -JEMALLOC_ALWAYS_INLINE void -prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, - alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) { - cassert(config_prof); - assert(ptr != NULL); - - arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx); -} - -JEMALLOC_ALWAYS_INLINE void -prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) { - cassert(config_prof); - assert(ptr != NULL); - - arena_prof_tctx_reset(tsdn, ptr, tctx); -} - -JEMALLOC_ALWAYS_INLINE nstime_t -prof_alloc_time_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) { - cassert(config_prof); - assert(ptr != NULL); - - return arena_prof_alloc_time_get(tsdn, ptr, alloc_ctx); -} - -JEMALLOC_ALWAYS_INLINE void -prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx, - nstime_t t) { - cassert(config_prof); - assert(ptr != NULL); - - arena_prof_alloc_time_set(tsdn, ptr, alloc_ctx, t); -} - -JEMALLOC_ALWAYS_INLINE bool -prof_sample_check(tsd_t *tsd, size_t usize, bool update) { - ssize_t check = update ? 
0 : usize; - - int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd); - if (update) { - bytes_until_sample -= usize; - if (tsd_nominal(tsd)) { - tsd_bytes_until_sample_set(tsd, bytes_until_sample); - } - } - if (likely(bytes_until_sample >= check)) { - return true; - } - - return false; -} - -JEMALLOC_ALWAYS_INLINE bool -prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update, - prof_tdata_t **tdata_out) { - prof_tdata_t *tdata; - - cassert(config_prof); - - /* Fastpath: no need to load tdata */ - if (likely(prof_sample_check(tsd, usize, update))) { - return true; - } - - bool booted = tsd_prof_tdata_get(tsd); - tdata = prof_tdata_get(tsd, true); - if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) { - tdata = NULL; - } - - if (tdata_out != NULL) { - *tdata_out = tdata; - } - - if (unlikely(tdata == NULL)) { - return true; - } - - /* - * If this was the first creation of tdata, then - * prof_tdata_get() reset bytes_until_sample, so decrement and - * check it again - */ - if (!booted && prof_sample_check(tsd, usize, update)) { - return true; - } - - if (tsd_reentrancy_level_get(tsd) > 0) { - return true; - } - /* Compute new sample threshold. */ - if (update) { - prof_sample_threshold_update(tdata); - } - return !tdata->active; -} - -JEMALLOC_ALWAYS_INLINE prof_tctx_t * -prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) { - prof_tctx_t *ret; - prof_tdata_t *tdata; - prof_bt_t bt; - - assert(usize == sz_s2u(usize)); - - if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update, - &tdata))) { - ret = (prof_tctx_t *)(uintptr_t)1U; - } else { - bt_init(&bt, tdata->vec); - prof_backtrace(&bt); - ret = prof_lookup(tsd, &bt); - } - - return ret; -} - -JEMALLOC_ALWAYS_INLINE void -prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx, - prof_tctx_t *tctx) { - cassert(config_prof); - assert(ptr != NULL); - assert(usize == isalloc(tsdn, ptr)); - - if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) { - prof_malloc_sample_object(tsdn, ptr, usize, tctx); - } else { - prof_tctx_set(tsdn, ptr, usize, alloc_ctx, - (prof_tctx_t *)(uintptr_t)1U); - } -} - -JEMALLOC_ALWAYS_INLINE void -prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, - bool prof_active, bool updated, const void *old_ptr, size_t old_usize, - prof_tctx_t *old_tctx) { - bool sampled, old_sampled, moved; - - cassert(config_prof); - assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U); - - if (prof_active && !updated && ptr != NULL) { - assert(usize == isalloc(tsd_tsdn(tsd), ptr)); - if (prof_sample_accum_update(tsd, usize, true, NULL)) { - /* - * Don't sample. The usize passed to prof_alloc_prep() - * was larger than what actually got allocated, so a - * backtrace was captured for this allocation, even - * though its actual usize was insufficient to cross the - * sample threshold. - */ - prof_alloc_rollback(tsd, tctx, true); - tctx = (prof_tctx_t *)(uintptr_t)1U; - } - } - - sampled = ((uintptr_t)tctx > (uintptr_t)1U); - old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U); - moved = (ptr != old_ptr); - - if (unlikely(sampled)) { - prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx); - } else if (moved) { - prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL, - (prof_tctx_t *)(uintptr_t)1U); - } else if (unlikely(old_sampled)) { - /* - * prof_tctx_set() would work for the !moved case as well, but - * prof_tctx_reset() is slightly cheaper, and the proper thing - * to do here in the presence of explicit knowledge re: moved - * state. 
- */ - prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx); - } else { - assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) == - (uintptr_t)1U); - } - - /* - * The prof_free_sampled_object() call must come after the - * prof_malloc_sample_object() call, because tctx and old_tctx may be - * the same, in which case reversing the call order could cause the tctx - * to be prematurely destroyed as a side effect of momentarily zeroed - * counters. - */ - if (unlikely(old_sampled)) { - prof_free_sampled_object(tsd, ptr, old_usize, old_tctx); - } -} - -JEMALLOC_ALWAYS_INLINE void -prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) { - prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx); - - cassert(config_prof); - assert(usize == isalloc(tsd_tsdn(tsd), ptr)); - - if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) { - prof_free_sampled_object(tsd, ptr, usize, tctx); - } -} - -#endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_log.h b/contrib/jemalloc/include/jemalloc/internal/prof_log.h new file mode 100644 index 00000000000000..ccb557dde69fb3 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/prof_log.h @@ -0,0 +1,22 @@ +#ifndef JEMALLOC_INTERNAL_PROF_LOG_H +#define JEMALLOC_INTERNAL_PROF_LOG_H + +#include "jemalloc/internal/mutex.h" + +extern malloc_mutex_t log_mtx; + +void prof_try_log(tsd_t *tsd, size_t usize, prof_info_t *prof_info); +bool prof_log_init(tsd_t *tsdn); + +/* Used in unit tests. */ +size_t prof_log_bt_count(void); +size_t prof_log_alloc_count(void); +size_t prof_log_thr_count(void); +bool prof_log_is_logging(void); +bool prof_log_rep_check(void); +void prof_log_dummy_set(bool new_value); + +bool prof_log_start(tsdn_t *tsdn, const char *filename); +bool prof_log_stop(tsdn_t *tsdn); + +#endif /* JEMALLOC_INTERNAL_PROF_LOG_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_recent.h b/contrib/jemalloc/include/jemalloc/internal/prof_recent.h new file mode 100644 index 00000000000000..df410236269923 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/prof_recent.h @@ -0,0 +1,23 @@ +#ifndef JEMALLOC_INTERNAL_PROF_RECENT_H +#define JEMALLOC_INTERNAL_PROF_RECENT_H + +extern malloc_mutex_t prof_recent_alloc_mtx; +extern malloc_mutex_t prof_recent_dump_mtx; + +bool prof_recent_alloc_prepare(tsd_t *tsd, prof_tctx_t *tctx); +void prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize); +void prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata); +bool prof_recent_init(); +void edata_prof_recent_alloc_init(edata_t *edata); + +/* Used in unit tests. 
*/ +typedef ql_head(prof_recent_t) prof_recent_list_t; +extern prof_recent_list_t prof_recent_alloc_list; +edata_t *prof_recent_alloc_edata_get_no_lock_test(const prof_recent_t *node); +prof_recent_t *edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata); + +ssize_t prof_recent_alloc_max_ctl_read(); +ssize_t prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max); +void prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque); + +#endif /* JEMALLOC_INTERNAL_PROF_RECENT_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_stats.h b/contrib/jemalloc/include/jemalloc/internal/prof_stats.h new file mode 100644 index 00000000000000..7954e82de7962f --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/prof_stats.h @@ -0,0 +1,17 @@ +#ifndef JEMALLOC_INTERNAL_PROF_STATS_H +#define JEMALLOC_INTERNAL_PROF_STATS_H + +typedef struct prof_stats_s prof_stats_t; +struct prof_stats_s { + uint64_t req_sum; + uint64_t count; +}; + +extern malloc_mutex_t prof_stats_mtx; + +void prof_stats_inc(tsd_t *tsd, szind_t ind, size_t size); +void prof_stats_dec(tsd_t *tsd, szind_t ind, size_t size); +void prof_stats_get_live(tsd_t *tsd, szind_t ind, prof_stats_t *stats); +void prof_stats_get_accum(tsd_t *tsd, szind_t ind, prof_stats_t *stats); + +#endif /* JEMALLOC_INTERNAL_PROF_STATS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_structs.h b/contrib/jemalloc/include/jemalloc/internal/prof_structs.h index 34ed4822b67279..dd22115f622280 100644 --- a/contrib/jemalloc/include/jemalloc/internal/prof_structs.h +++ b/contrib/jemalloc/include/jemalloc/internal/prof_structs.h @@ -2,6 +2,7 @@ #define JEMALLOC_INTERNAL_PROF_STRUCTS_H #include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/edata.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/rb.h" @@ -15,26 +16,22 @@ struct prof_bt_s { #ifdef JEMALLOC_PROF_LIBGCC /* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ typedef struct { - prof_bt_t *bt; + void **vec; + unsigned *len; unsigned max; } prof_unwind_data_t; #endif -struct prof_accum_s { -#ifndef JEMALLOC_ATOMIC_U64 - malloc_mutex_t mtx; - uint64_t accumbytes; -#else - atomic_u64_t accumbytes; -#endif -}; - struct prof_cnt_s { /* Profiling counters. */ uint64_t curobjs; + uint64_t curobjs_shifted_unbiased; uint64_t curbytes; + uint64_t curbytes_unbiased; uint64_t accumobjs; + uint64_t accumobjs_shifted_unbiased; uint64_t accumbytes; + uint64_t accumbytes_unbiased; }; typedef enum { @@ -55,6 +52,12 @@ struct prof_tctx_s { uint64_t thr_uid; uint64_t thr_discrim; + /* + * Reference count of how many times this tctx object is referenced in + * recent allocation / deallocation records, protected by tdata->lock. + */ + uint64_t recent_count; + /* Profiling counters, protected by tdata->lock. */ prof_cnt_t cnts; @@ -96,6 +99,15 @@ struct prof_tctx_s { }; typedef rb_tree(prof_tctx_t) prof_tctx_tree_t; +struct prof_info_s { + /* Time when the allocation was made. */ + nstime_t alloc_time; + /* Points to the prof_tctx_t corresponding to the allocation. */ + prof_tctx_t *alloc_tctx; + /* Allocation request size. */ + size_t alloc_size; +}; + struct prof_gctx_s { /* Protects nlimbo, cnt_summed, and tctxs. */ malloc_mutex_t *lock; @@ -167,9 +179,6 @@ struct prof_tdata_s { */ ckh_t bt2tctx; - /* Sampling state. */ - uint64_t prng_state; - /* State used to avoid dumping while operating on prof internals. 
*/ bool enq; bool enq_idump; @@ -197,4 +206,16 @@ struct prof_tdata_s { }; typedef rb_tree(prof_tdata_t) prof_tdata_tree_t; +struct prof_recent_s { + nstime_t alloc_time; + nstime_t dalloc_time; + + ql_elm(prof_recent_t) link; + size_t size; + size_t usize; + atomic_p_t alloc_edata; /* NULL means allocation has been freed. */ + prof_tctx_t *alloc_tctx; + prof_tctx_t *dalloc_tctx; +}; + #endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_sys.h b/contrib/jemalloc/include/jemalloc/internal/prof_sys.h new file mode 100644 index 00000000000000..3d25a4295e2847 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/prof_sys.h @@ -0,0 +1,30 @@ +#ifndef JEMALLOC_INTERNAL_PROF_SYS_H +#define JEMALLOC_INTERNAL_PROF_SYS_H + +extern malloc_mutex_t prof_dump_filename_mtx; +extern base_t *prof_base; + +void bt_init(prof_bt_t *bt, void **vec); +void prof_backtrace(tsd_t *tsd, prof_bt_t *bt); +void prof_hooks_init(); +void prof_unwind_init(); +void prof_sys_thread_name_fetch(tsd_t *tsd); +int prof_getpid(void); +void prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind); +bool prof_prefix_set(tsdn_t *tsdn, const char *prefix); +void prof_fdump_impl(tsd_t *tsd); +void prof_idump_impl(tsd_t *tsd); +bool prof_mdump_impl(tsd_t *tsd, const char *filename); +void prof_gdump_impl(tsd_t *tsd); + +/* Used in unit tests. */ +typedef int (prof_sys_thread_name_read_t)(char *buf, size_t limit); +extern prof_sys_thread_name_read_t *JET_MUTABLE prof_sys_thread_name_read; +typedef int (prof_dump_open_file_t)(const char *, int); +extern prof_dump_open_file_t *JET_MUTABLE prof_dump_open_file; +typedef ssize_t (prof_dump_write_file_t)(int, const void *, size_t); +extern prof_dump_write_file_t *JET_MUTABLE prof_dump_write_file; +typedef int (prof_dump_open_maps_t)(); +extern prof_dump_open_maps_t *JET_MUTABLE prof_dump_open_maps; + +#endif /* JEMALLOC_INTERNAL_PROF_SYS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_types.h b/contrib/jemalloc/include/jemalloc/internal/prof_types.h index 1eff995ecf0fcc..ba6286548e04bd 100644 --- a/contrib/jemalloc/include/jemalloc/internal/prof_types.h +++ b/contrib/jemalloc/include/jemalloc/internal/prof_types.h @@ -2,11 +2,12 @@ #define JEMALLOC_INTERNAL_PROF_TYPES_H typedef struct prof_bt_s prof_bt_t; -typedef struct prof_accum_s prof_accum_t; typedef struct prof_cnt_s prof_cnt_t; typedef struct prof_tctx_s prof_tctx_t; +typedef struct prof_info_s prof_info_t; typedef struct prof_gctx_s prof_gctx_t; typedef struct prof_tdata_s prof_tdata_t; +typedef struct prof_recent_s prof_recent_t; /* Option defaults. */ #ifdef JEMALLOC_PROF @@ -28,7 +29,23 @@ typedef struct prof_tdata_s prof_tdata_t; #define PROF_CKH_MINITEMS 64 /* Size of memory buffer to use when writing dump files. */ -#define PROF_DUMP_BUFSIZE 65536 +#ifndef JEMALLOC_PROF +/* Minimize memory bloat for non-prof builds. */ +# define PROF_DUMP_BUFSIZE 1 +#elif defined(JEMALLOC_DEBUG) +/* Use a small buffer size in debug build, mainly to facilitate testing. */ +# define PROF_DUMP_BUFSIZE 16 +#else +# define PROF_DUMP_BUFSIZE 65536 +#endif + +/* Size of size class related tables */ +#ifdef JEMALLOC_PROF +# define PROF_SC_NSIZES SC_NSIZES +#else +/* Minimize memory bloat for non-prof builds. */ +# define PROF_SC_NSIZES 1 +#endif /* Size of stack-allocated buffer used by prof_printf(). 
*/ #define PROF_PRINTF_BUFSIZE 128 @@ -45,12 +62,14 @@ typedef struct prof_tdata_s prof_tdata_t; */ #define PROF_NTDATA_LOCKS 256 -/* - * prof_tdata pointers close to NULL are used to encode state information that - * is used for cleaning up during thread shutdown. - */ -#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1) -#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2) -#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY +/* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF +#define PROF_DUMP_FILENAME_LEN (PATH_MAX + 1) +#else +#define PROF_DUMP_FILENAME_LEN 1 +#endif + +/* Default number of recent allocations to record. */ +#define PROF_RECENT_ALLOC_MAX_DEFAULT 0 #endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/psset.h b/contrib/jemalloc/include/jemalloc/internal/psset.h new file mode 100644 index 00000000000000..e1d64970ee1402 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/psset.h @@ -0,0 +1,131 @@ +#ifndef JEMALLOC_INTERNAL_PSSET_H +#define JEMALLOC_INTERNAL_PSSET_H + +#include "jemalloc/internal/hpdata.h" + +/* + * A page-slab set. What the eset is to PAC, the psset is to HPA. It maintains + * a collection of page-slabs (the intent being that they are backed by + * hugepages, or at least could be), and handles allocation and deallocation + * requests. + */ + +/* + * One more than the maximum pszind_t we will serve out of the HPA. + * Practically, we expect only the first few to be actually used. This + * corresponds to a maximum size of of 512MB on systems with 4k pages and + * SC_NGROUP == 4, which is already an unreasonably large maximum. Morally, you + * can think of this as being SC_NPSIZES, but there's no sense in wasting that + * much space in the arena, making bitmaps that much larger, etc. + */ +#define PSSET_NPSIZES 64 + +/* + * We keep two purge lists per page size class; one for hugified hpdatas (at + * index 2*pszind), and one for the non-hugified hpdatas (at index 2*pszind + + * 1). This lets us implement a preference for purging non-hugified hpdatas + * among similarly-dirty ones. + * We reserve the last two indices for empty slabs, in that case purging + * hugified ones (which are definitionally all waste) before non-hugified ones + * (i.e. reversing the order). + */ +#define PSSET_NPURGE_LISTS (2 * PSSET_NPSIZES) + +typedef struct psset_bin_stats_s psset_bin_stats_t; +struct psset_bin_stats_s { + /* How many pageslabs are in this bin? */ + size_t npageslabs; + /* Of them, how many pages are active? */ + size_t nactive; + /* And how many are dirty? */ + size_t ndirty; +}; + +typedef struct psset_stats_s psset_stats_t; +struct psset_stats_s { + /* + * The second index is huge stats; nonfull_slabs[pszind][0] contains + * stats for the non-huge slabs in bucket pszind, while + * nonfull_slabs[pszind][1] contains stats for the huge slabs. + */ + psset_bin_stats_t nonfull_slabs[PSSET_NPSIZES][2]; + + /* + * Full slabs don't live in any edata heap, but we still track their + * stats. + */ + psset_bin_stats_t full_slabs[2]; + + /* Empty slabs are similar. */ + psset_bin_stats_t empty_slabs[2]; +}; + +typedef struct psset_s psset_t; +struct psset_s { + /* + * The pageslabs, quantized by the size class of the largest contiguous + * free run of pages in a pageslab. + */ + hpdata_age_heap_t pageslabs[PSSET_NPSIZES]; + /* Bitmap for which set bits correspond to non-empty heaps. 
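[Editor's note] As a side note on the psset purge-list layout documented above (two lists per page size class, hugified before non-hugified, with the last two indices reserved for empty slabs in reversed preference), the index arithmetic reduces to a one-liner. The helper below is illustrative only and not part of psset.h.

```
#include <stdbool.h>
#include <stddef.h>

/* Purge-list index for a non-empty slab of page size class pszind, per the
 * scheme documented above: hugified slabs at 2*pszind, non-hugified at
 * 2*pszind + 1. Later indices are purged more eagerly, so non-hugified
 * slabs are preferred among similarly-dirty ones. */
static inline size_t
demo_purge_list_index(size_t pszind, bool hugified) {
	return 2 * pszind + (hugified ? 0 : 1);
}
```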
*/ + fb_group_t pageslab_bitmap[FB_NGROUPS(PSSET_NPSIZES)]; + /* + * The sum of all bin stats in stats. This lets us quickly answer + * queries for the number of dirty, active, and retained pages in the + * entire set. + */ + psset_bin_stats_t merged_stats; + psset_stats_t stats; + /* + * Slabs with no active allocations, but which are allowed to serve new + * allocations. + */ + hpdata_empty_list_t empty; + /* + * Slabs which are available to be purged, ordered by how much we want + * to purge them (with later indices indicating slabs we want to purge + * more). + */ + hpdata_purge_list_t to_purge[PSSET_NPURGE_LISTS]; + /* Bitmap for which set bits correspond to non-empty purge lists. */ + fb_group_t purge_bitmap[FB_NGROUPS(PSSET_NPURGE_LISTS)]; + /* Slabs which are available to be hugified. */ + hpdata_hugify_list_t to_hugify; +}; + +void psset_init(psset_t *psset); +void psset_stats_accum(psset_stats_t *dst, psset_stats_t *src); + +/* + * Begin or end updating the given pageslab's metadata. While the pageslab is + * being updated, it won't be returned from psset_fit calls. + */ +void psset_update_begin(psset_t *psset, hpdata_t *ps); +void psset_update_end(psset_t *psset, hpdata_t *ps); + +/* Analogous to the eset_fit; pick a hpdata to serve the request. */ +hpdata_t *psset_pick_alloc(psset_t *psset, size_t size); +/* Pick one to purge. */ +hpdata_t *psset_pick_purge(psset_t *psset); +/* Pick one to hugify. */ +hpdata_t *psset_pick_hugify(psset_t *psset); + +void psset_insert(psset_t *psset, hpdata_t *ps); +void psset_remove(psset_t *psset, hpdata_t *ps); + +static inline size_t +psset_npageslabs(psset_t *psset) { + return psset->merged_stats.npageslabs; +} + +static inline size_t +psset_nactive(psset_t *psset) { + return psset->merged_stats.nactive; +} + +static inline size_t +psset_ndirty(psset_t *psset) { + return psset->merged_stats.ndirty; +} + +#endif /* JEMALLOC_INTERNAL_PSSET_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/public_namespace.h b/contrib/jemalloc/include/jemalloc/internal/public_namespace.h index 78d5c66377c3b2..6363f085f34be4 100644 --- a/contrib/jemalloc/include/jemalloc/internal/public_namespace.h +++ b/contrib/jemalloc/include/jemalloc/internal/public_namespace.h @@ -7,11 +7,12 @@ #define je_mallctlnametomib JEMALLOC_N(mallctlnametomib) #define je_malloc JEMALLOC_N(malloc) #define je_malloc_conf JEMALLOC_N(malloc_conf) +#define je_malloc_conf_2_conf_harder JEMALLOC_N(malloc_conf_2_conf_harder) #define je_malloc_message JEMALLOC_N(malloc_message) #define je_malloc_stats_print JEMALLOC_N(malloc_stats_print) #define je_malloc_usable_size JEMALLOC_N(malloc_usable_size) #define je_mallocx JEMALLOC_N(mallocx) -#define je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 JEMALLOC_N(smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756) +#define je_smallocx_54eaed1d8b56b1aa528be3bdd1877e59c56fa90c JEMALLOC_N(smallocx_54eaed1d8b56b1aa528be3bdd1877e59c56fa90c) #define je_nallocx JEMALLOC_N(nallocx) #define je_posix_memalign JEMALLOC_N(posix_memalign) #define je_rallocx JEMALLOC_N(rallocx) @@ -19,4 +20,5 @@ #define je_sallocx JEMALLOC_N(sallocx) #define je_sdallocx JEMALLOC_N(sdallocx) #define je_xallocx JEMALLOC_N(xallocx) +#define je_memalign JEMALLOC_N(memalign) #define je_valloc JEMALLOC_N(valloc) diff --git a/contrib/jemalloc/include/jemalloc/internal/ql.h b/contrib/jemalloc/include/jemalloc/internal/ql.h index 80290407716101..c7f52f86221954 100644 --- a/contrib/jemalloc/include/jemalloc/internal/ql.h +++ b/contrib/jemalloc/include/jemalloc/internal/ql.h @@ 
-3,37 +3,85 @@ #include "jemalloc/internal/qr.h" +/* + * A linked-list implementation. + * + * This is built on top of the ring implementation, but that can be viewed as an + * implementation detail (i.e. trying to advance past the tail of the list + * doesn't wrap around). + * + * You define a struct like so: + * typedef strucy my_s my_t; + * struct my_s { + * int data; + * ql_elm(my_t) my_link; + * }; + * + * // We wobble between "list" and "head" for this type; we're now mostly + * // heading towards "list". + * typedef ql_head(my_t) my_list_t; + * + * You then pass a my_list_t * for a_head arguments, a my_t * for a_elm + * arguments, the token "my_link" for a_field arguments, and the token "my_t" + * for a_type arguments. + */ + /* List definitions. */ #define ql_head(a_type) \ struct { \ a_type *qlh_first; \ } +/* Static initializer for an empty list. */ #define ql_head_initializer(a_head) {NULL} +/* The field definition. */ #define ql_elm(a_type) qr(a_type) -/* List functions. */ +/* A pointer to the first element in the list, or NULL if the list is empty. */ +#define ql_first(a_head) ((a_head)->qlh_first) + +/* Dynamically initializes a list. */ #define ql_new(a_head) do { \ - (a_head)->qlh_first = NULL; \ + ql_first(a_head) = NULL; \ } while (0) -#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) +/* + * Sets dest to be the contents of src (overwriting any elements there), leaving + * src empty. + */ +#define ql_move(a_head_dest, a_head_src) do { \ + ql_first(a_head_dest) = ql_first(a_head_src); \ + ql_new(a_head_src); \ +} while (0) -#define ql_first(a_head) ((a_head)->qlh_first) +/* True if the list is empty, otherwise false. */ +#define ql_empty(a_head) (ql_first(a_head) == NULL) + +/* + * Initializes a ql_elm. Must be called even if the field is about to be + * overwritten. + */ +#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) +/* + * Obtains the last item in the list. + */ #define ql_last(a_head, a_field) \ - ((ql_first(a_head) != NULL) \ - ? qr_prev(ql_first(a_head), a_field) : NULL) + (ql_empty(a_head) ? NULL : qr_prev(ql_first(a_head), a_field)) +/* + * Gets a pointer to the next/prev element in the list. Trying to advance past + * the end or retreat before the beginning of the list returns NULL. + */ #define ql_next(a_head, a_elm, a_field) \ ((ql_last(a_head, a_field) != (a_elm)) \ ? qr_next((a_elm), a_field) : NULL) - #define ql_prev(a_head, a_elm, a_field) \ ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \ : NULL) +/* Inserts a_elm before a_qlelm in the list. */ #define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ qr_before_insert((a_qlelm), (a_elm), a_field); \ if (ql_first(a_head) == (a_qlelm)) { \ @@ -41,23 +89,41 @@ struct { \ } \ } while (0) +/* Inserts a_elm after a_qlelm in the list. */ #define ql_after_insert(a_qlelm, a_elm, a_field) \ qr_after_insert((a_qlelm), (a_elm), a_field) +/* Inserts a_elm as the first item in the list. */ #define ql_head_insert(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) != NULL) { \ + if (!ql_empty(a_head)) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = (a_elm); \ } while (0) +/* Inserts a_elm as the last item in the list. 
*/ #define ql_tail_insert(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) != NULL) { \ + if (!ql_empty(a_head)) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = qr_next((a_elm), a_field); \ } while (0) +/* + * Given lists a = [a_1, ..., a_n] and [b_1, ..., b_n], results in: + * a = [a1, ..., a_n, b_1, ..., b_n] and b = []. + */ +#define ql_concat(a_head_a, a_head_b, a_field) do { \ + if (ql_empty(a_head_a)) { \ + ql_move(a_head_a, a_head_b); \ + } else if (!ql_empty(a_head_b)) { \ + qr_meld(ql_first(a_head_a), ql_first(a_head_b), \ + a_field); \ + ql_new(a_head_b); \ + } \ +} while (0) + +/* Removes a_elm from the list. */ #define ql_remove(a_head, a_elm, a_field) do { \ if (ql_first(a_head) == (a_elm)) { \ ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ @@ -65,20 +131,63 @@ struct { \ if (ql_first(a_head) != (a_elm)) { \ qr_remove((a_elm), a_field); \ } else { \ - ql_first(a_head) = NULL; \ + ql_new(a_head); \ } \ } while (0) +/* Removes the first item in the list. */ #define ql_head_remove(a_head, a_type, a_field) do { \ a_type *t = ql_first(a_head); \ ql_remove((a_head), t, a_field); \ } while (0) +/* Removes the last item in the list. */ #define ql_tail_remove(a_head, a_type, a_field) do { \ a_type *t = ql_last(a_head, a_field); \ ql_remove((a_head), t, a_field); \ } while (0) +/* + * Given a = [a_1, a_2, ..., a_n-1, a_n, a_n+1, ...], + * ql_split(a, a_n, b, some_field) results in + * a = [a_1, a_2, ..., a_n-1] + * and replaces b's contents with: + * b = [a_n, a_n+1, ...] + */ +#define ql_split(a_head_a, a_elm, a_head_b, a_field) do { \ + if (ql_first(a_head_a) == (a_elm)) { \ + ql_move(a_head_b, a_head_a); \ + } else { \ + qr_split(ql_first(a_head_a), (a_elm), a_field); \ + ql_first(a_head_b) = (a_elm); \ + } \ +} while (0) + +/* + * An optimized version of: + * a_type *t = ql_first(a_head); + * ql_remove((a_head), t, a_field); + * ql_tail_insert((a_head), t, a_field); + */ +#define ql_rotate(a_head, a_field) do { \ + ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ +} while (0) + +/* + * Helper macro to iterate over each element in a list in order, starting from + * the head (or in reverse order, starting from the tail). The usage is + * (assuming my_t and my_list_t defined as above). + * + * int sum(my_list_t *list) { + * int sum = 0; + * my_t *iter; + * ql_foreach(iter, list, link) { + * sum += iter->data; + * } + * return sum; + * } + */ + #define ql_foreach(a_var, a_head, a_field) \ qr_foreach((a_var), ql_first(a_head), a_field) diff --git a/contrib/jemalloc/include/jemalloc/internal/qr.h b/contrib/jemalloc/include/jemalloc/internal/qr.h index 1e1056b38685b2..ece4f556860069 100644 --- a/contrib/jemalloc/include/jemalloc/internal/qr.h +++ b/contrib/jemalloc/include/jemalloc/internal/qr.h @@ -1,6 +1,21 @@ #ifndef JEMALLOC_INTERNAL_QR_H #define JEMALLOC_INTERNAL_QR_H +/* + * A ring implementation based on an embedded circular doubly-linked list. + * + * You define your struct like so: + * + * typedef struct my_s my_t; + * struct my_s { + * int data; + * qr(my_t) my_link; + * }; + * + * And then pass a my_t * into macros for a_qr arguments, and the token + * "my_link" into a_field fields. + */ + /* Ring definitions. */ #define qr(a_type) \ struct { \ @@ -8,61 +23,114 @@ struct { \ a_type *qre_prev; \ } -/* Ring functions. */ +/* + * Initialize a qr link. Every link must be initialized before being used, even + * if that initialization is going to be immediately overwritten (say, by being + * passed into an insertion macro). 
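[Editor's note] To make the new ql.h documentation concrete, here is a small usage sketch in the style the comments above describe. It is an illustration only, not part of the patch, and assumes jemalloc's internal ql.h is reachable on the include path.

```
#include "jemalloc/internal/ql.h"	/* Macro-only; pulls in qr.h itself. */
#include <stdio.h>

typedef struct my_s my_t;
struct my_s {
	int data;
	ql_elm(my_t) my_link;	/* Embedded linkage, as documented above. */
};
typedef ql_head(my_t) my_list_t;

int
main(void) {
	my_list_t list;
	my_t a = {1}, b = {2}, c = {3};

	ql_new(&list);
	/* Every element's linkage must be initialized before insertion. */
	ql_elm_new(&a, my_link);
	ql_elm_new(&b, my_link);
	ql_elm_new(&c, my_link);

	ql_tail_insert(&list, &a, my_link);
	ql_tail_insert(&list, &b, my_link);
	ql_head_insert(&list, &c, my_link);	/* List is now: c, a, b. */

	my_t *iter;
	ql_foreach(iter, &list, my_link) {
		printf("%d\n", iter->data);	/* Prints 3, 1, 2. */
	}
	return 0;
}
```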
+ */ #define qr_new(a_qr, a_field) do { \ (a_qr)->a_field.qre_next = (a_qr); \ (a_qr)->a_field.qre_prev = (a_qr); \ } while (0) +/* + * Go forwards or backwards in the ring. Note that (the ring being circular), this + * always succeeds -- you just keep looping around and around the ring if you + * chase pointers without end. + */ #define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) - #define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) -#define qr_before_insert(a_qrelm, a_qr, a_field) do { \ - (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ - (a_qr)->a_field.qre_next = (a_qrelm); \ - (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ - (a_qrelm)->a_field.qre_prev = (a_qr); \ +/* + * Given two rings: + * a -> a_1 -> ... -> a_n -- + * ^ | + * |------------------------ + * + * b -> b_1 -> ... -> b_n -- + * ^ | + * |------------------------ + * + * Results in the ring: + * a -> a_1 -> ... -> a_n -> b -> b_1 -> ... -> b_n -- + * ^ | + * |-------------------------------------------------| + * + * a_qr_a can directly be a qr_next() macro, but a_qr_b cannot. + */ +#define qr_meld(a_qr_a, a_qr_b, a_field) do { \ + (a_qr_b)->a_field.qre_prev->a_field.qre_next = \ + (a_qr_a)->a_field.qre_prev; \ + (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ + (a_qr_b)->a_field.qre_prev = \ + (a_qr_b)->a_field.qre_prev->a_field.qre_next; \ + (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ + (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ } while (0) -#define qr_after_insert(a_qrelm, a_qr, a_field) do { \ - (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ - (a_qr)->a_field.qre_prev = (a_qrelm); \ - (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ - (a_qrelm)->a_field.qre_next = (a_qr); \ -} while (0) +/* + * Logically, this is just a meld. The intent, though, is that a_qrelm is a + * single-element ring, so that "before" has a more obvious interpretation than + * meld. + */ +#define qr_before_insert(a_qrelm, a_qr, a_field) \ + qr_meld((a_qrelm), (a_qr), a_field) -#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \ - a_type *t; \ - (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ - (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ - t = (a_qr_a)->a_field.qre_prev; \ - (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ - (a_qr_b)->a_field.qre_prev = t; \ -} while (0) +/* Ditto, but inserting after rather than before. */ +#define qr_after_insert(a_qrelm, a_qr, a_field) \ + qr_before_insert(qr_next(a_qrelm, a_field), (a_qr), a_field) /* + * Inverts meld; given the ring: + * a -> a_1 -> ... -> a_n -> b -> b_1 -> ... -> b_n -- + * ^ | + * |-------------------------------------------------| + * + * Results in two rings: + * a -> a_1 -> ... -> a_n -- + * ^ | + * |------------------------ + * + * b -> b_1 -> ... -> b_n -- + * ^ | + * |------------------------ + * * qr_meld() and qr_split() are functionally equivalent, so there's no need to * have two copies of the code. 
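[Editor's note] And the corresponding ring-level view: a sketch of qr_meld() joining two rings exactly as in the diagrams above. Again this is illustration only, assuming the internal qr.h header is includable; it is not part of the patch.

```
#include "jemalloc/internal/qr.h"
#include <stdio.h>

typedef struct ring_s ring_t;
struct ring_s {
	char name;
	qr(ring_t) link;
};

int
main(void) {
	ring_t a = {'a'}, a1 = {'A'}, b = {'b'}, b1 = {'B'};

	qr_new(&a, link);
	qr_new(&a1, link);
	qr_new(&b, link);
	qr_new(&b1, link);

	qr_after_insert(&a, &a1, link);	/* Ring: a -> A */
	qr_after_insert(&b, &b1, link);	/* Ring: b -> B */
	qr_meld(&a, &b, link);		/* Ring: a -> A -> b -> B */

	ring_t *iter;
	qr_foreach(iter, &a, link) {
		printf("%c\n", iter->name);	/* Prints a, A, b, B. */
	}
	return 0;
}
```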
*/ -#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \ - qr_meld((a_qr_a), (a_qr_b), a_type, a_field) +#define qr_split(a_qr_a, a_qr_b, a_field) \ + qr_meld((a_qr_a), (a_qr_b), a_field) -#define qr_remove(a_qr, a_field) do { \ - (a_qr)->a_field.qre_prev->a_field.qre_next \ - = (a_qr)->a_field.qre_next; \ - (a_qr)->a_field.qre_next->a_field.qre_prev \ - = (a_qr)->a_field.qre_prev; \ - (a_qr)->a_field.qre_next = (a_qr); \ - (a_qr)->a_field.qre_prev = (a_qr); \ -} while (0) +/* + * Splits off a_qr from the rest of its ring, so that it becomes a + * single-element ring. + */ +#define qr_remove(a_qr, a_field) \ + qr_split(qr_next(a_qr, a_field), (a_qr), a_field) +/* + * Helper macro to iterate over each element in a ring exactly once, starting + * with a_qr. The usage is (assuming my_t defined as above): + * + * int sum(my_t *item) { + * int sum = 0; + * my_t *iter; + * qr_foreach(iter, item, link) { + * sum += iter->data; + * } + * return sum; + * } + */ #define qr_foreach(var, a_qr, a_field) \ for ((var) = (a_qr); \ (var) != NULL; \ (var) = (((var)->a_field.qre_next != (a_qr)) \ ? (var)->a_field.qre_next : NULL)) +/* + * The same (and with the same usage) as qr_foreach, but in the opposite order, + * ending with a_qr. + */ #define qr_reverse_foreach(var, a_qr, a_field) \ for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ (var) != NULL; \ diff --git a/contrib/jemalloc/include/jemalloc/internal/quantum.h b/contrib/jemalloc/include/jemalloc/internal/quantum.h index 821086e992cdb8..c22d753aa79fa0 100644 --- a/contrib/jemalloc/include/jemalloc/internal/quantum.h +++ b/contrib/jemalloc/include/jemalloc/internal/quantum.h @@ -30,11 +30,18 @@ # ifdef __hppa__ # define LG_QUANTUM 4 # endif +# ifdef __loongarch__ +# define LG_QUANTUM 4 +# endif # ifdef __m68k__ # define LG_QUANTUM 3 # endif # ifdef __mips__ -# define LG_QUANTUM 3 +# if defined(__mips_n32) || defined(__mips_n64) +# define LG_QUANTUM 4 +# else +# define LG_QUANTUM 3 +# endif # endif # ifdef __nios2__ # define LG_QUANTUM 3 @@ -61,6 +68,9 @@ # ifdef __le32__ # define LG_QUANTUM 4 # endif +# ifdef __arc__ +# define LG_QUANTUM 3 +# endif # ifndef LG_QUANTUM # error "Unknown minimum alignment for architecture; specify via " "--with-lg-quantum" diff --git a/contrib/jemalloc/include/jemalloc/internal/rb.h b/contrib/jemalloc/include/jemalloc/internal/rb.h index 47fa5ca99bbe13..a9a51cb6860418 100644 --- a/contrib/jemalloc/include/jemalloc/internal/rb.h +++ b/contrib/jemalloc/include/jemalloc/internal/rb.h @@ -1,3 +1,6 @@ +#ifndef JEMALLOC_INTERNAL_RB_H +#define JEMALLOC_INTERNAL_RB_H + /*- ******************************************************************************* * @@ -19,13 +22,19 @@ ******************************************************************************* */ -#ifndef RB_H_ -#define RB_H_ - #ifndef __PGI #define RB_COMPACT #endif +/* + * Each node in the RB tree consumes at least 1 byte of space (for the linkage + * if nothing else, so there are a maximum of sizeof(void *) << 3 rb tree nodes + * in any process (and thus, at most sizeof(void *) << 3 nodes in any rb tree). + * The choice of algorithm bounds the depth of a tree to twice the binary log of + * the number of elements in the tree; the following bound follows. + */ +#define RB_MAX_DEPTH (sizeof(void *) << 4) + #ifdef RB_COMPACT /* Node structure. */ #define rb_node(a_type) \ @@ -159,12 +168,22 @@ struct { \ rbtn_right_set(a_type, a_field, (r_node), (a_node)); \ } while (0) +#define rb_summarized_only_false(...) +#define rb_summarized_only_true(...) 
__VA_ARGS__ +#define rb_empty_summarize(a_node, a_lchild, a_rchild) false + /* - * The rb_proto() macro generates function prototypes that correspond to the - * functions generated by an equivalently parameterized call to rb_gen(). + * The rb_proto() and rb_summarized_proto() macros generate function prototypes + * that correspond to the functions generated by an equivalently parameterized + * call to rb_gen() or rb_summarized_gen(), respectively. */ #define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ + rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, false) +#define rb_summarized_proto(a_attr, a_prefix, a_rbt_type, a_type) \ + rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, true) +#define rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, \ + a_is_summarized) \ a_attr void \ a_prefix##new(a_rbt_type *rbtree); \ a_attr bool \ @@ -195,31 +214,94 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \ a_attr void \ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ - void *arg); + void *arg); \ +/* Extended API */ \ +rb_summarized_only_##a_is_summarized( \ +a_attr void \ +a_prefix##update_summaries(a_rbt_type *rbtree, a_type *node); \ +a_attr bool \ +a_prefix##empty_filtered(a_rbt_type *rbtree, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx); \ +a_attr a_type * \ +a_prefix##first_filtered(a_rbt_type *rbtree, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx); \ +a_attr a_type * \ +a_prefix##last_filtered(a_rbt_type *rbtree, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx); \ +a_attr a_type * \ +a_prefix##next_filtered(a_rbt_type *rbtree, a_type *node, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx); \ +a_attr a_type * \ +a_prefix##prev_filtered(a_rbt_type *rbtree, a_type *node, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx); \ +a_attr a_type * \ +a_prefix##search_filtered(a_rbt_type *rbtree, const a_type *key, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx); \ +a_attr a_type * \ +a_prefix##nsearch_filtered(a_rbt_type *rbtree, const a_type *key, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx); \ +a_attr a_type * \ +a_prefix##psearch_filtered(a_rbt_type *rbtree, const a_type *key, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx); \ +a_attr a_type * \ +a_prefix##iter_filtered(a_rbt_type *rbtree, a_type *start, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx); \ +a_attr a_type * \ +a_prefix##reverse_iter_filtered(a_rbt_type *rbtree, a_type *start, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx); \ +) /* * The rb_gen() macro generates a type-specific red-black tree implementation, * based on the above cpp macros. - * * Arguments: * - * a_attr : Function attribute for generated functions (ex: static). - * a_prefix : Prefix for generated functions (ex: ex_). 
- * a_rb_type : Type for red-black tree data structure (ex: ex_t). - * a_type : Type for red-black tree node data structure (ex: ex_node_t). - * a_field : Name of red-black tree node linkage (ex: ex_link). - * a_cmp : Node comparison function name, with the following prototype: - * int (a_cmp *)(a_type *a_node, a_type *a_other); - * ^^^^^^ - * or a_key - * Interpretation of comparison function return values: - * -1 : a_node < a_other - * 0 : a_node == a_other - * 1 : a_node > a_other - * In all cases, the a_node or a_key macro argument is the first - * argument to the comparison function, which makes it possible - * to write comparison functions that treat the first argument - * specially. + * a_attr: + * Function attribute for generated functions (ex: static). + * a_prefix: + * Prefix for generated functions (ex: ex_). + * a_rb_type: + * Type for red-black tree data structure (ex: ex_t). + * a_type: + * Type for red-black tree node data structure (ex: ex_node_t). + * a_field: + * Name of red-black tree node linkage (ex: ex_link). + * a_cmp: + * Node comparison function name, with the following prototype: + * + * int a_cmp(a_type *a_node, a_type *a_other); + * ^^^^^^ + * or a_key + * Interpretation of comparison function return values: + * -1 : a_node < a_other + * 0 : a_node == a_other + * 1 : a_node > a_other + * In all cases, the a_node or a_key macro argument is the first argument to + * the comparison function, which makes it possible to write comparison + * functions that treat the first argument specially. a_cmp must be a total + * order on values inserted into the tree -- duplicates are not allowed. * * Assuming the following setup: * @@ -338,8 +420,193 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ * during iteration. There is no way to stop iteration once it * has begun. * arg : Opaque pointer passed to cb(). + * + * The rb_summarized_gen() macro generates all the functions above, but has an + * expanded interface. It introduces the notion of summarizing subtrees, and of + * filtering searches in the tree according to the information contained in + * those summaries. + * The extra macro argument is: + * a_summarize: + * Tree summarization function name, with the following prototype: + * + * bool a_summarize(a_type *a_node, const a_type *a_left_child, + * const a_type *a_right_child); + * + * This function should update a_node with the summary of the subtree rooted + * there, using the data contained in it and the summaries in a_left_child + * and a_right_child. One or both of them may be NULL. When the tree + * changes due to an insertion or removal, it updates the summaries of all + * nodes whose subtrees have changed (always updating the summaries of + * children before their parents). If the user alters a node in the tree in + * a way that may change its summary, they can call the generated + * update_summaries function to bubble up the summary changes to the root. + * It should return true if the summary changed (or may have changed), and + * false if it didn't (which will allow the implementation to terminate + * "bubbling up" the summaries early). + * As the parameter names indicate, the children are ordered as they are in + * the tree: a_left_child, if it is not NULL, compares less than a_node, + * which in turn compares less than a_right_child (if a_right_child is not + * NULL).
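Editorial aside (not part of the patch): a minimal sketch of a comparator satisfying the contract just described, using the ex_node_t/data names from the examples in this comment block and assuming the data keys of inserted nodes are unique, so the order is total.

```
typedef struct ex_node_s ex_node_t;
struct ex_node_s {
	/* rb_node(ex_node_t) ex_link; -- tree linkage omitted in this sketch */
	unsigned data;
};

/* Returns -1, 0, or 1, as the contract above requires. */
static int
ex_cmp(ex_node_t *a_node, ex_node_t *a_other) {
	return (a_node->data > a_other->data) - (a_node->data < a_other->data);
}
```

If equal keys must be tolerated, a common approach is to additionally break ties on node address so that distinct nodes never compare equal.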
+ * + * Using the same setup as above but replacing the macro with + * rb_summarized_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp, + * ex_summarize) + * + * Generates all the previous functions, but adds some more: + * + * static void + * ex_update_summaries(ex_t *tree, ex_node_t *node); + * Description: Recompute all summaries of ancestors of node. + * Args: + * tree: Pointer to an initialized red-black tree object. + * node: The element of the tree whose summary may have changed. + * + * For each of ex_empty, ex_first, ex_last, ex_next, ex_prev, ex_search, + * ex_nsearch, ex_psearch, ex_iter, and ex_reverse_iter, an additional function + * is generated as well, with the suffix _filtered (e.g. ex_empty_filtered, + * ex_first_filtered, etc.). These use the concept of a "filter"; a binary + * property some node either satisfies or does not satisfy. Clever use of the + * a_summarize argument to rb_summarized_gen can allow efficient computation of + * these predicates across whole subtrees of the tree. + * The extended API functions accept three additional arguments after the + * arguments to the corresponding non-extended equivalent. + * + * ex_fn(..., bool (*filter_node)(void *, ex_node_t *), + * bool (*filter_subtree)(void *, ex_node_t *), void *filter_ctx); + * filter_node : Returns true if the node passes the filter. + * filter_subtree : Returns true if some node in the subtree rooted at + * node passes the filter. + * filter_ctx : A context argument passed to the filters. + * + * For a more concrete example of summarizing and filtering, suppose we're using + * the red-black tree to track a set of integers: + * + * struct ex_node_s { + * rb_node(ex_node_t) ex_link; + * unsigned data; + * }; + * + * Suppose, for some application-specific reason, we want to be able to quickly + * find numbers in the set which are divisible by large powers of 2 (say, for + * aligned allocation purposes). We augment the node with a summary field: + * + * struct ex_node_s { + * rb_node(ex_node_t) ex_link; + * unsigned data; + * unsigned max_subtree_ffs; + * }; + * + * and define our summarization function as follows: + * + * bool + * ex_summarize(ex_node_t *node, const ex_node_t *lchild, + * const ex_node_t *rchild) { + * unsigned new_max_subtree_ffs = ffs(node->data); + * if (lchild != NULL && lchild->max_subtree_ffs > new_max_subtree_ffs) { + * new_max_subtree_ffs = lchild->max_subtree_ffs; + * } + * if (rchild != NULL && rchild->max_subtree_ffs > new_max_subtree_ffs) { + * new_max_subtree_ffs = rchild->max_subtree_ffs; + * } + * bool changed = (node->max_subtree_ffs != new_max_subtree_ffs); + * node->max_subtree_ffs = new_max_subtree_ffs; + * // This could be "return true" without any correctness or big-O + * // performance changes; but practically, precisely reporting summary + * // changes reduces the amount of work that has to be done when "bubbling + * // up" summary changes.
+ * return changed; + * } + * + * We can now implement our filter functions as follows: + * bool + * ex_filter_node(void *filter_ctx, ex_node_t *node) { + * unsigned required_ffs = *(unsigned *)filter_ctx; + * return ffs(node->data) >= required_ffs; + * } + * bool + * ex_filter_subtree(void *filter_ctx, ex_node_t *node) { + * unsigned required_ffs = *(unsigned *)filter_ctx; + * return node->max_subtree_ffs >= required_ffs; + * } + * + * We can now easily search for, e.g., the smallest integer in the set that's + * divisible by 128: + * ex_node_t * + * find_div_128(ex_t *tree) { + * unsigned min_ffs = 8; + * return ex_first_filtered(tree, &ex_filter_node, &ex_filter_subtree, + * &min_ffs); + * } + * + * We could with similar ease: + * - Find the next multiple of 128 in the set that's larger than 12345 (with + * ex_nsearch_filtered) + * - Iterate over just those multiples of 64 that are in the set (with + * ex_iter_filtered) + * - Determine if the set contains any multiples of 1024 (with + * ex_empty_filtered). + * + * Some possibly subtle API notes: + * - The node argument to ex_next_filtered and ex_prev_filtered need not pass + * the filter; it will find the next/prev node that passes the filter. + * - ex_search_filtered will fail even for a node in the tree, if that node does + * not pass the filter. ex_psearch_filtered and ex_nsearch_filtered behave + * similarly; they may return a node larger/smaller than the key, even if a + * node equivalent to the key is in the tree (but does not pass the filter). + * - Similarly, if the start argument to a filtered iteration function does not + * pass the filter, the callback won't be invoked on it. + * + * These should make sense after a moment's reflection; each post-condition is + * the same as with the unfiltered version, with the added constraint that the + * returned node must pass the filter. */ #define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ + rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp, \ + rb_empty_summarize, false) +#define rb_summarized_gen(a_attr, a_prefix, a_rbt_type, a_type, \ + a_field, a_cmp, a_summarize) \ + rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp, \ + a_summarize, true) + +#define rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, \ + a_field, a_cmp, a_summarize, a_is_summarized) \ +typedef struct { \ + a_type *node; \ + int cmp; \ +} a_prefix##path_entry_t; \ +static inline void \ +a_prefix##summarize_range(a_prefix##path_entry_t *rfirst, \ + a_prefix##path_entry_t *rlast) { \ + while ((uintptr_t)rlast >= (uintptr_t)rfirst) { \ + a_type *node = rlast->node; \ + /* Avoid a warning when a_summarize is rb_empty_summarize. */ \ + (void)node; \ + bool changed = a_summarize(node, rbtn_left_get(a_type, a_field, \ + node), rbtn_right_get(a_type, a_field, node)); \ + if (!changed) { \ + break; \ + } \ + rlast--; \ + } \ +} \ +/* On the remove pathways, we sometimes swap the node being removed */\ +/* and its first successor; in such cases we need to do two range */\ +/* updates; one from the node to its (former) swapped successor, the */\ +/* next from that successor to the root (with either allowed to */\ +/* bail out early if appropriate).
*/\ +static inline void \ +a_prefix##summarize_swapped_range(a_prefix##path_entry_t *rfirst, \ + a_prefix##path_entry_t *rlast, a_prefix##path_entry_t *swap_loc) { \ + if (swap_loc == NULL || rlast <= swap_loc) { \ + a_prefix##summarize_range(rfirst, rlast); \ + } else { \ + a_prefix##summarize_range(swap_loc + 1, rlast); \ + (void)a_summarize(swap_loc->node, \ + rbtn_left_get(a_type, a_field, swap_loc->node), \ + rbtn_right_get(a_type, a_field, swap_loc->node)); \ + a_prefix##summarize_range(rfirst, swap_loc - 1); \ + } \ +} \ a_attr void \ a_prefix##new(a_rbt_type *rbtree) { \ rb_new(a_type, a_field, rbtree); \ @@ -465,10 +732,8 @@ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \ } \ a_attr void \ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ - struct { \ - a_type *node; \ - int cmp; \ - } path[sizeof(void *) << 4], *pathp; \ + a_prefix##path_entry_t path[RB_MAX_DEPTH]; \ + a_prefix##path_entry_t *pathp; \ rbt_node_new(a_type, a_field, rbtree, node); \ /* Wind. */ \ path->node = rbtree->rbt_root; \ @@ -484,6 +749,13 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ } \ } \ pathp->node = node; \ + /* A loop invariant we maintain is that all nodes with */\ + /* out-of-date summaries live in path[0], path[1], ..., *pathp. */\ + /* To maintain this, we have to summarize node, since we */\ + /* decrement pathp before the first iteration. */\ + assert(rbtn_left_get(a_type, a_field, node) == NULL); \ + assert(rbtn_right_get(a_type, a_field, node) == NULL); \ + (void)a_summarize(node, NULL, NULL); \ /* Unwind. */ \ for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ a_type *cnode = pathp->node; \ @@ -498,9 +770,13 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ a_type *tnode; \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, cnode, tnode); \ + (void)a_summarize(cnode, \ + rbtn_left_get(a_type, a_field, cnode), \ + rbtn_right_get(a_type, a_field, cnode)); \ cnode = tnode; \ } \ } else { \ + a_prefix##summarize_range(path, pathp); \ return; \ } \ } else { \ @@ -521,13 +797,20 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ rbtn_rotate_left(a_type, a_field, cnode, tnode); \ rbtn_color_set(a_type, a_field, tnode, tred); \ rbtn_red_set(a_type, a_field, cnode); \ + (void)a_summarize(cnode, \ + rbtn_left_get(a_type, a_field, cnode), \ + rbtn_right_get(a_type, a_field, cnode)); \ cnode = tnode; \ } \ } else { \ + a_prefix##summarize_range(path, pathp); \ return; \ } \ } \ pathp->node = cnode; \ + (void)a_summarize(cnode, \ + rbtn_left_get(a_type, a_field, cnode), \ + rbtn_right_get(a_type, a_field, cnode)); \ } \ /* Set root, and make it black. */ \ rbtree->rbt_root = path->node; \ @@ -535,12 +818,18 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ } \ a_attr void \ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ - struct { \ - a_type *node; \ - int cmp; \ - } *pathp, *nodep, path[sizeof(void *) << 4]; \ + a_prefix##path_entry_t path[RB_MAX_DEPTH]; \ + a_prefix##path_entry_t *pathp; \ + a_prefix##path_entry_t *nodep; \ + a_prefix##path_entry_t *swap_loc; \ + /* This is a "real" sentinel -- NULL means we didn't swap the */\ + /* node to be pruned with one of its successors, and so */\ + /* summarization can terminate early whenever some summary */\ + /* doesn't change. */\ + swap_loc = NULL; \ + /* This is just to silence a compiler warning. */ \ + nodep = NULL; \ /* Wind. */ \ - nodep = NULL; /* Silence compiler warning. 
*/ \ path->node = rbtree->rbt_root; \ for (pathp = path; pathp->node != NULL; pathp++) { \ int cmp = pathp->cmp = a_cmp(node, pathp->node); \ @@ -567,6 +856,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ pathp--; \ if (pathp->node != node) { \ /* Swap node with its successor. */ \ + swap_loc = nodep; \ bool tred = rbtn_red_get(a_type, a_field, pathp->node); \ rbtn_color_set(a_type, a_field, pathp->node, \ rbtn_red_get(a_type, a_field, node)); \ @@ -604,6 +894,9 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ rbtn_black_set(a_type, a_field, left); \ if (pathp == path) { \ rbtree->rbt_root = left; \ + /* Nothing to summarize -- the subtree rooted at the */\ + /* node's left child hasn't changed, and it's now the */\ + /* root. */\ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ @@ -612,6 +905,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ left); \ } \ + a_prefix##summarize_swapped_range(path, &pathp[-1], \ + swap_loc); \ } \ return; \ } else if (pathp == path) { \ @@ -620,10 +915,15 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ return; \ } \ } \ + /* We've now established the invariant that the node has no right */\ + /* child (well, morally; we didn't bother nulling it out if we */\ + /* swapped it with its successor), and that the only nodes with */\ + /* out-of-date summaries live in path[0], path[1], ..., pathp[-1].*/\ if (rbtn_red_get(a_type, a_field, pathp->node)) { \ /* Prune red node, which requires no fixup. */ \ assert(pathp[-1].cmp < 0); \ rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \ + a_prefix##summarize_swapped_range(path, &pathp[-1], swap_loc); \ return; \ } \ /* The node to be pruned is black, so unwind until balance is */\ @@ -657,6 +957,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ rbtn_right_set(a_type, a_field, pathp->node, tnode);\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ + (void)a_summarize(pathp->node, \ + rbtn_left_get(a_type, a_field, pathp->node), \ + rbtn_right_get(a_type, a_field, pathp->node)); \ + (void)a_summarize(right, \ + rbtn_left_get(a_type, a_field, right), \ + rbtn_right_get(a_type, a_field, right)); \ } else { \ /* || */\ /* pathp(r) */\ @@ -667,7 +973,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ /* */\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ + (void)a_summarize(pathp->node, \ + rbtn_left_get(a_type, a_field, pathp->node), \ + rbtn_right_get(a_type, a_field, pathp->node)); \ } \ + (void)a_summarize(tnode, rbtn_left_get(a_type, a_field, \ + tnode), rbtn_right_get(a_type, a_field, tnode)); \ /* Balance restored, but rotation modified subtree */\ /* root. 
*/\ assert((uintptr_t)pathp > (uintptr_t)path); \ @@ -678,6 +989,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ + a_prefix##summarize_swapped_range(path, &pathp[-1], \ + swap_loc); \ return; \ } else { \ a_type *right = rbtn_right_get(a_type, a_field, \ @@ -698,6 +1011,15 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ rbtn_right_set(a_type, a_field, pathp->node, tnode);\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ + (void)a_summarize(pathp->node, \ + rbtn_left_get(a_type, a_field, pathp->node), \ + rbtn_right_get(a_type, a_field, pathp->node)); \ + (void)a_summarize(right, \ + rbtn_left_get(a_type, a_field, right), \ + rbtn_right_get(a_type, a_field, right)); \ + (void)a_summarize(tnode, \ + rbtn_left_get(a_type, a_field, tnode), \ + rbtn_right_get(a_type, a_field, tnode)); \ /* Balance restored, but rotation modified */\ /* subtree root, which may actually be the tree */\ /* root. */\ @@ -712,6 +1034,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ rbtn_right_set(a_type, a_field, \ pathp[-1].node, tnode); \ } \ + a_prefix##summarize_swapped_range(path, \ + &pathp[-1], swap_loc); \ } \ return; \ } else { \ @@ -725,6 +1049,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ rbtn_red_set(a_type, a_field, pathp->node); \ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ + (void)a_summarize(pathp->node, \ + rbtn_left_get(a_type, a_field, pathp->node), \ + rbtn_right_get(a_type, a_field, pathp->node)); \ + (void)a_summarize(tnode, \ + rbtn_left_get(a_type, a_field, tnode), \ + rbtn_right_get(a_type, a_field, tnode)); \ pathp->node = tnode; \ } \ } \ @@ -757,6 +1087,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ tnode); \ rbtn_right_set(a_type, a_field, unode, tnode); \ rbtn_rotate_left(a_type, a_field, unode, tnode); \ + (void)a_summarize(pathp->node, \ + rbtn_left_get(a_type, a_field, pathp->node), \ + rbtn_right_get(a_type, a_field, pathp->node)); \ + (void)a_summarize(unode, \ + rbtn_left_get(a_type, a_field, unode), \ + rbtn_right_get(a_type, a_field, unode)); \ } else { \ /* || */\ /* pathp(b) */\ @@ -771,7 +1107,13 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ rbtn_black_set(a_type, a_field, tnode); \ + (void)a_summarize(pathp->node, \ + rbtn_left_get(a_type, a_field, pathp->node), \ + rbtn_right_get(a_type, a_field, pathp->node)); \ } \ + (void)a_summarize(tnode, \ + rbtn_left_get(a_type, a_field, tnode), \ + rbtn_right_get(a_type, a_field, tnode)); \ /* Balance restored, but rotation modified subtree */\ /* root, which may actually be the tree root. */\ if (pathp == path) { \ @@ -785,6 +1127,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ + a_prefix##summarize_swapped_range(path, &pathp[-1], \ + swap_loc); \ } \ return; \ } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ @@ -803,6 +1147,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ + (void)a_summarize(pathp->node, \ + rbtn_left_get(a_type, a_field, pathp->node), \ + rbtn_right_get(a_type, a_field, pathp->node)); \ + (void)a_summarize(tnode, \ + rbtn_left_get(a_type, a_field, tnode), \ + rbtn_right_get(a_type, a_field, tnode)); \ /* Balance restored, but rotation modified */\ /* subtree root. 
*/\ assert((uintptr_t)pathp > (uintptr_t)path); \ @@ -813,6 +1163,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ + a_prefix##summarize_swapped_range(path, &pathp[-1], \ + swap_loc); \ return; \ } else { \ /* || */\ @@ -824,6 +1176,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ rbtn_red_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, pathp->node); \ /* Balance restored. */ \ + a_prefix##summarize_swapped_range(path, pathp, \ + swap_loc); \ return; \ } \ } else { \ @@ -840,6 +1194,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ + (void)a_summarize(pathp->node, \ + rbtn_left_get(a_type, a_field, pathp->node), \ + rbtn_right_get(a_type, a_field, pathp->node)); \ + (void)a_summarize(tnode, \ + rbtn_left_get(a_type, a_field, tnode), \ + rbtn_right_get(a_type, a_field, tnode)); \ /* Balance restored, but rotation modified */\ /* subtree root, which may actually be the tree */\ /* root. */\ @@ -854,6 +1214,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ rbtn_right_set(a_type, a_field, \ pathp[-1].node, tnode); \ } \ + a_prefix##summarize_swapped_range(path, \ + &pathp[-1], swap_loc); \ } \ return; \ } else { \ @@ -864,6 +1226,9 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ /* / */\ /* (b) */\ rbtn_red_set(a_type, a_field, left); \ + (void)a_summarize(pathp->node, \ + rbtn_left_get(a_type, a_field, pathp->node), \ + rbtn_right_get(a_type, a_field, pathp->node)); \ } \ } \ } \ @@ -1001,6 +1366,491 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ void *arg) { \ a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \ rbtree->rbt_root = NULL; \ -} +} \ +/* BEGIN SUMMARIZED-ONLY IMPLEMENTATION */ \ +rb_summarized_only_##a_is_summarized( \ +static inline a_prefix##path_entry_t * \ +a_prefix##wind(a_rbt_type *rbtree, \ + a_prefix##path_entry_t path[RB_MAX_DEPTH], a_type *node) { \ + a_prefix##path_entry_t *pathp; \ + path->node = rbtree->rbt_root; \ + for (pathp = path; ; pathp++) { \ + assert((size_t)(pathp - path) < RB_MAX_DEPTH); \ + pathp->cmp = a_cmp(node, pathp->node); \ + if (pathp->cmp < 0) { \ + pathp[1].node = rbtn_left_get(a_type, a_field, \ + pathp->node); \ + } else if (pathp->cmp == 0) { \ + return pathp; \ + } else { \ + pathp[1].node = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + } \ + } \ + unreachable(); \ +} \ +a_attr void \ +a_prefix##update_summaries(a_rbt_type *rbtree, a_type *node) { \ + a_prefix##path_entry_t path[RB_MAX_DEPTH]; \ + a_prefix##path_entry_t *pathp = a_prefix##wind(rbtree, path, node); \ + a_prefix##summarize_range(path, pathp); \ +} \ +a_attr bool \ +a_prefix##empty_filtered(a_rbt_type *rbtree, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx) { \ + a_type *node = rbtree->rbt_root; \ + return node == NULL || !filter_subtree(filter_ctx, node); \ +} \ +static inline a_type * \ +a_prefix##first_filtered_from_node(a_type *node, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx) { \ + assert(node != NULL && filter_subtree(filter_ctx, node)); \ + while (true) { \ + a_type *left = rbtn_left_get(a_type, a_field, node); \ + a_type *right = rbtn_right_get(a_type, a_field, node); \ + if (left != NULL && filter_subtree(filter_ctx, left)) { \ + node = left; \ + } else if 
(filter_node(filter_ctx, node)) { \ + return node; \ + } else { \ + assert(right != NULL \ + && filter_subtree(filter_ctx, right)); \ + node = right; \ + } \ + } \ + unreachable(); \ +} \ +a_attr a_type * \ +a_prefix##first_filtered(a_rbt_type *rbtree, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx) { \ + a_type *node = rbtree->rbt_root; \ + if (node == NULL || !filter_subtree(filter_ctx, node)) { \ + return NULL; \ + } \ + return a_prefix##first_filtered_from_node(node, filter_node, \ + filter_subtree, filter_ctx); \ +} \ +static inline a_type * \ +a_prefix##last_filtered_from_node(a_type *node, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx) { \ + assert(node != NULL && filter_subtree(filter_ctx, node)); \ + while (true) { \ + a_type *left = rbtn_left_get(a_type, a_field, node); \ + a_type *right = rbtn_right_get(a_type, a_field, node); \ + if (right != NULL && filter_subtree(filter_ctx, right)) { \ + node = right; \ + } else if (filter_node(filter_ctx, node)) { \ + return node; \ + } else { \ + assert(left != NULL \ + && filter_subtree(filter_ctx, left)); \ + node = left; \ + } \ + } \ + unreachable(); \ +} \ +a_attr a_type * \ +a_prefix##last_filtered(a_rbt_type *rbtree, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx) { \ + a_type *node = rbtree->rbt_root; \ + if (node == NULL || !filter_subtree(filter_ctx, node)) { \ + return NULL; \ + } \ + return a_prefix##last_filtered_from_node(node, filter_node, \ + filter_subtree, filter_ctx); \ +} \ +/* Internal implementation function. Search for a node comparing */\ +/* equal to key matching the filter. If such a node is in the tree, */\ +/* return it. Additionally, the caller has the option to ask for */\ +/* bounds on the next / prev node in the tree passing the filter. */\ +/* If nextbound is true, then this function will do one of the */\ +/* following: */\ +/* - Fill in *nextbound_node with the smallest node in the tree */\ +/* greater than key passing the filter, and NULL-out */\ +/* *nextbound_subtree. */\ +/* - Fill in *nextbound_subtree with a parent of that node which is */\ +/* not a parent of the searched-for node, and NULL-out */\ +/* *nextbound_node. */\ +/* - NULL-out both *nextbound_node and *nextbound_subtree, in which */\ +/* case no node greater than key but passing the filter is in the */\ +/* tree. */\ +/* The prevbound case is similar. If the caller knows that key is in */\ +/* the tree and that the subtree rooted at key does not contain a */\ +/* node satisfying the bound being searched for, then they can pass */\ +/* false for include_subtree, in which case we won't bother searching */\ +/* there (risking a cache miss). */\ +/* */\ +/* This API is unfortunately complex; but the logic for filtered */\ +/* searches is very subtle, and otherwise we would have to repeat it */\ +/* multiple times for filtered search, nsearch, psearch, next, and */\ +/* prev. 
*/\ +static inline a_type * \ +a_prefix##search_with_filter_bounds(a_rbt_type *rbtree, \ + const a_type *key, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx, \ + bool include_subtree, \ + bool nextbound, a_type **nextbound_node, a_type **nextbound_subtree, \ + bool prevbound, a_type **prevbound_node, a_type **prevbound_subtree) {\ + if (nextbound) { \ + *nextbound_node = NULL; \ + *nextbound_subtree = NULL; \ + } \ + if (prevbound) { \ + *prevbound_node = NULL; \ + *prevbound_subtree = NULL; \ + } \ + a_type *tnode = rbtree->rbt_root; \ + while (tnode != NULL && filter_subtree(filter_ctx, tnode)) { \ + int cmp = a_cmp(key, tnode); \ + a_type *tleft = rbtn_left_get(a_type, a_field, tnode); \ + a_type *tright = rbtn_right_get(a_type, a_field, tnode); \ + if (cmp < 0) { \ + if (nextbound) { \ + if (filter_node(filter_ctx, tnode)) { \ + *nextbound_node = tnode; \ + *nextbound_subtree = NULL; \ + } else if (tright != NULL && filter_subtree( \ + filter_ctx, tright)) { \ + *nextbound_node = NULL; \ + *nextbound_subtree = tright; \ + } \ + } \ + tnode = tleft; \ + } else if (cmp > 0) { \ + if (prevbound) { \ + if (filter_node(filter_ctx, tnode)) { \ + *prevbound_node = tnode; \ + *prevbound_subtree = NULL; \ + } else if (tleft != NULL && filter_subtree( \ + filter_ctx, tleft)) { \ + *prevbound_node = NULL; \ + *prevbound_subtree = tleft; \ + } \ + } \ + tnode = tright; \ + } else { \ + if (filter_node(filter_ctx, tnode)) { \ + return tnode; \ + } \ + if (include_subtree) { \ + if (prevbound && tleft != NULL && filter_subtree( \ + filter_ctx, tleft)) { \ + *prevbound_node = NULL; \ + *prevbound_subtree = tleft; \ + } \ + if (nextbound && tright != NULL && filter_subtree( \ + filter_ctx, tright)) { \ + *nextbound_node = NULL; \ + *nextbound_subtree = tright; \ + } \ + } \ + return NULL; \ + } \ + } \ + return NULL; \ +} \ +a_attr a_type * \ +a_prefix##next_filtered(a_rbt_type *rbtree, a_type *node, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx) { \ + a_type *nright = rbtn_right_get(a_type, a_field, node); \ + if (nright != NULL && filter_subtree(filter_ctx, nright)) { \ + return a_prefix##first_filtered_from_node(nright, filter_node, \ + filter_subtree, filter_ctx); \ + } \ + a_type *node_candidate; \ + a_type *subtree_candidate; \ + a_type *search_result = a_prefix##search_with_filter_bounds( \ + rbtree, node, filter_node, filter_subtree, filter_ctx, \ + /* include_subtree */ false, \ + /* nextbound */ true, &node_candidate, &subtree_candidate, \ + /* prevbound */ false, NULL, NULL); \ + assert(node == search_result \ + || !filter_node(filter_ctx, node)); \ + if (node_candidate != NULL) { \ + return node_candidate; \ + } \ + if (subtree_candidate != NULL) { \ + return a_prefix##first_filtered_from_node( \ + subtree_candidate, filter_node, filter_subtree, \ + filter_ctx); \ + } \ + return NULL; \ +} \ +a_attr a_type * \ +a_prefix##prev_filtered(a_rbt_type *rbtree, a_type *node, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx) { \ + a_type *nleft = rbtn_left_get(a_type, a_field, node); \ + if (nleft != NULL && filter_subtree(filter_ctx, nleft)) { \ + return a_prefix##last_filtered_from_node(nleft, filter_node, \ + filter_subtree, filter_ctx); \ + } \ + a_type *node_candidate; \ + a_type *subtree_candidate; \ + a_type *search_result = a_prefix##search_with_filter_bounds( \ + rbtree, node, filter_node, 
filter_subtree, filter_ctx, \ + /* include_subtree */ false, \ + /* nextbound */ false, NULL, NULL, \ + /* prevbound */ true, &node_candidate, &subtree_candidate); \ + assert(node == search_result \ + || !filter_node(filter_ctx, node)); \ + if (node_candidate != NULL) { \ + return node_candidate; \ + } \ + if (subtree_candidate != NULL) { \ + return a_prefix##last_filtered_from_node( \ + subtree_candidate, filter_node, filter_subtree, \ + filter_ctx); \ + } \ + return NULL; \ +} \ +a_attr a_type * \ +a_prefix##search_filtered(a_rbt_type *rbtree, const a_type *key, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx) { \ + a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \ + filter_node, filter_subtree, filter_ctx, \ + /* include_subtree */ false, \ + /* nextbound */ false, NULL, NULL, \ + /* prevbound */ false, NULL, NULL); \ + return result; \ +} \ +a_attr a_type * \ +a_prefix##nsearch_filtered(a_rbt_type *rbtree, const a_type *key, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx) { \ + a_type *node_candidate; \ + a_type *subtree_candidate; \ + a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \ + filter_node, filter_subtree, filter_ctx, \ + /* include_subtree */ true, \ + /* nextbound */ true, &node_candidate, &subtree_candidate, \ + /* prevbound */ false, NULL, NULL); \ + if (result != NULL) { \ + return result; \ + } \ + if (node_candidate != NULL) { \ + return node_candidate; \ + } \ + if (subtree_candidate != NULL) { \ + return a_prefix##first_filtered_from_node( \ + subtree_candidate, filter_node, filter_subtree, \ + filter_ctx); \ + } \ + return NULL; \ +} \ +a_attr a_type * \ +a_prefix##psearch_filtered(a_rbt_type *rbtree, const a_type *key, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx) { \ + a_type *node_candidate; \ + a_type *subtree_candidate; \ + a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \ + filter_node, filter_subtree, filter_ctx, \ + /* include_subtree */ true, \ + /* nextbound */ false, NULL, NULL, \ + /* prevbound */ true, &node_candidate, &subtree_candidate); \ + if (result != NULL) { \ + return result; \ + } \ + if (node_candidate != NULL) { \ + return node_candidate; \ + } \ + if (subtree_candidate != NULL) { \ + return a_prefix##last_filtered_from_node( \ + subtree_candidate, filter_node, filter_subtree, \ + filter_ctx); \ + } \ + return NULL; \ +} \ +a_attr a_type * \ +a_prefix##iter_recurse_filtered(a_rbt_type *rbtree, a_type *node, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx) { \ + if (node == NULL || !filter_subtree(filter_ctx, node)) { \ + return NULL; \ + } \ + a_type *ret; \ + a_type *left = rbtn_left_get(a_type, a_field, node); \ + a_type *right = rbtn_right_get(a_type, a_field, node); \ + ret = a_prefix##iter_recurse_filtered(rbtree, left, cb, arg, \ + filter_node, filter_subtree, filter_ctx); \ + if (ret != NULL) { \ + return ret; \ + } \ + if (filter_node(filter_ctx, node)) { \ + ret = cb(rbtree, node, arg); \ + } \ + if (ret != NULL) { \ + return ret; \ + } \ + return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \ + filter_node, filter_subtree, filter_ctx); \ +} \ +a_attr a_type * \ +a_prefix##iter_start_filtered(a_rbt_type *rbtree, a_type *start, \ + a_type *node, a_type 
*(*cb)(a_rbt_type *, a_type *, void *), \ + void *arg, bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx) { \ + if (!filter_subtree(filter_ctx, node)) { \ + return NULL; \ + } \ + int cmp = a_cmp(start, node); \ + a_type *ret; \ + a_type *left = rbtn_left_get(a_type, a_field, node); \ + a_type *right = rbtn_right_get(a_type, a_field, node); \ + if (cmp < 0) { \ + ret = a_prefix##iter_start_filtered(rbtree, start, left, cb, \ + arg, filter_node, filter_subtree, filter_ctx); \ + if (ret != NULL) { \ + return ret; \ + } \ + if (filter_node(filter_ctx, node)) { \ + ret = cb(rbtree, node, arg); \ + if (ret != NULL) { \ + return ret; \ + } \ + } \ + return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \ + filter_node, filter_subtree, filter_ctx); \ + } else if (cmp > 0) { \ + return a_prefix##iter_start_filtered(rbtree, start, right, \ + cb, arg, filter_node, filter_subtree, filter_ctx); \ + } else { \ + if (filter_node(filter_ctx, node)) { \ + ret = cb(rbtree, node, arg); \ + if (ret != NULL) { \ + return ret; \ + } \ + } \ + return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \ + filter_node, filter_subtree, filter_ctx); \ + } \ +} \ +a_attr a_type * \ +a_prefix##iter_filtered(a_rbt_type *rbtree, a_type *start, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx) { \ + a_type *ret; \ + if (start != NULL) { \ + ret = a_prefix##iter_start_filtered(rbtree, start, \ + rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \ + filter_ctx); \ + } else { \ + ret = a_prefix##iter_recurse_filtered(rbtree, rbtree->rbt_root, \ + cb, arg, filter_node, filter_subtree, filter_ctx); \ + } \ + return ret; \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter_recurse_filtered(a_rbt_type *rbtree, \ + a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ + void *arg, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx) { \ + if (node == NULL || !filter_subtree(filter_ctx, node)) { \ + return NULL; \ + } \ + a_type *ret; \ + a_type *left = rbtn_left_get(a_type, a_field, node); \ + a_type *right = rbtn_right_get(a_type, a_field, node); \ + ret = a_prefix##reverse_iter_recurse_filtered(rbtree, right, cb, \ + arg, filter_node, filter_subtree, filter_ctx); \ + if (ret != NULL) { \ + return ret; \ + } \ + if (filter_node(filter_ctx, node)) { \ + ret = cb(rbtree, node, arg); \ + } \ + if (ret != NULL) { \ + return ret; \ + } \ + return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb, \ + arg, filter_node, filter_subtree, filter_ctx); \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter_start_filtered(a_rbt_type *rbtree, a_type *start,\ + a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ + void *arg, bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx) { \ + if (!filter_subtree(filter_ctx, node)) { \ + return NULL; \ + } \ + int cmp = a_cmp(start, node); \ + a_type *ret; \ + a_type *left = rbtn_left_get(a_type, a_field, node); \ + a_type *right = rbtn_right_get(a_type, a_field, node); \ + if (cmp > 0) { \ + ret = a_prefix##reverse_iter_start_filtered(rbtree, start, \ + right, cb, arg, filter_node, filter_subtree, filter_ctx); \ + if (ret != NULL) { \ + return ret; \ + } \ + if (filter_node(filter_ctx, node)) { \ + ret = cb(rbtree, node, arg); \ + if (ret != NULL) { \ + return ret; \ + } \ + } 
\ + return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb,\ + arg, filter_node, filter_subtree, filter_ctx); \ + } else if (cmp < 0) { \ + return a_prefix##reverse_iter_start_filtered(rbtree, start, \ + left, cb, arg, filter_node, filter_subtree, filter_ctx); \ + } else { \ + if (filter_node(filter_ctx, node)) { \ + ret = cb(rbtree, node, arg); \ + if (ret != NULL) { \ + return ret; \ + } \ + } \ + return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb,\ + arg, filter_node, filter_subtree, filter_ctx); \ + } \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter_filtered(a_rbt_type *rbtree, a_type *start, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \ + bool (*filter_node)(void *, a_type *), \ + bool (*filter_subtree)(void *, a_type *), \ + void *filter_ctx) { \ + a_type *ret; \ + if (start != NULL) { \ + ret = a_prefix##reverse_iter_start_filtered(rbtree, start, \ + rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \ + filter_ctx); \ + } else { \ + ret = a_prefix##reverse_iter_recurse_filtered(rbtree, \ + rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \ + filter_ctx); \ + } \ + return ret; \ +} \ +) /* end rb_summarized_only */ -#endif /* RB_H_ */ +#endif /* JEMALLOC_INTERNAL_RB_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/rtree.h b/contrib/jemalloc/include/jemalloc/internal/rtree.h index 16ccbebee7f0a2..a00adb2982f323 100644 --- a/contrib/jemalloc/include/jemalloc/internal/rtree.h +++ b/contrib/jemalloc/include/jemalloc/internal/rtree.h @@ -35,33 +35,52 @@ # define RTREE_LEAF_COMPACT #endif -/* Needed for initialization only. */ -#define RTREE_LEAFKEY_INVALID ((uintptr_t)1) - typedef struct rtree_node_elm_s rtree_node_elm_t; struct rtree_node_elm_s { atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */ }; +typedef struct rtree_metadata_s rtree_metadata_t; +struct rtree_metadata_s { + szind_t szind; + extent_state_t state; /* Mirrors edata->state. */ + bool is_head; /* Mirrors edata->is_head. */ + bool slab; +}; + +typedef struct rtree_contents_s rtree_contents_t; +struct rtree_contents_s { + edata_t *edata; + rtree_metadata_t metadata; +}; + +#define RTREE_LEAF_STATE_WIDTH EDATA_BITS_STATE_WIDTH +#define RTREE_LEAF_STATE_SHIFT 2 +#define RTREE_LEAF_STATE_MASK MASK(RTREE_LEAF_STATE_WIDTH, RTREE_LEAF_STATE_SHIFT) + struct rtree_leaf_elm_s { #ifdef RTREE_LEAF_COMPACT /* * Single pointer-width field containing all three leaf element fields. * For example, on a 64-bit x64 system with 48 significant virtual - * memory address bits, the index, extent, and slab fields are packed as + * memory address bits, the index, edata, and slab fields are packed as * such: * * x: index - * e: extent + * e: edata + * s: state + * h: is_head * b: slab * - * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b + * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee e00ssshb */ atomic_p_t le_bits; #else - atomic_p_t le_extent; /* (extent_t *) */ - atomic_u_t le_szind; /* (szind_t) */ - atomic_b_t le_slab; /* (bool) */ + atomic_p_t le_edata; /* (edata_t *) */ + /* + * From high to low bits: szind (8 bits), state (4 bits), is_head, slab + */ + atomic_u_t le_metadata; #endif }; @@ -78,6 +97,7 @@ struct rtree_level_s { typedef struct rtree_s rtree_t; struct rtree_s { + base_t *base; malloc_mutex_t init_lock; /* Number of elements based on rtree_levels[0].bits. 
*/ #if RTREE_HEIGHT > 1 @@ -109,42 +129,29 @@ static const rtree_level_t rtree_levels[] = { #endif }; -bool rtree_new(rtree_t *rtree, bool zeroed); - -typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t); -extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc; +bool rtree_new(rtree_t *rtree, base_t *base, bool zeroed); -typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t); -extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc; - -typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *); -extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc; - -typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *); -extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc; -#ifdef JEMALLOC_JET -void rtree_delete(tsdn_t *tsdn, rtree_t *rtree); -#endif rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing); -JEMALLOC_ALWAYS_INLINE uintptr_t -rtree_leafkey(uintptr_t key) { +JEMALLOC_ALWAYS_INLINE unsigned +rtree_leaf_maskbits(void) { unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits - rtree_levels[RTREE_HEIGHT-1].bits); - unsigned maskbits = ptrbits - cumbits; - uintptr_t mask = ~((ZU(1) << maskbits) - 1); + return ptrbits - cumbits; +} + +JEMALLOC_ALWAYS_INLINE uintptr_t +rtree_leafkey(uintptr_t key) { + uintptr_t mask = ~((ZU(1) << rtree_leaf_maskbits()) - 1); return (key & mask); } JEMALLOC_ALWAYS_INLINE size_t rtree_cache_direct_map(uintptr_t key) { - unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); - unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits - - rtree_levels[RTREE_HEIGHT-1].bits); - unsigned maskbits = ptrbits - cumbits; - return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1)); + return (size_t)((key >> rtree_leaf_maskbits()) & + (RTREE_CTX_NCACHE - 1)); } JEMALLOC_ALWAYS_INLINE uintptr_t @@ -176,151 +183,174 @@ rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); } -JEMALLOC_ALWAYS_INLINE extent_t * -rtree_leaf_elm_bits_extent_get(uintptr_t bits) { +JEMALLOC_ALWAYS_INLINE uintptr_t +rtree_leaf_elm_bits_encode(rtree_contents_t contents) { + assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0); + uintptr_t edata_bits = (uintptr_t)contents.edata + & (((uintptr_t)1 << LG_VADDR) - 1); + + uintptr_t szind_bits = (uintptr_t)contents.metadata.szind << LG_VADDR; + uintptr_t slab_bits = (uintptr_t)contents.metadata.slab; + uintptr_t is_head_bits = (uintptr_t)contents.metadata.is_head << 1; + uintptr_t state_bits = (uintptr_t)contents.metadata.state << + RTREE_LEAF_STATE_SHIFT; + uintptr_t metadata_bits = szind_bits | state_bits | is_head_bits | + slab_bits; + assert((edata_bits & metadata_bits) == 0); + + return edata_bits | metadata_bits; +} + +JEMALLOC_ALWAYS_INLINE rtree_contents_t +rtree_leaf_elm_bits_decode(uintptr_t bits) { + rtree_contents_t contents; + /* Do the easy things first. */ + contents.metadata.szind = bits >> LG_VADDR; + contents.metadata.slab = (bool)(bits & 1); + contents.metadata.is_head = (bool)(bits & (1 << 1)); + + uintptr_t state_bits = (bits & RTREE_LEAF_STATE_MASK) >> + RTREE_LEAF_STATE_SHIFT; + assert(state_bits <= extent_state_max); + contents.metadata.state = (extent_state_t)state_bits; + + uintptr_t low_bit_mask = ~((uintptr_t)EDATA_ALIGNMENT - 1); # ifdef __aarch64__ /* * aarch64 doesn't sign extend the highest virtual address bit to set - * the higher ones. 
Instead, the high bits gets zeroed. + * the higher ones. Instead, the high bits get zeroed. */ uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1; - /* Mask off the slab bit. */ - uintptr_t low_bit_mask = ~(uintptr_t)1; + /* Mask off metadata. */ uintptr_t mask = high_bit_mask & low_bit_mask; - return (extent_t *)(bits & mask); + contents.edata = (edata_t *)(bits & mask); # else - /* Restore sign-extended high bits, mask slab bit. */ - return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >> - RTREE_NHIB) & ~((uintptr_t)0x1)); + /* Restore sign-extended high bits, mask metadata bits. */ + contents.edata = (edata_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) + >> RTREE_NHIB) & low_bit_mask); # endif + assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0); + return contents; } -JEMALLOC_ALWAYS_INLINE szind_t -rtree_leaf_elm_bits_szind_get(uintptr_t bits) { - return (szind_t)(bits >> LG_VADDR); -} - -JEMALLOC_ALWAYS_INLINE bool -rtree_leaf_elm_bits_slab_get(uintptr_t bits) { - return (bool)(bits & (uintptr_t)0x1); -} +# endif /* RTREE_LEAF_COMPACT */ -# endif - -JEMALLOC_ALWAYS_INLINE extent_t * -rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree, - rtree_leaf_elm_t *elm, bool dependent) { +JEMALLOC_ALWAYS_INLINE rtree_contents_t +rtree_leaf_elm_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + bool dependent) { #ifdef RTREE_LEAF_COMPACT uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); - return rtree_leaf_elm_bits_extent_get(bits); + rtree_contents_t contents = rtree_leaf_elm_bits_decode(bits); + return contents; #else - extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent + rtree_contents_t contents; + unsigned metadata_bits = atomic_load_u(&elm->le_metadata, dependent ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); - return extent; -#endif -} + contents.metadata.slab = (bool)(metadata_bits & 1); + contents.metadata.is_head = (bool)(metadata_bits & (1 << 1)); -JEMALLOC_ALWAYS_INLINE szind_t -rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree, - rtree_leaf_elm_t *elm, bool dependent) { -#ifdef RTREE_LEAF_COMPACT - uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); - return rtree_leaf_elm_bits_szind_get(bits); -#else - return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED - : ATOMIC_ACQUIRE); + uintptr_t state_bits = (metadata_bits & RTREE_LEAF_STATE_MASK) >> + RTREE_LEAF_STATE_SHIFT; + assert(state_bits <= extent_state_max); + contents.metadata.state = (extent_state_t)state_bits; + contents.metadata.szind = metadata_bits >> (RTREE_LEAF_STATE_SHIFT + + RTREE_LEAF_STATE_WIDTH); + + contents.edata = (edata_t *)atomic_load_p(&elm->le_edata, dependent + ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); + + return contents; #endif } -JEMALLOC_ALWAYS_INLINE bool -rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, - rtree_leaf_elm_t *elm, bool dependent) { +JEMALLOC_ALWAYS_INLINE void +rtree_contents_encode(rtree_contents_t contents, void **bits, + unsigned *additional) { #ifdef RTREE_LEAF_COMPACT - uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); - return rtree_leaf_elm_bits_slab_get(bits); + *bits = (void *)rtree_leaf_elm_bits_encode(contents); #else - return atomic_load_b(&elm->le_slab, dependent ? 
ATOMIC_RELAXED : - ATOMIC_ACQUIRE); + *additional = (unsigned)contents.metadata.slab + | ((unsigned)contents.metadata.is_head << 1) + | ((unsigned)contents.metadata.state << RTREE_LEAF_STATE_SHIFT) + | ((unsigned)contents.metadata.szind << (RTREE_LEAF_STATE_SHIFT + + RTREE_LEAF_STATE_WIDTH)); + *bits = contents.edata; #endif } -static inline void -rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree, - rtree_leaf_elm_t *elm, extent_t *extent) { +JEMALLOC_ALWAYS_INLINE void +rtree_leaf_elm_write_commit(tsdn_t *tsdn, rtree_t *rtree, + rtree_leaf_elm_t *elm, void *bits, unsigned additional) { #ifdef RTREE_LEAF_COMPACT - uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true); - uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) << - LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) - | ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits)); - atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); + atomic_store_p(&elm->le_bits, bits, ATOMIC_RELEASE); #else - atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE); + atomic_store_u(&elm->le_metadata, additional, ATOMIC_RELEASE); + /* + * Write edata last, since the element is atomically considered valid + * as soon as the edata field is non-NULL. + */ + atomic_store_p(&elm->le_edata, bits, ATOMIC_RELEASE); #endif } -static inline void -rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, - rtree_leaf_elm_t *elm, szind_t szind) { - assert(szind <= SC_NSIZES); +JEMALLOC_ALWAYS_INLINE void +rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, + rtree_leaf_elm_t *elm, rtree_contents_t contents) { + assert((uintptr_t)contents.edata % EDATA_ALIGNMENT == 0); + void *bits; + unsigned additional; -#ifdef RTREE_LEAF_COMPACT - uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, - true); - uintptr_t bits = ((uintptr_t)szind << LG_VADDR) | - ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) & - (((uintptr_t)0x1 << LG_VADDR) - 1)) | - ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits)); - atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); -#else - atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE); -#endif + rtree_contents_encode(contents, &bits, &additional); + rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional); } -static inline void -rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree, - rtree_leaf_elm_t *elm, bool slab) { +/* The state field can be updated independently (and more frequently). 
*/ +JEMALLOC_ALWAYS_INLINE void +rtree_leaf_elm_state_update(tsdn_t *tsdn, rtree_t *rtree, + rtree_leaf_elm_t *elm1, rtree_leaf_elm_t *elm2, extent_state_t state) { + assert(elm1 != NULL); #ifdef RTREE_LEAF_COMPACT - uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, - true); - uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) << - LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) & - (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab); - atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); + uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm1, + /* dependent */ true); + bits &= ~RTREE_LEAF_STATE_MASK; + bits |= state << RTREE_LEAF_STATE_SHIFT; + atomic_store_p(&elm1->le_bits, (void *)bits, ATOMIC_RELEASE); + if (elm2 != NULL) { + atomic_store_p(&elm2->le_bits, (void *)bits, ATOMIC_RELEASE); + } #else - atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE); + unsigned bits = atomic_load_u(&elm1->le_metadata, ATOMIC_RELAXED); + bits &= ~RTREE_LEAF_STATE_MASK; + bits |= state << RTREE_LEAF_STATE_SHIFT; + atomic_store_u(&elm1->le_metadata, bits, ATOMIC_RELEASE); + if (elm2 != NULL) { + atomic_store_u(&elm2->le_metadata, bits, ATOMIC_RELEASE); + } #endif } -static inline void -rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, - rtree_leaf_elm_t *elm, extent_t *extent, szind_t szind, bool slab) { -#ifdef RTREE_LEAF_COMPACT - uintptr_t bits = ((uintptr_t)szind << LG_VADDR) | - ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) | - ((uintptr_t)slab); - atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); -#else - rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab); - rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind); - /* - * Write extent last, since the element is atomically considered valid - * as soon as the extent field is non-NULL. - */ - rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent); -#endif -} +/* + * Tries to look up the key in the L1 cache, returning false if there's a hit, or + * true if there's a miss. + * Key is allowed to be NULL; returns true in this case. + */ +JEMALLOC_ALWAYS_INLINE bool +rtree_leaf_elm_lookup_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, rtree_leaf_elm_t **elm) { + size_t slot = rtree_cache_direct_map(key); + uintptr_t leafkey = rtree_leafkey(key); + assert(leafkey != RTREE_LEAFKEY_INVALID); -static inline void -rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, - rtree_leaf_elm_t *elm, szind_t szind, bool slab) { - assert(!slab || szind < SC_NBINS); + if (unlikely(rtree_ctx->cache[slot].leafkey != leafkey)) { + return true; + } - /* - * The caller implicitly assures that it is the only writer to the szind - * and slab fields, and that the extent field cannot currently change. - */ - rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab); - rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind); + rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf; + assert(leaf != NULL); + uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); + *elm = &leaf[subkey]; + + return false; } JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t * @@ -382,147 +412,143 @@ rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, dependent, init_missing); } +/* + * Returns true on lookup failure. + */ static inline bool -rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, - extent_t *extent, szind_t szind, bool slab) { - /* Use rtree_clear() to set the extent to NULL. 
*/ - assert(extent != NULL); - +rtree_read_independent(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, rtree_contents_t *r_contents) { rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, - key, false, true); + key, /* dependent */ false, /* init_missing */ false); if (elm == NULL) { return true; } - - assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL); - rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab); - + *r_contents = rtree_leaf_elm_read(tsdn, rtree, elm, + /* dependent */ false); return false; } -JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t * -rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, - bool dependent) { +static inline rtree_contents_t +rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key) { rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, - key, dependent, false); - if (!dependent && elm == NULL) { - return NULL; - } + key, /* dependent */ true, /* init_missing */ false); assert(elm != NULL); - return elm; + return rtree_leaf_elm_read(tsdn, rtree, elm, /* dependent */ true); } -JEMALLOC_ALWAYS_INLINE extent_t * -rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, - uintptr_t key, bool dependent) { - rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, - dependent); - if (!dependent && elm == NULL) { - return NULL; - } - return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent); -} - -JEMALLOC_ALWAYS_INLINE szind_t -rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, - uintptr_t key, bool dependent) { - rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, - dependent); - if (!dependent && elm == NULL) { - return SC_NSIZES; - } - return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); +static inline rtree_metadata_t +rtree_metadata_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key) { + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, + key, /* dependent */ true, /* init_missing */ false); + assert(elm != NULL); + return rtree_leaf_elm_read(tsdn, rtree, elm, + /* dependent */ true).metadata; } /* - * rtree_slab_read() is intentionally omitted because slab is always read in - * conjunction with szind, which makes rtree_szind_slab_read() a better choice. + * Returns true when the request cannot be fulfilled by fastpath. */ - -JEMALLOC_ALWAYS_INLINE bool -rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, - uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) { - rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, - dependent); - if (!dependent && elm == NULL) { +static inline bool +rtree_metadata_try_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, rtree_metadata_t *r_rtree_metadata) { + rtree_leaf_elm_t *elm; + /* + * Should check the bool return value (lookup success or not) instead of + * elm == NULL (which will result in an extra branch). This is because + * when the cache lookup succeeds, there will never be a NULL pointer + * returned (which is unknown to the compiler). 
+ */ + if (rtree_leaf_elm_lookup_fast(tsdn, rtree, rtree_ctx, key, &elm)) { return true; } - *r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent); - *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); + assert(elm != NULL); + *r_rtree_metadata = rtree_leaf_elm_read(tsdn, rtree, elm, + /* dependent */ true).metadata; return false; } -/* - * Try to read szind_slab from the L1 cache. Returns true on a hit, - * and fills in r_szind and r_slab. Otherwise returns false. - * - * Key is allowed to be NULL in order to save an extra branch on the - * fastpath. returns false in this case. - */ -JEMALLOC_ALWAYS_INLINE bool -rtree_szind_slab_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, - uintptr_t key, szind_t *r_szind, bool *r_slab) { - rtree_leaf_elm_t *elm; - - size_t slot = rtree_cache_direct_map(key); - uintptr_t leafkey = rtree_leafkey(key); - assert(leafkey != RTREE_LEAFKEY_INVALID); - - if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) { - rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf; - assert(leaf != NULL); - uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); - elm = &leaf[subkey]; - -#ifdef RTREE_LEAF_COMPACT - uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, - elm, true); - *r_szind = rtree_leaf_elm_bits_szind_get(bits); - *r_slab = rtree_leaf_elm_bits_slab_get(bits); -#else - *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, true); - *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, true); -#endif - return true; - } else { - return false; +JEMALLOC_ALWAYS_INLINE void +rtree_write_range_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t base, uintptr_t end, rtree_contents_t contents, bool clearing) { + assert((base & PAGE_MASK) == 0 && (end & PAGE_MASK) == 0); + /* + * Only used for emap_(de)register_interior, which implies the + * boundaries have been registered already. Therefore all the lookups + * are dependent w/o init_missing, assuming the range spans across at + * most 2 rtree leaf nodes (each covers 1 GiB of vaddr). + */ + void *bits; + unsigned additional; + rtree_contents_encode(contents, &bits, &additional); + + rtree_leaf_elm_t *elm = NULL; /* Dead store. 
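The loop body that follows implements the strategy described in the comment above: since the range is already registered and covers at most two leaves, the code re-resolves the leaf element only when the address crosses a leaf boundary and otherwise just advances elm. A minimal standalone sketch of that iteration pattern, using a made-up 4 KiB page and a hypothetical 512-slot leaf rather than jemalloc's real geometry:

```
#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ    ((uintptr_t)4096)
#define LEAF_SLOTS 512                      /* hypothetical slots per leaf */
#define LEAF_SPAN  (PAGE_SZ * LEAF_SLOTS)   /* bytes covered by one leaf */

/* Stand-in for a full lookup: slot index of addr within its leaf. */
static unsigned
leaf_slot(uintptr_t addr) {
	return (unsigned)((addr / PAGE_SZ) % LEAF_SLOTS);
}

int
main(void) {
	uintptr_t base = 510 * PAGE_SZ;     /* range straddles a leaf boundary */
	uintptr_t end = 514 * PAGE_SZ;
	unsigned slot = 0;
	int lookups = 0;
	for (uintptr_t addr = base; addr <= end; addr += PAGE_SZ) {
		if (addr == base || (addr & (LEAF_SPAN - 1)) == 0) {
			slot = leaf_slot(addr);     /* full lookup */
			lookups++;
		} else {
			slot++;                     /* stay within the leaf */
		}
		printf("addr %#lx -> slot %u\n", (unsigned long)addr, slot);
	}
	printf("full lookups: %d for 5 pages\n", lookups);
	return 0;
}
```

The payoff is that a range spanning two leaves costs two lookups total instead of one per page.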
*/ + for (uintptr_t addr = base; addr <= end; addr += PAGE) { + if (addr == base || + (addr & ((ZU(1) << rtree_leaf_maskbits()) - 1)) == 0) { + elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr, + /* dependent */ true, /* init_missing */ false); + assert(elm != NULL); + } + assert(elm == rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr, + /* dependent */ true, /* init_missing */ false)); + assert(!clearing || rtree_leaf_elm_read(tsdn, rtree, elm, + /* dependent */ true).edata != NULL); + rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional); + elm++; } } + +JEMALLOC_ALWAYS_INLINE void +rtree_write_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t base, uintptr_t end, rtree_contents_t contents) { + rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents, + /* clearing */ false); +} + JEMALLOC_ALWAYS_INLINE bool -rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, - uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) { - rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, - dependent); - if (!dependent && elm == NULL) { +rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, + rtree_contents_t contents) { + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, + key, /* dependent */ false, /* init_missing */ true); + if (elm == NULL) { return true; } -#ifdef RTREE_LEAF_COMPACT - uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); - *r_szind = rtree_leaf_elm_bits_szind_get(bits); - *r_slab = rtree_leaf_elm_bits_slab_get(bits); -#else - *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); - *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent); -#endif - return false; -} -static inline void -rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, - uintptr_t key, szind_t szind, bool slab) { - assert(!slab || szind < SC_NBINS); + rtree_leaf_elm_write(tsdn, rtree, elm, contents); - rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true); - rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab); + return false; } static inline void rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key) { - rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true); - assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) != - NULL); - rtree_leaf_elm_write(tsdn, rtree, elm, NULL, SC_NSIZES, false); + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, + key, /* dependent */ true, /* init_missing */ false); + assert(elm != NULL); + assert(rtree_leaf_elm_read(tsdn, rtree, elm, + /* dependent */ true).edata != NULL); + rtree_contents_t contents; + contents.edata = NULL; + contents.metadata.szind = SC_NSIZES; + contents.metadata.slab = false; + contents.metadata.is_head = false; + contents.metadata.state = (extent_state_t)0; + rtree_leaf_elm_write(tsdn, rtree, elm, contents); +} + +static inline void +rtree_clear_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t base, uintptr_t end) { + rtree_contents_t contents; + contents.edata = NULL; + contents.metadata.szind = SC_NSIZES; + contents.metadata.slab = false; + contents.metadata.is_head = false; + contents.metadata.state = (extent_state_t)0; + rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents, + /* clearing */ true); } #endif /* JEMALLOC_INTERNAL_RTREE_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/rtree_tsd.h 
b/contrib/jemalloc/include/jemalloc/internal/rtree_tsd.h index 562e29297a7669..e45525c5e6a3eb 100644 --- a/contrib/jemalloc/include/jemalloc/internal/rtree_tsd.h +++ b/contrib/jemalloc/include/jemalloc/internal/rtree_tsd.h @@ -18,16 +18,28 @@ * cache misses if made overly large, plus the cost of linear search in the LRU * cache. */ -#define RTREE_CTX_LG_NCACHE 4 -#define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE) +#define RTREE_CTX_NCACHE 16 #define RTREE_CTX_NCACHE_L2 8 +/* Needed for initialization only. */ +#define RTREE_LEAFKEY_INVALID ((uintptr_t)1) +#define RTREE_CTX_CACHE_ELM_INVALID {RTREE_LEAFKEY_INVALID, NULL} + +#define RTREE_CTX_INIT_ELM_1 RTREE_CTX_CACHE_ELM_INVALID +#define RTREE_CTX_INIT_ELM_2 RTREE_CTX_INIT_ELM_1, RTREE_CTX_INIT_ELM_1 +#define RTREE_CTX_INIT_ELM_4 RTREE_CTX_INIT_ELM_2, RTREE_CTX_INIT_ELM_2 +#define RTREE_CTX_INIT_ELM_8 RTREE_CTX_INIT_ELM_4, RTREE_CTX_INIT_ELM_4 +#define RTREE_CTX_INIT_ELM_16 RTREE_CTX_INIT_ELM_8, RTREE_CTX_INIT_ELM_8 + +#define _RTREE_CTX_INIT_ELM_DATA(n) RTREE_CTX_INIT_ELM_##n +#define RTREE_CTX_INIT_ELM_DATA(n) _RTREE_CTX_INIT_ELM_DATA(n) + /* - * Zero initializer required for tsd initialization only. Proper initialization - * done via rtree_ctx_data_init(). + * Static initializer (to invalidate the cache entries) is required because the + * free fastpath may access the rtree cache before a full tsd initialization. */ -#define RTREE_CTX_ZERO_INITIALIZER {{{0, 0}}, {{0, 0}}} - +#define RTREE_CTX_INITIALIZER {{RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE)}, \ + {RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE_L2)}} typedef struct rtree_leaf_elm_s rtree_leaf_elm_t; diff --git a/contrib/jemalloc/include/jemalloc/internal/safety_check.h b/contrib/jemalloc/include/jemalloc/internal/safety_check.h index 53339ac12f2def..f1a74f174dee96 100644 --- a/contrib/jemalloc/include/jemalloc/internal/safety_check.h +++ b/contrib/jemalloc/include/jemalloc/internal/safety_check.h @@ -1,9 +1,14 @@ #ifndef JEMALLOC_INTERNAL_SAFETY_CHECK_H #define JEMALLOC_INTERNAL_SAFETY_CHECK_H +void safety_check_fail_sized_dealloc(bool current_dealloc, const void *ptr, + size_t true_size, size_t input_size); void safety_check_fail(const char *format, ...); + +typedef void (*safety_check_abort_hook_t)(const char *message); + /* Can set to NULL for a default. */ -void safety_check_set_abort(void (*abort_fn)()); +void safety_check_set_abort(safety_check_abort_hook_t abort_fn); JEMALLOC_ALWAYS_INLINE void safety_check_set_redzone(void *ptr, size_t usize, size_t bumped_usize) { diff --git a/contrib/jemalloc/include/jemalloc/internal/san.h b/contrib/jemalloc/include/jemalloc/internal/san.h new file mode 100644 index 00000000000000..8813d6bbe7dee7 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/san.h @@ -0,0 +1,191 @@ +#ifndef JEMALLOC_INTERNAL_GUARD_H +#define JEMALLOC_INTERNAL_GUARD_H + +#include "jemalloc/internal/ehooks.h" +#include "jemalloc/internal/emap.h" + +#define SAN_PAGE_GUARD PAGE +#define SAN_PAGE_GUARDS_SIZE (SAN_PAGE_GUARD * 2) + +#define SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT 0 +#define SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT 0 + +#define SAN_LG_UAF_ALIGN_DEFAULT (-1) +#define SAN_CACHE_BIN_NONFAST_MASK_DEFAULT (uintptr_t)(-1) + +static const uintptr_t uaf_detect_junk = (uintptr_t)0x5b5b5b5b5b5b5b5bULL; + +/* 0 means disabled, i.e. never guarded. */ +extern size_t opt_san_guard_large; +extern size_t opt_san_guard_small; +/* -1 means disabled, i.e. never check for use-after-free. 
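The RTREE_CTX_INIT_ELM_1/2/4/8/16 definitions above are the standard preprocessor doubling trick for building a fixed-size static initializer without writing the entry N times, with a second macro layer so the count can itself be a macro. A tiny standalone illustration of the same technique (the names and the 8-entry size are invented for the example):

```
#include <stdio.h>

#define ELM_INVALID { -1 }
#define INIT_ELM_1  ELM_INVALID
#define INIT_ELM_2  INIT_ELM_1, INIT_ELM_1
#define INIT_ELM_4  INIT_ELM_2, INIT_ELM_2
#define INIT_ELM_8  INIT_ELM_4, INIT_ELM_4

/* Extra indirection so n is expanded before token pasting. */
#define INIT_ELM_DATA_(n) INIT_ELM_##n
#define INIT_ELM_DATA(n)  INIT_ELM_DATA_(n)

#define NCACHE 8

struct elm { int key; };
static struct elm cache[NCACHE] = { INIT_ELM_DATA(NCACHE) };

int
main(void) {
	for (int i = 0; i < NCACHE; i++) {
		printf("%d ", cache[i].key);   /* prints eight -1 entries */
	}
	printf("\n");
	return 0;
}
```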
*/ +extern ssize_t opt_lg_san_uaf_align; + +void san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + emap_t *emap, bool left, bool right, bool remap); +void san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + emap_t *emap, bool left, bool right); +/* + * Unguard the extent, but don't modify emap boundaries. Must be called on an + * extent that has been erased from emap and shouldn't be placed back. + */ +void san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks, + edata_t *edata, emap_t *emap); +void san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize); + +void tsd_san_init(tsd_t *tsd); +void san_init(ssize_t lg_san_uaf_align); + +static inline void +san_guard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + emap_t *emap, bool remap) { + san_guard_pages(tsdn, ehooks, edata, emap, true, true, remap); +} + +static inline void +san_unguard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + emap_t *emap) { + san_unguard_pages(tsdn, ehooks, edata, emap, true, true); +} + +static inline size_t +san_two_side_unguarded_sz(size_t size) { + assert(size % PAGE == 0); + assert(size >= SAN_PAGE_GUARDS_SIZE); + return size - SAN_PAGE_GUARDS_SIZE; +} + +static inline size_t +san_two_side_guarded_sz(size_t size) { + assert(size % PAGE == 0); + return size + SAN_PAGE_GUARDS_SIZE; +} + +static inline size_t +san_one_side_unguarded_sz(size_t size) { + assert(size % PAGE == 0); + assert(size >= SAN_PAGE_GUARD); + return size - SAN_PAGE_GUARD; +} + +static inline size_t +san_one_side_guarded_sz(size_t size) { + assert(size % PAGE == 0); + return size + SAN_PAGE_GUARD; +} + +static inline bool +san_guard_enabled(void) { + return (opt_san_guard_large != 0 || opt_san_guard_small != 0); +} + +static inline bool +san_large_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks, size_t size, + size_t alignment) { + if (opt_san_guard_large == 0 || ehooks_guard_will_fail(ehooks) || + tsdn_null(tsdn)) { + return false; + } + + tsd_t *tsd = tsdn_tsd(tsdn); + uint64_t n = tsd_san_extents_until_guard_large_get(tsd); + assert(n >= 1); + if (n > 1) { + /* + * Subtract conditionally because the guard may not happen due + * to alignment or size restriction below. 
+ */ + *tsd_san_extents_until_guard_largep_get(tsd) = n - 1; + } + + if (n == 1 && (alignment <= PAGE) && + (san_two_side_guarded_sz(size) <= SC_LARGE_MAXCLASS)) { + *tsd_san_extents_until_guard_largep_get(tsd) = + opt_san_guard_large; + return true; + } else { + assert(tsd_san_extents_until_guard_large_get(tsd) >= 1); + return false; + } +} + +static inline bool +san_slab_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks) { + if (opt_san_guard_small == 0 || ehooks_guard_will_fail(ehooks) || + tsdn_null(tsdn)) { + return false; + } + + tsd_t *tsd = tsdn_tsd(tsdn); + uint64_t n = tsd_san_extents_until_guard_small_get(tsd); + assert(n >= 1); + if (n == 1) { + *tsd_san_extents_until_guard_smallp_get(tsd) = + opt_san_guard_small; + return true; + } else { + *tsd_san_extents_until_guard_smallp_get(tsd) = n - 1; + assert(tsd_san_extents_until_guard_small_get(tsd) >= 1); + return false; + } +} + +static inline void +san_junk_ptr_locations(void *ptr, size_t usize, void **first, void **mid, + void **last) { + size_t ptr_sz = sizeof(void *); + + *first = ptr; + + *mid = (void *)((uintptr_t)ptr + ((usize >> 1) & ~(ptr_sz - 1))); + assert(*first != *mid || usize == ptr_sz); + assert((uintptr_t)*first <= (uintptr_t)*mid); + + /* + * When usize > 32K, the gap between requested_size and usize might be + * greater than 4K -- this means the last write may access an + * likely-untouched page (default settings w/ 4K pages). However by + * default the tcache only goes up to the 32K size class, and is usually + * tuned lower instead of higher, which makes it less of a concern. + */ + *last = (void *)((uintptr_t)ptr + usize - sizeof(uaf_detect_junk)); + assert(*first != *last || usize == ptr_sz); + assert(*mid != *last || usize <= ptr_sz * 2); + assert((uintptr_t)*mid <= (uintptr_t)*last); +} + +static inline bool +san_junk_ptr_should_slow(void) { + /* + * The latter condition (pointer size greater than the min size class) + * is not expected -- fall back to the slow path for simplicity. 
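As a concrete illustration of san_junk_ptr_locations() above: with 8-byte pointers and a 4096-byte allocation, the three junked words land at offsets 0, 2048 (half the size, rounded down to pointer alignment), and 4088 (the last pointer-sized slot). The standalone snippet below just mirrors that arithmetic; it is illustrative only.

```
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void
junk_offsets(size_t usize, size_t *first, size_t *mid, size_t *last) {
	size_t ptr_sz = sizeof(void *);
	*first = 0;
	*mid = (usize >> 1) & ~(ptr_sz - 1);   /* halfway, pointer-aligned */
	*last = usize - ptr_sz;                /* final pointer-sized slot */
}

int
main(void) {
	size_t first, mid, last;
	junk_offsets(4096, &first, &mid, &last);
	printf("%zu %zu %zu\n", first, mid, last);   /* 0 2048 4088 on LP64 */
	assert(first == 0 && mid == 2048 && last == 4096 - sizeof(void *));
	return 0;
}
```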
+ */ + return config_debug || (LG_SIZEOF_PTR > SC_LG_TINY_MIN); +} + +static inline void +san_junk_ptr(void *ptr, size_t usize) { + if (san_junk_ptr_should_slow()) { + memset(ptr, (char)uaf_detect_junk, usize); + return; + } + + void *first, *mid, *last; + san_junk_ptr_locations(ptr, usize, &first, &mid, &last); + *(uintptr_t *)first = uaf_detect_junk; + *(uintptr_t *)mid = uaf_detect_junk; + *(uintptr_t *)last = uaf_detect_junk; +} + +static inline bool +san_uaf_detection_enabled(void) { + bool ret = config_uaf_detection && (opt_lg_san_uaf_align != -1); + if (config_uaf_detection && ret) { + assert(san_cache_bin_nonfast_mask == ((uintptr_t)1 << + opt_lg_san_uaf_align) - 1); + } + + return ret; +} + +#endif /* JEMALLOC_INTERNAL_GUARD_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/san_bump.h b/contrib/jemalloc/include/jemalloc/internal/san_bump.h new file mode 100644 index 00000000000000..d5902ba7b3f599 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/san_bump.h @@ -0,0 +1,51 @@ +#ifndef JEMALLOC_INTERNAL_SAN_BUMP_H +#define JEMALLOC_INTERNAL_SAN_BUMP_H + +#include "jemalloc/internal/edata.h" +#include "jemalloc/internal/exp_grow.h" +#include "jemalloc/internal/mutex.h" + +#define SBA_RETAINED_ALLOC_SIZE ((size_t)4 << 20) + +extern bool opt_retain; + +typedef struct pac_s pac_t; + +typedef struct san_bump_alloc_s san_bump_alloc_t; +struct san_bump_alloc_s { + malloc_mutex_t mtx; + + edata_t *curr_reg; +}; + +static inline bool +san_bump_enabled() { + /* + * We enable san_bump allocator only when it's possible to break up a + * mapping and unmap a part of it (maps_coalesce). This is needed to + * ensure the arena destruction process can destroy all retained guarded + * extents one by one and to unmap a trailing part of a retained guarded + * region when it's too small to fit a pending allocation. + * opt_retain is required, because this allocator retains a large + * virtual memory mapping and returns smaller parts of it. + */ + return maps_coalesce && opt_retain; +} + +static inline bool +san_bump_alloc_init(san_bump_alloc_t* sba) { + bool err = malloc_mutex_init(&sba->mtx, "sanitizer_bump_allocator", + WITNESS_RANK_SAN_BUMP_ALLOC, malloc_mutex_rank_exclusive); + if (err) { + return true; + } + sba->curr_reg = NULL; + + return false; +} + +edata_t * +san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac, ehooks_t *ehooks, + size_t size, bool zero); + +#endif /* JEMALLOC_INTERNAL_SAN_BUMP_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/sc.h b/contrib/jemalloc/include/jemalloc/internal/sc.h index 9a099d8b6457d6..9bab347beeed1d 100644 --- a/contrib/jemalloc/include/jemalloc/internal/sc.h +++ b/contrib/jemalloc/include/jemalloc/internal/sc.h @@ -197,30 +197,34 @@ (SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1) #define SC_NSIZES (SC_NTINY + SC_NPSEUDO + SC_NREGULAR) -/* The number of size classes that are a multiple of the page size. */ -#define SC_NPSIZES ( \ - /* Start with all the size classes. */ \ - SC_NSIZES \ - /* Subtract out those groups with too small a base. */ \ - - (LG_PAGE - 1 - SC_LG_FIRST_REGULAR_BASE) * SC_NGROUP \ - /* And the pseudo-group. */ \ - - SC_NPSEUDO \ - /* And the tiny group. */ \ - - SC_NTINY \ - /* Sizes where ndelta*delta is not a multiple of the page size. 
*/ \ - - (SC_LG_NGROUP * SC_NGROUP)) /* - * Note that the last line is computed as the sum of the second column in the - * following table: - * lg(base) | count of sizes to exclude - * ------------------------------|----------------------------- - * LG_PAGE - 1 | SC_NGROUP - 1 - * LG_PAGE | SC_NGROUP - 1 - * LG_PAGE + 1 | SC_NGROUP - 2 - * LG_PAGE + 2 | SC_NGROUP - 4 - * ... | ... - * LG_PAGE + (SC_LG_NGROUP - 1) | SC_NGROUP - (SC_NGROUP / 2) + * The number of size classes that are a multiple of the page size. + * + * Here are the first few bases that have a page-sized SC. + * + * lg(base) | base | highest SC | page-multiple SCs + * --------------|------------------------------------------ + * LG_PAGE - 1 | PAGE / 2 | PAGE | 1 + * LG_PAGE | PAGE | 2 * PAGE | 1 + * LG_PAGE + 1 | 2 * PAGE | 4 * PAGE | 2 + * LG_PAGE + 2 | 4 * PAGE | 8 * PAGE | 4 + * + * The number of page-multiple SCs continues to grow in powers of two, up until + * lg_delta == lg_page, which corresponds to setting lg_base to lg_page + + * SC_LG_NGROUP. So, then, the number of size classes that are multiples of the + * page size whose lg_delta is less than the page size are + * is 1 + (2**0 + 2**1 + ... + 2**(lg_ngroup - 1) == 2**lg_ngroup. + * + * For each base with lg_base in [lg_page + lg_ngroup, lg_base_max), there are + * NGROUP page-sized size classes, and when lg_base == lg_base_max, there are + * NGROUP - 1. + * + * This gives us the quantity we seek. */ +#define SC_NPSIZES ( \ + SC_NGROUP \ + + (SC_LG_BASE_MAX - (LG_PAGE + SC_LG_NGROUP)) * SC_NGROUP \ + + SC_NGROUP - 1) /* * We declare a size class is binnable if size < page size * group. Or, in other @@ -242,17 +246,23 @@ # error "Too many small size classes" #endif -/* The largest size class in the lookup table. */ -#define SC_LOOKUP_MAXCLASS ((size_t)1 << 12) +/* The largest size class in the lookup table, and its binary log. */ +#define SC_LG_MAX_LOOKUP 12 +#define SC_LOOKUP_MAXCLASS (1 << SC_LG_MAX_LOOKUP) /* Internal, only used for the definition of SC_SMALL_MAXCLASS. */ -#define SC_SMALL_MAX_BASE ((size_t)1 << (LG_PAGE + SC_LG_NGROUP - 1)) -#define SC_SMALL_MAX_DELTA ((size_t)1 << (LG_PAGE - 1)) +#define SC_SMALL_MAX_BASE (1 << (LG_PAGE + SC_LG_NGROUP - 1)) +#define SC_SMALL_MAX_DELTA (1 << (LG_PAGE - 1)) /* The largest size class allocated out of a slab. */ #define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE \ + (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA) +/* The fastpath assumes all lookup-able sizes are small. */ +#if (SC_SMALL_MAXCLASS < SC_LOOKUP_MAXCLASS) +# error "Lookup table sizes must be small" +#endif + /* The smallest size class not allocated out of a slab. */ #define SC_LARGE_MINCLASS ((size_t)1ULL << (LG_PAGE + SC_LG_NGROUP)) #define SC_LG_LARGE_MINCLASS (LG_PAGE + SC_LG_NGROUP) @@ -264,6 +274,19 @@ /* The largest size class supported. */ #define SC_LARGE_MAXCLASS (SC_MAX_BASE + (SC_NGROUP - 1) * SC_MAX_DELTA) +/* Maximum number of regions in one slab. */ +#ifndef CONFIG_LG_SLAB_MAXREGS +# define SC_LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN) +#else +# if CONFIG_LG_SLAB_MAXREGS < (LG_PAGE - SC_LG_TINY_MIN) +# error "Unsupported SC_LG_SLAB_MAXREGS" +# else +# define SC_LG_SLAB_MAXREGS CONFIG_LG_SLAB_MAXREGS +# endif +#endif + +#define SC_SLAB_MAXREGS (1U << SC_LG_SLAB_MAXREGS) + typedef struct sc_s sc_t; struct sc_s { /* Size class index, or -1 if not a valid size class. 
*/ @@ -321,10 +344,11 @@ struct sc_data_s { sc_t sc[SC_NSIZES]; }; +size_t reg_size_compute(int lg_base, int lg_delta, int ndelta); void sc_data_init(sc_data_t *data); /* * Updates slab sizes in [begin, end] to be pgs pages in length, if possible. - * Otherwise, does its best to accomodate the request. + * Otherwise, does its best to accommodate the request. */ void sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end, int pgs); diff --git a/contrib/jemalloc/include/jemalloc/internal/sec.h b/contrib/jemalloc/include/jemalloc/internal/sec.h new file mode 100644 index 00000000000000..fa863382db97ff --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/sec.h @@ -0,0 +1,120 @@ +#ifndef JEMALLOC_INTERNAL_SEC_H +#define JEMALLOC_INTERNAL_SEC_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/pai.h" + +/* + * Small extent cache. + * + * This includes some utilities to cache small extents. We have a per-pszind + * bin with its own list of extents of that size. We don't try to do any + * coalescing of extents (since it would in general require cross-shard locks or + * knowledge of the underlying PAI implementation). + */ + +/* + * For now, this is just one field; eventually, we'll probably want to get more + * fine-grained data out (like per-size class statistics). + */ +typedef struct sec_stats_s sec_stats_t; +struct sec_stats_s { + /* Sum of bytes_cur across all shards. */ + size_t bytes; +}; + +static inline void +sec_stats_accum(sec_stats_t *dst, sec_stats_t *src) { + dst->bytes += src->bytes; +} + +/* A collections of free extents, all of the same size. */ +typedef struct sec_bin_s sec_bin_t; +struct sec_bin_s { + /* + * When we fail to fulfill an allocation, we do a batch-alloc on the + * underlying allocator to fill extra items, as well. We drop the SEC + * lock while doing so, to allow operations on other bins to succeed. + * That introduces the possibility of other threads also trying to + * allocate out of this bin, failing, and also going to the backing + * allocator. To avoid a thundering herd problem in which lots of + * threads do batch allocs and overfill this bin as a result, we only + * allow one batch allocation at a time for a bin. This bool tracks + * whether or not some thread is already batch allocating. + * + * Eventually, the right answer may be a smarter sharding policy for the + * bins (e.g. a mutex per bin, which would also be more scalable + * generally; the batch-allocating thread could hold it while + * batch-allocating). + */ + bool being_batch_filled; + + /* + * Number of bytes in this particular bin (as opposed to the + * sec_shard_t's bytes_cur. This isn't user visible or reported in + * stats; rather, it allows us to quickly determine the change in the + * centralized counter when flushing. + */ + size_t bytes_cur; + edata_list_active_t freelist; +}; + +typedef struct sec_shard_s sec_shard_t; +struct sec_shard_s { + /* + * We don't keep per-bin mutexes, even though that would allow more + * sharding; this allows global cache-eviction, which in turn allows for + * better balancing across free lists. + */ + malloc_mutex_t mtx; + /* + * A SEC may need to be shut down (i.e. flushed of its contents and + * prevented from further caching). To avoid tricky synchronization + * issues, we just track enabled-status in each shard, guarded by a + * mutex. 
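The being_batch_filled flag above matters because the shard lock is dropped during the batch allocation: without it, several threads that all miss in the same bin could each refill it and overshoot its target size. A hedged pthreads sketch of that "only one refiller at a time" pattern follows; the names and the fill routine are invented and this is not the SEC implementation (in the SEC, threads that lose the race go to the fallback allocator rather than waiting).

```
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t bin_mtx = PTHREAD_MUTEX_INITIALIZER;
static bool being_batch_filled = false;
static int bin_items = 0;

/* Stand-in for the expensive call into the backing allocator. */
static int
backing_batch_alloc(int n) {
	return n;
}

static void
refill_if_needed(void) {
	pthread_mutex_lock(&bin_mtx);
	if (bin_items > 0 || being_batch_filled) {
		/* Someone else is already refilling; don't pile on. */
		pthread_mutex_unlock(&bin_mtx);
		return;
	}
	being_batch_filled = true;
	pthread_mutex_unlock(&bin_mtx);

	/* Expensive work done without holding the bin lock. */
	int got = backing_batch_alloc(16);

	pthread_mutex_lock(&bin_mtx);
	bin_items += got;
	being_batch_filled = false;
	pthread_mutex_unlock(&bin_mtx);
}

int
main(void) {
	refill_if_needed();
	printf("items after refill: %d\n", bin_items);
	return 0;
}
```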
In practice, this is only ever checked during brief races, + * since the arena-level atomic boolean tracking HPA enabled-ness means + * that we won't go down these pathways very often after custom extent + * hooks are installed. + */ + bool enabled; + sec_bin_t *bins; + /* Number of bytes in all bins in the shard. */ + size_t bytes_cur; + /* The next pszind to flush in the flush-some pathways. */ + pszind_t to_flush_next; +}; + +typedef struct sec_s sec_t; +struct sec_s { + pai_t pai; + pai_t *fallback; + + sec_opts_t opts; + sec_shard_t *shards; + pszind_t npsizes; +}; + +bool sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback, + const sec_opts_t *opts); +void sec_flush(tsdn_t *tsdn, sec_t *sec); +void sec_disable(tsdn_t *tsdn, sec_t *sec); + +/* + * Morally, these two stats methods probably ought to be a single one (and the + * mutex_prof_data ought to live in the sec_stats_t. But splitting them apart + * lets them fit easily into the pa_shard stats framework (which also has this + * split), which simplifies the stats management. + */ +void sec_stats_merge(tsdn_t *tsdn, sec_t *sec, sec_stats_t *stats); +void sec_mutex_stats_read(tsdn_t *tsdn, sec_t *sec, + mutex_prof_data_t *mutex_prof_data); + +/* + * We use the arena lock ordering; these are acquired in phase 2 of forking, but + * should be acquired before the underlying allocator mutexes. + */ +void sec_prefork2(tsdn_t *tsdn, sec_t *sec); +void sec_postfork_parent(tsdn_t *tsdn, sec_t *sec); +void sec_postfork_child(tsdn_t *tsdn, sec_t *sec); + +#endif /* JEMALLOC_INTERNAL_SEC_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/sec_opts.h b/contrib/jemalloc/include/jemalloc/internal/sec_opts.h new file mode 100644 index 00000000000000..a3ad72fbece5a1 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/sec_opts.h @@ -0,0 +1,59 @@ +#ifndef JEMALLOC_INTERNAL_SEC_OPTS_H +#define JEMALLOC_INTERNAL_SEC_OPTS_H + +/* + * The configuration settings used by an sec_t. Morally, this is part of the + * SEC interface, but we put it here for header-ordering reasons. + */ + +typedef struct sec_opts_s sec_opts_t; +struct sec_opts_s { + /* + * We don't necessarily always use all the shards; requests are + * distributed across shards [0, nshards - 1). + */ + size_t nshards; + /* + * We'll automatically refuse to cache any objects in this sec if + * they're larger than max_alloc bytes, instead forwarding such objects + * directly to the fallback. + */ + size_t max_alloc; + /* + * Exceeding this amount of cached extents in a shard causes us to start + * flushing bins in that shard until we fall below bytes_after_flush. + */ + size_t max_bytes; + /* + * The number of bytes (in all bins) we flush down to when we exceed + * bytes_cur. We want this to be less than bytes_cur, because + * otherwise we could get into situations where a shard undergoing + * net-deallocation keeps bytes_cur very near to max_bytes, so that + * most deallocations get immediately forwarded to the underlying PAI + * implementation, defeating the point of the SEC. + */ + size_t bytes_after_flush; + /* + * When we can't satisfy an allocation out of the SEC because there are + * no available ones cached, we allocate multiple of that size out of + * the fallback allocator. Eventually we might want to do something + * cleverer, but for now we just grab a fixed number. + */ + size_t batch_fill_extra; +}; + +#define SEC_OPTS_DEFAULT { \ + /* nshards */ \ + 4, \ + /* max_alloc */ \ + (32 * 1024) < PAGE ? 
PAGE : (32 * 1024), \ + /* max_bytes */ \ + 256 * 1024, \ + /* bytes_after_flush */ \ + 128 * 1024, \ + /* batch_fill_extra */ \ + 0 \ +} + + +#endif /* JEMALLOC_INTERNAL_SEC_OPTS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/slab_data.h b/contrib/jemalloc/include/jemalloc/internal/slab_data.h new file mode 100644 index 00000000000000..e821863d8d1cf1 --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/slab_data.h @@ -0,0 +1,12 @@ +#ifndef JEMALLOC_INTERNAL_SLAB_DATA_H +#define JEMALLOC_INTERNAL_SLAB_DATA_H + +#include "jemalloc/internal/bitmap.h" + +typedef struct slab_data_s slab_data_t; +struct slab_data_s { + /* Per region allocated/deallocated bitmap. */ + bitmap_t bitmap[BITMAP_GROUPS_MAX]; +}; + +#endif /* JEMALLOC_INTERNAL_SLAB_DATA_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/stats.h b/contrib/jemalloc/include/jemalloc/internal/stats.h index 3b9e0eac12b8f3..727f7dcbd71872 100644 --- a/contrib/jemalloc/include/jemalloc/internal/stats.h +++ b/contrib/jemalloc/include/jemalloc/internal/stats.h @@ -11,7 +11,8 @@ OPTION('b', bins, true, false) \ OPTION('l', large, true, false) \ OPTION('x', mutex, true, false) \ - OPTION('e', extents, true, false) + OPTION('e', extents, true, false) \ + OPTION('h', hpa, config_stats, false) enum { #define OPTION(o, v, d, s) stats_print_option_num_##v, @@ -24,8 +25,30 @@ enum { extern bool opt_stats_print; extern char opt_stats_print_opts[stats_print_tot_num_options+1]; +/* Utilities for stats_interval. */ +extern int64_t opt_stats_interval; +extern char opt_stats_interval_opts[stats_print_tot_num_options+1]; + +#define STATS_INTERVAL_DEFAULT -1 +/* + * Batch-increment the counter to reduce synchronization overhead. Each thread + * merges after (interval >> LG_BATCH_SIZE) bytes of allocations; also limit the + * BATCH_MAX for accuracy when the interval is huge (which is expected). + */ +#define STATS_INTERVAL_ACCUM_LG_BATCH_SIZE 6 +#define STATS_INTERVAL_ACCUM_BATCH_MAX (4 << 20) + +/* Only accessed by thread event. */ +uint64_t stats_interval_new_event_wait(tsd_t *tsd); +uint64_t stats_interval_postponed_event_wait(tsd_t *tsd); +void stats_interval_event_handler(tsd_t *tsd, uint64_t elapsed); + /* Implements je_malloc_stats_print. */ -void stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts); +void stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts); + +bool stats_boot(void); +void stats_prefork(tsdn_t *tsdn); +void stats_postfork_parent(tsdn_t *tsdn); +void stats_postfork_child(tsdn_t *tsdn); #endif /* JEMALLOC_INTERNAL_STATS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/sz.h b/contrib/jemalloc/include/jemalloc/internal/sz.h index 68e558abfea3ca..3c0fc1da33a641 100644 --- a/contrib/jemalloc/include/jemalloc/internal/sz.h +++ b/contrib/jemalloc/include/jemalloc/internal/sz.h @@ -22,6 +22,12 @@ * size that would result from such an allocation. */ +/* Page size index type. */ +typedef unsigned pszind_t; + +/* Size class index type. */ +typedef unsigned szind_t; + /* * sz_pind2sz_tab encodes the same information as could be computed by * sz_pind2sz_compute(). @@ -39,34 +45,62 @@ extern size_t sz_index2size_tab[SC_NSIZES]; */ extern uint8_t sz_size2index_tab[]; -static const size_t sz_large_pad = -#ifdef JEMALLOC_CACHE_OBLIVIOUS - PAGE -#else - 0 -#endif - ; +/* + * Padding for large allocations: PAGE when opt_cache_oblivious == true (to + * enable cache index randomization); 0 otherwise. 
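The STATS_INTERVAL_ACCUM_* constants above describe a common batching trick: each thread accumulates allocated bytes locally and only merges into the shared counter every (interval >> 6) bytes, with the batch capped at 4 MiB so a huge interval doesn't make the merged counter too stale. A standalone sketch of that batching, simplified relative to the thread_event machinery that actually drives it in jemalloc:

```
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define LG_BATCH_SIZE 6
#define BATCH_MAX     ((uint64_t)4 << 20)

static _Atomic uint64_t global_allocated;   /* shared, contended counter */

/* Per-thread accumulator (one instance per thread in a real setup). */
struct accum {
	uint64_t local;   /* bytes not yet merged */
	uint64_t batch;   /* merge threshold */
};

static void
accum_init(struct accum *a, uint64_t interval) {
	uint64_t batch = interval >> LG_BATCH_SIZE;
	a->local = 0;
	a->batch = batch > BATCH_MAX ? BATCH_MAX : batch;
}

static void
accum_add(struct accum *a, uint64_t bytes) {
	a->local += bytes;
	if (a->local >= a->batch) {
		atomic_fetch_add(&global_allocated, a->local);
		a->local = 0;
	}
}

int
main(void) {
	struct accum a;
	accum_init(&a, (uint64_t)1 << 30);   /* 1 GiB stats interval */
	for (int i = 0; i < 1000; i++) {
		accum_add(&a, 64 * 1024);    /* simulate allocations */
	}
	printf("merged: %llu bytes\n",
	    (unsigned long long)atomic_load(&global_allocated));
	return 0;
}
```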
+ */ +extern size_t sz_large_pad; -extern void sz_boot(const sc_data_t *sc_data); +extern void sz_boot(const sc_data_t *sc_data, bool cache_oblivious); JEMALLOC_ALWAYS_INLINE pszind_t sz_psz2ind(size_t psz) { + assert(psz > 0); if (unlikely(psz > SC_LARGE_MAXCLASS)) { return SC_NPSIZES; } - pszind_t x = lg_floor((psz<<1)-1); - pszind_t shift = (x < SC_LG_NGROUP + LG_PAGE) ? + /* x is the lg of the first base >= psz. */ + pszind_t x = lg_ceil(psz); + /* + * sc.h introduces a lot of size classes. These size classes are divided + * into different size class groups. There is a very special size class + * group, each size class in or after it is an integer multiple of PAGE. + * We call it first_ps_rg. It means first page size regular group. The + * range of first_ps_rg is (base, base * 2], and base == PAGE * + * SC_NGROUP. off_to_first_ps_rg begins from 1, instead of 0. e.g. + * off_to_first_ps_rg is 1 when psz is (PAGE * SC_NGROUP + 1). + */ + pszind_t off_to_first_ps_rg = (x < SC_LG_NGROUP + LG_PAGE) ? 0 : x - (SC_LG_NGROUP + LG_PAGE); - pszind_t grp = shift << SC_LG_NGROUP; - pszind_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ? - LG_PAGE : x - SC_LG_NGROUP - 1; + /* + * Same as sc_s::lg_delta. + * Delta for off_to_first_ps_rg == 1 is PAGE, + * for each increase in offset, it's multiplied by two. + * Therefore, lg_delta = LG_PAGE + (off_to_first_ps_rg - 1). + */ + pszind_t lg_delta = (off_to_first_ps_rg == 0) ? + LG_PAGE : LG_PAGE + (off_to_first_ps_rg - 1); - size_t delta_inverse_mask = ZU(-1) << lg_delta; - pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) & - ((ZU(1) << SC_LG_NGROUP) - 1); + /* + * Let's write psz in binary, e.g. 0011 for 0x3, 0111 for 0x7. + * The leftmost bits whose len is lg_base decide the base of psz. + * The rightmost bits whose len is lg_delta decide (pgz % PAGE). + * The middle bits whose len is SC_LG_NGROUP decide ndelta. + * ndelta is offset to the first size class in the size class group, + * starts from 1. + * If you don't know lg_base, ndelta or lg_delta, see sc.h. + * |xxxxxxxxxxxxxxxxxxxx|------------------------|yyyyyyyyyyyyyyyyyyyyy| + * |<-- len: lg_base -->|<-- len: SC_LG_NGROUP-->|<-- len: lg_delta -->| + * |<-- ndelta -->| + * rg_inner_off = ndelta - 1 + * Why use (psz - 1)? + * To handle case: psz % (1 << lg_delta) == 0. 
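A side note on the sz_psz2ind rewrite above: the old code computed x as lg_floor((psz << 1) - 1) while the new code uses lg_ceil(psz); the two agree for every psz >= 1, so that part of the index computation is unchanged. A quick standalone check, with naive helpers standing in for the bit_util versions:

```
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

static unsigned
lg_floor(size_t x) {
	unsigned r = 0;
	while (x >>= 1) {
		r++;
	}
	return r;
}

static unsigned
lg_ceil(size_t x) {
	return lg_floor(x) + ((x & (x - 1)) != 0 ? 1 : 0);
}

int
main(void) {
	for (size_t psz = 1; psz < ((size_t)1 << 20); psz++) {
		assert(lg_ceil(psz) == lg_floor((psz << 1) - 1));
	}
	printf("lg_ceil(psz) == lg_floor(2*psz - 1) for all tested psz\n");
	return 0;
}
```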
+ */ + pszind_t rg_inner_off = (((psz - 1)) >> lg_delta) & (SC_NGROUP - 1); - pszind_t ind = grp + mod; + pszind_t base_ind = off_to_first_ps_rg << SC_LG_NGROUP; + pszind_t ind = base_ind + rg_inner_off; return ind; } @@ -152,10 +186,15 @@ sz_size2index_compute(size_t size) { } JEMALLOC_ALWAYS_INLINE szind_t -sz_size2index_lookup(size_t size) { +sz_size2index_lookup_impl(size_t size) { assert(size <= SC_LOOKUP_MAXCLASS); - szind_t ret = (sz_size2index_tab[(size + (ZU(1) << SC_LG_TINY_MIN) - 1) - >> SC_LG_TINY_MIN]); + return sz_size2index_tab[(size + (ZU(1) << SC_LG_TINY_MIN) - 1) + >> SC_LG_TINY_MIN]; +} + +JEMALLOC_ALWAYS_INLINE szind_t +sz_size2index_lookup(size_t size) { + szind_t ret = sz_size2index_lookup_impl(size); assert(ret == sz_size2index_compute(size)); return ret; } @@ -194,9 +233,14 @@ sz_index2size_compute(szind_t index) { } } +JEMALLOC_ALWAYS_INLINE size_t +sz_index2size_lookup_impl(szind_t index) { + return sz_index2size_tab[index]; +} + JEMALLOC_ALWAYS_INLINE size_t sz_index2size_lookup(szind_t index) { - size_t ret = (size_t)sz_index2size_tab[index]; + size_t ret = sz_index2size_lookup_impl(index); assert(ret == sz_index2size_compute(index)); return ret; } @@ -207,6 +251,12 @@ sz_index2size(szind_t index) { return sz_index2size_lookup(index); } +JEMALLOC_ALWAYS_INLINE void +sz_size2index_usize_fastpath(size_t size, szind_t *ind, size_t *usize) { + *ind = sz_size2index_lookup_impl(size); + *usize = sz_index2size_lookup_impl(*ind); +} + JEMALLOC_ALWAYS_INLINE size_t sz_s2u_compute(size_t size) { if (unlikely(size > SC_LARGE_MAXCLASS)) { @@ -266,7 +316,7 @@ sz_sa2u(size_t size, size_t alignment) { assert(alignment != 0 && ((alignment - 1) & alignment) == 0); /* Try for a small size class. */ - if (size <= SC_SMALL_MAXCLASS && alignment < PAGE) { + if (size <= SC_SMALL_MAXCLASS && alignment <= PAGE) { /* * Round size up to the nearest multiple of alignment. * @@ -315,4 +365,7 @@ sz_sa2u(size_t size, size_t alignment) { return usize; } +size_t sz_psz_quantize_floor(size_t size); +size_t sz_psz_quantize_ceil(size_t size); + #endif /* JEMALLOC_INTERNAL_SIZE_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/tcache_externs.h b/contrib/jemalloc/include/jemalloc/internal/tcache_externs.h index d63eafde8ce67a..a2ab7101b065ee 100644 --- a/contrib/jemalloc/include/jemalloc/internal/tcache_externs.h +++ b/contrib/jemalloc/include/jemalloc/internal/tcache_externs.h @@ -1,10 +1,17 @@ #ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H #define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H -extern bool opt_tcache; -extern ssize_t opt_lg_tcache_max; - -extern cache_bin_info_t *tcache_bin_info; +extern bool opt_tcache; +extern size_t opt_tcache_max; +extern ssize_t opt_lg_tcache_nslots_mul; +extern unsigned opt_tcache_nslots_small_min; +extern unsigned opt_tcache_nslots_small_max; +extern unsigned opt_tcache_nslots_large; +extern ssize_t opt_lg_tcache_shift; +extern size_t opt_tcache_gc_incr_bytes; +extern size_t opt_tcache_gc_delay_bytes; +extern unsigned opt_lg_tcache_flush_small_div; +extern unsigned opt_lg_tcache_flush_large_div; /* * Number of tcache bins. There are SC_NBINS small-object bins, plus 0 or more @@ -15,6 +22,8 @@ extern unsigned nhbins; /* Maximum cached size class. */ extern size_t tcache_maxclass; +extern cache_bin_info_t *tcache_bin_info; + /* * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and * usable via the MALLOCX_TCACHE() flag. 
The automatic per thread tcaches are @@ -25,24 +34,27 @@ extern size_t tcache_maxclass; */ extern tcaches_t *tcaches; -size_t tcache_salloc(tsdn_t *tsdn, const void *ptr); -void tcache_event_hard(tsd_t *tsd, tcache_t *tcache); -void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, +size_t tcache_salloc(tsdn_t *tsdn, const void *ptr); +void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, cache_bin_t *tbin, szind_t binind, bool *tcache_success); -void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, + +void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t binind, unsigned rem); -void tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind, - unsigned rem, tcache_t *tcache); -void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, - arena_t *arena); +void tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, + szind_t binind, unsigned rem); +void tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *bin, + szind_t binind, bool is_small); +void tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow, + tcache_t *tcache, arena_t *arena); tcache_t *tcache_create_explicit(tsd_t *tsd); -void tcache_cleanup(tsd_t *tsd); -void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); -bool tcaches_create(tsd_t *tsd, unsigned *r_ind); -void tcaches_flush(tsd_t *tsd, unsigned ind); -void tcaches_destroy(tsd_t *tsd, unsigned ind); -bool tcache_boot(tsdn_t *tsdn); -void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); +void tcache_cleanup(tsd_t *tsd); +void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); +bool tcaches_create(tsd_t *tsd, base_t *base, unsigned *r_ind); +void tcaches_flush(tsd_t *tsd, unsigned ind); +void tcaches_destroy(tsd_t *tsd, unsigned ind); +bool tcache_boot(tsdn_t *tsdn, base_t *base); +void tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow, + tcache_t *tcache, arena_t *arena); void tcache_prefork(tsdn_t *tsdn); void tcache_postfork_parent(tsdn_t *tsdn); void tcache_postfork_child(tsdn_t *tsdn); @@ -50,4 +62,14 @@ void tcache_flush(tsd_t *tsd); bool tsd_tcache_data_init(tsd_t *tsd); bool tsd_tcache_enabled_data_init(tsd_t *tsd); +void tcache_assert_initialized(tcache_t *tcache); + +/* Only accessed by thread event. 
*/ +uint64_t tcache_gc_new_event_wait(tsd_t *tsd); +uint64_t tcache_gc_postponed_event_wait(tsd_t *tsd); +void tcache_gc_event_handler(tsd_t *tsd, uint64_t elapsed); +uint64_t tcache_gc_dalloc_new_event_wait(tsd_t *tsd); +uint64_t tcache_gc_dalloc_postponed_event_wait(tsd_t *tsd); +void tcache_gc_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed); + #endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/tcache_inlines.h b/contrib/jemalloc/include/jemalloc/internal/tcache_inlines.h index 5eca20e893b464..2634f145dc393b 100644 --- a/contrib/jemalloc/include/jemalloc/internal/tcache_inlines.h +++ b/contrib/jemalloc/include/jemalloc/internal/tcache_inlines.h @@ -3,9 +3,9 @@ #include "jemalloc/internal/bin.h" #include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/san.h" #include "jemalloc/internal/sc.h" #include "jemalloc/internal/sz.h" -#include "jemalloc/internal/ticker.h" #include "jemalloc/internal/util.h" static inline bool @@ -27,28 +27,29 @@ tcache_enabled_set(tsd_t *tsd, bool enabled) { tsd_slow_update(tsd); } -JEMALLOC_ALWAYS_INLINE void -tcache_event(tsd_t *tsd, tcache_t *tcache) { - if (TCACHE_GC_INCR == 0) { - return; +JEMALLOC_ALWAYS_INLINE bool +tcache_small_bin_disabled(szind_t ind, cache_bin_t *bin) { + assert(ind < SC_NBINS); + bool ret = (cache_bin_info_ncached_max(&tcache_bin_info[ind]) == 0); + if (ret && bin != NULL) { + /* small size class but cache bin disabled. */ + assert(ind >= nhbins); + assert((uintptr_t)(*bin->stack_head) == + cache_bin_preceding_junk); } - if (unlikely(ticker_tick(&tcache->gc_ticker))) { - tcache_event_hard(tsd, tcache); - } + return ret; } JEMALLOC_ALWAYS_INLINE void * tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, szind_t binind, bool zero, bool slow_path) { void *ret; - cache_bin_t *bin; bool tcache_success; - size_t usize JEMALLOC_CC_SILENCE_INIT(0); assert(binind < SC_NBINS); - bin = tcache_small_bin_get(tcache, binind); - ret = cache_bin_alloc_easy(bin, &tcache_success); + cache_bin_t *bin = &tcache->bins[binind]; + ret = cache_bin_alloc(bin, &tcache_success); assert(tcache_success == (ret != NULL)); if (unlikely(!tcache_success)) { bool tcache_hard_success; @@ -56,6 +57,13 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, if (unlikely(arena == NULL)) { return NULL; } + if (unlikely(tcache_small_bin_disabled(binind, bin))) { + /* stats and zero are handled directly by the arena. */ + return arena_malloc_hard(tsd_tsdn(tsd), arena, size, + binind, zero); + } + tcache_bin_flush_stashed(tsd, tcache, bin, binind, + /* is_small */ true); ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache, bin, binind, &tcache_hard_success); @@ -65,38 +73,14 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, } assert(ret); - /* - * Only compute usize if required. The checks in the following if - * statement are all static. 
- */ - if (config_prof || (slow_path && config_fill) || unlikely(zero)) { - usize = sz_index2size(binind); + if (unlikely(zero)) { + size_t usize = sz_index2size(binind); assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize); - } - - if (likely(!zero)) { - if (slow_path && config_fill) { - if (unlikely(opt_junk_alloc)) { - arena_alloc_junk_small(ret, &bin_infos[binind], - false); - } else if (unlikely(opt_zero)) { - memset(ret, 0, usize); - } - } - } else { - if (slow_path && config_fill && unlikely(opt_junk_alloc)) { - arena_alloc_junk_small(ret, &bin_infos[binind], true); - } memset(ret, 0, usize); } - if (config_stats) { bin->tstats.nrequests++; } - if (config_prof) { - tcache->prof_accumbytes += usize; - } - tcache_event(tsd, tcache); return ret; } @@ -104,12 +88,11 @@ JEMALLOC_ALWAYS_INLINE void * tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, szind_t binind, bool zero, bool slow_path) { void *ret; - cache_bin_t *bin; bool tcache_success; - assert(binind >= SC_NBINS &&binind < nhbins); - bin = tcache_large_bin_get(tcache, binind); - ret = cache_bin_alloc_easy(bin, &tcache_success); + assert(binind >= SC_NBINS && binind < nhbins); + cache_bin_t *bin = &tcache->bins[binind]; + ret = cache_bin_alloc(bin, &tcache_success); assert(tcache_success == (ret != NULL)); if (unlikely(!tcache_success)) { /* @@ -120,96 +103,79 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, if (unlikely(arena == NULL)) { return NULL; } + tcache_bin_flush_stashed(tsd, tcache, bin, binind, + /* is_small */ false); ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero); if (ret == NULL) { return NULL; } } else { - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - - /* Only compute usize on demand */ - if (config_prof || (slow_path && config_fill) || - unlikely(zero)) { - usize = sz_index2size(binind); + if (unlikely(zero)) { + size_t usize = sz_index2size(binind); assert(usize <= tcache_maxclass); - } - - if (likely(!zero)) { - if (slow_path && config_fill) { - if (unlikely(opt_junk_alloc)) { - memset(ret, JEMALLOC_ALLOC_JUNK, - usize); - } else if (unlikely(opt_zero)) { - memset(ret, 0, usize); - } - } - } else { memset(ret, 0, usize); } if (config_stats) { bin->tstats.nrequests++; } - if (config_prof) { - tcache->prof_accumbytes += usize; - } } - tcache_event(tsd, tcache); return ret; } JEMALLOC_ALWAYS_INLINE void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, bool slow_path) { - cache_bin_t *bin; - cache_bin_info_t *bin_info; + assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SC_SMALL_MAXCLASS); - assert(tcache_salloc(tsd_tsdn(tsd), ptr) - <= SC_SMALL_MAXCLASS); - - if (slow_path && config_fill && unlikely(opt_junk_free)) { - arena_dalloc_junk_small(ptr, &bin_infos[binind]); + cache_bin_t *bin = &tcache->bins[binind]; + /* + * Not marking the branch unlikely because this is past free_fastpath() + * (which handles the most common cases), i.e. at this point it's often + * uncommon cases. + */ + if (cache_bin_nonfast_aligned(ptr)) { + /* Junk unconditionally, even if bin is full. */ + san_junk_ptr(ptr, sz_index2size(binind)); + if (cache_bin_stash(bin, ptr)) { + return; + } + assert(cache_bin_full(bin)); + /* Bin full; fall through into the flush branch. 
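The stash path above is the core of the opt-in use-after-free detection: flagged pointers are junk-filled on free and held back instead of being recycled, and a later check (san_check_stashed_ptrs) verifies the junk pattern is still intact. The sketch below is a conceptual stand-in only; it is not the cache_bin_stash implementation, and jemalloc junks just a few words on the fast path rather than the whole region.

```
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define STASH_MAX 8
static const uint8_t junk_byte = 0x5b;

static void *stash[STASH_MAX];
static size_t stash_usize[STASH_MAX];
static size_t nstashed = 0;

/* On "free": fill with junk and hold the pointer instead of recycling it. */
static int
stash_free(void *ptr, size_t usize) {
	if (nstashed == STASH_MAX) {
		return 0;                  /* stash full; caller must flush */
	}
	memset(ptr, junk_byte, usize);
	stash[nstashed] = ptr;
	stash_usize[nstashed] = usize;
	nstashed++;
	return 1;
}

/* On flush: a modified byte means someone wrote to freed memory. */
static void
check_stashed(void) {
	for (size_t i = 0; i < nstashed; i++) {
		uint8_t *p = stash[i];
		for (size_t j = 0; j < stash_usize[i]; j++) {
			if (p[j] != junk_byte) {
				fprintf(stderr, "use after free at %p+%zu\n",
				    (void *)p, j);
				abort();
			}
		}
		free(stash[i]);
	}
	nstashed = 0;
}

int
main(void) {
	void *p = malloc(64);
	stash_free(p, 64);
	/* ((uint8_t *)p)[3] = 0; */   /* uncommenting triggers the check */
	check_stashed();
	printf("no use-after-free detected\n");
	return 0;
}
```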
*/ } - bin = tcache_small_bin_get(tcache, binind); - bin_info = &tcache_bin_info[binind]; - if (unlikely(!cache_bin_dalloc_easy(bin, bin_info, ptr))) { - tcache_bin_flush_small(tsd, tcache, bin, binind, - (bin_info->ncached_max >> 1)); - bool ret = cache_bin_dalloc_easy(bin, bin_info, ptr); + if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) { + if (unlikely(tcache_small_bin_disabled(binind, bin))) { + arena_dalloc_small(tsd_tsdn(tsd), ptr); + return; + } + cache_bin_sz_t max = cache_bin_info_ncached_max( + &tcache_bin_info[binind]); + unsigned remain = max >> opt_lg_tcache_flush_small_div; + tcache_bin_flush_small(tsd, tcache, bin, binind, remain); + bool ret = cache_bin_dalloc_easy(bin, ptr); assert(ret); } - - tcache_event(tsd, tcache); } JEMALLOC_ALWAYS_INLINE void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, bool slow_path) { - cache_bin_t *bin; - cache_bin_info_t *bin_info; assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SC_SMALL_MAXCLASS); assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass); - if (slow_path && config_fill && unlikely(opt_junk_free)) { - large_dalloc_junk(ptr, sz_index2size(binind)); - } - - bin = tcache_large_bin_get(tcache, binind); - bin_info = &tcache_bin_info[binind]; - if (unlikely(bin->ncached == bin_info->ncached_max)) { - tcache_bin_flush_large(tsd, bin, binind, - (bin_info->ncached_max >> 1), tcache); + cache_bin_t *bin = &tcache->bins[binind]; + if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) { + unsigned remain = cache_bin_info_ncached_max( + &tcache_bin_info[binind]) >> opt_lg_tcache_flush_large_div; + tcache_bin_flush_large(tsd, tcache, bin, binind, remain); + bool ret = cache_bin_dalloc_easy(bin, ptr); + assert(ret); } - assert(bin->ncached < bin_info->ncached_max); - bin->ncached++; - *(bin->avail - bin->ncached) = ptr; - - tcache_event(tsd, tcache); } JEMALLOC_ALWAYS_INLINE tcache_t * diff --git a/contrib/jemalloc/include/jemalloc/internal/tcache_structs.h b/contrib/jemalloc/include/jemalloc/internal/tcache_structs.h index 172ef9040c049b..176d73de95b7c9 100644 --- a/contrib/jemalloc/include/jemalloc/internal/tcache_structs.h +++ b/contrib/jemalloc/include/jemalloc/internal/tcache_structs.h @@ -7,36 +7,19 @@ #include "jemalloc/internal/ticker.h" #include "jemalloc/internal/tsd_types.h" -/* Various uses of this struct need it to be a named type. */ -typedef ql_elm(tsd_t) tsd_link_t; +/* + * The tcache state is split into the slow and hot path data. Each has a + * pointer to the other, and the data always comes in pairs. The layout of each + * of them varies in practice; tcache_slow lives in the TSD for the automatic + * tcache, and as part of a dynamic allocation for manual allocations. Keeping + * a pointer to tcache_slow lets us treat these cases uniformly, rather than + * splitting up the tcache [de]allocation code into those paths called with the + * TSD tcache and those called with a manual tcache. + */ -struct tcache_s { - /* - * To minimize our cache-footprint, we put the frequently accessed data - * together at the start of this struct. - */ - - /* Cleared after arena_prof_accum(). */ - uint64_t prof_accumbytes; - /* Drives incremental GC. */ - ticker_t gc_ticker; - /* - * The pointer stacks associated with bins follow as a contiguous array. - * During tcache initialization, the avail pointer in each element of - * tbins is initialized to point to the proper offset within this array. 
- */ - cache_bin_t bins_small[SC_NBINS]; - - /* - * This data is less hot; we can be a little less careful with our - * footprint here. - */ +struct tcache_slow_s { /* Lets us track all the tcaches in an arena. */ - ql_elm(tcache_t) link; - - /* Logically scoped to tsd, but put here for cache layout reasons. */ - ql_elm(tsd_t) tsd_link; - bool in_hook; + ql_elm(tcache_slow_t) link; /* * The descriptor lets the arena find our cache bins without seeing the @@ -51,12 +34,27 @@ struct tcache_s { szind_t next_gc_bin; /* For small bins, fill (ncached_max >> lg_fill_div). */ uint8_t lg_fill_div[SC_NBINS]; + /* For small bins, whether has been refilled since last GC. */ + bool bin_refilled[SC_NBINS]; + /* + * For small bins, the number of items we can pretend to flush before + * actually flushing. + */ + uint8_t bin_flush_delay_items[SC_NBINS]; /* - * We put the cache bins for large size classes at the end of the - * struct, since some of them might not get used. This might end up - * letting us avoid touching an extra page if we don't have to. + * The start of the allocation containing the dynamic allocation for + * either the cache bins alone, or the cache bin memory as well as this + * tcache_slow_t and its associated tcache_t. */ - cache_bin_t bins_large[SC_NSIZES-SC_NBINS]; + void *dyn_alloc; + + /* The associated bins. */ + tcache_t *tcache; +}; + +struct tcache_s { + tcache_slow_t *tcache_slow; + cache_bin_t bins[TCACHE_NBINS_MAX]; }; /* Linkage for list of available (previously used) explicit tcache IDs. */ diff --git a/contrib/jemalloc/include/jemalloc/internal/tcache_types.h b/contrib/jemalloc/include/jemalloc/internal/tcache_types.h index dce69382ebb0e7..583677ea2d35cc 100644 --- a/contrib/jemalloc/include/jemalloc/internal/tcache_types.h +++ b/contrib/jemalloc/include/jemalloc/internal/tcache_types.h @@ -3,6 +3,7 @@ #include "jemalloc/internal/sc.h" +typedef struct tcache_slow_s tcache_slow_t; typedef struct tcache_s tcache_t; typedef struct tcaches_s tcaches_t; @@ -16,39 +17,9 @@ typedef struct tcaches_s tcaches_t; #define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) #define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY -/* - * Absolute minimum number of cache slots for each small bin. - */ -#define TCACHE_NSLOTS_SMALL_MIN 20 - -/* - * Absolute maximum number of cache slots for each small bin in the thread - * cache. This is an additional constraint beyond that imposed as: twice the - * number of regions per slab for this size class. - * - * This constant must be an even number. - */ -#define TCACHE_NSLOTS_SMALL_MAX 200 - -/* Number of cache slots for large size classes. */ -#define TCACHE_NSLOTS_LARGE 20 - -/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */ -#define LG_TCACHE_MAXCLASS_DEFAULT 15 - -/* - * TCACHE_GC_SWEEP is the approximate number of allocation events between - * full GC sweeps. Integer rounding may cause the actual number to be - * slightly higher, since GC is performed incrementally. - */ -#define TCACHE_GC_SWEEP 8192 - -/* Number of tcache allocation/deallocation events between incremental GCs. */ -#define TCACHE_GC_INCR \ - ((TCACHE_GC_SWEEP / SC_NBINS) + ((TCACHE_GC_SWEEP / SC_NBINS == 0) ? 0 : 1)) - -/* Used in TSD static initializer only. Real init in tcache_data_init(). */ +/* Used in TSD static initializer only. Real init in tsd_tcache_data_init(). */ #define TCACHE_ZERO_INITIALIZER {0} +#define TCACHE_SLOW_ZERO_INITIALIZER {0} /* Used in TSD static initializer only. Will be initialized to opt_tcache. 
*/ #define TCACHE_ENABLED_ZERO_INITIALIZER false @@ -56,4 +27,9 @@ typedef struct tcaches_s tcaches_t; /* Used for explicit tcache only. Means flushed but not destroyed. */ #define TCACHES_ELM_NEED_REINIT ((tcache_t *)(uintptr_t)1) +#define TCACHE_LG_MAXCLASS_LIMIT 23 /* tcache_maxclass = 8M */ +#define TCACHE_MAXCLASS_LIMIT ((size_t)1 << TCACHE_LG_MAXCLASS_LIMIT) +#define TCACHE_NBINS_MAX (SC_NBINS + SC_NGROUP * \ + (TCACHE_LG_MAXCLASS_LIMIT - SC_LG_LARGE_MINCLASS) + 1) + #endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/test_hooks.h b/contrib/jemalloc/include/jemalloc/internal/test_hooks.h index 0780c52fa270fa..3d530b5c575614 100644 --- a/contrib/jemalloc/include/jemalloc/internal/test_hooks.h +++ b/contrib/jemalloc/include/jemalloc/internal/test_hooks.h @@ -4,9 +4,21 @@ extern JEMALLOC_EXPORT void (*test_hooks_arena_new_hook)(); extern JEMALLOC_EXPORT void (*test_hooks_libc_hook)(); -#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn) +#if defined(JEMALLOC_JET) || defined(JEMALLOC_UNIT_TEST) +# define JEMALLOC_TEST_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn) +# define open JEMALLOC_TEST_HOOK(open, test_hooks_libc_hook) +# define read JEMALLOC_TEST_HOOK(read, test_hooks_libc_hook) +# define write JEMALLOC_TEST_HOOK(write, test_hooks_libc_hook) +# define readlink JEMALLOC_TEST_HOOK(readlink, test_hooks_libc_hook) +# define close JEMALLOC_TEST_HOOK(close, test_hooks_libc_hook) +# define creat JEMALLOC_TEST_HOOK(creat, test_hooks_libc_hook) +# define secure_getenv JEMALLOC_TEST_HOOK(secure_getenv, test_hooks_libc_hook) /* Note that this is undef'd and re-define'd in src/prof.c. */ -#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook) +# define _Unwind_Backtrace JEMALLOC_TEST_HOOK(_Unwind_Backtrace, test_hooks_libc_hook) +#else +# define JEMALLOC_TEST_HOOK(fn, hook) fn +#endif + #endif /* JEMALLOC_INTERNAL_TEST_HOOKS_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/thread_event.h b/contrib/jemalloc/include/jemalloc/internal/thread_event.h new file mode 100644 index 00000000000000..2f4e1b39c7bcbd --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/thread_event.h @@ -0,0 +1,301 @@ +#ifndef JEMALLOC_INTERNAL_THREAD_EVENT_H +#define JEMALLOC_INTERNAL_THREAD_EVENT_H + +#include "jemalloc/internal/tsd.h" + +/* "te" is short for "thread_event" */ + +/* + * TE_MIN_START_WAIT should not exceed the minimal allocation usize. + */ +#define TE_MIN_START_WAIT ((uint64_t)1U) +#define TE_MAX_START_WAIT UINT64_MAX + +/* + * Maximum threshold on thread_(de)allocated_next_event_fast, so that there is + * no need to check overflow in malloc fast path. (The allocation size in malloc + * fast path never exceeds SC_LOOKUP_MAXCLASS.) + */ +#define TE_NEXT_EVENT_FAST_MAX (UINT64_MAX - SC_LOOKUP_MAXCLASS + 1U) + +/* + * The max interval helps make sure that malloc stays on the fast path in the + * common case, i.e. thread_allocated < thread_allocated_next_event_fast. When + * thread_allocated is within an event's distance to TE_NEXT_EVENT_FAST_MAX + * above, thread_allocated_next_event_fast is wrapped around and we fall back to + * the medium-fast path. The max interval makes sure that we're not staying on + * the fallback case for too long, even if there's no active event or if all + * active events have long wait times. + */ +#define TE_MAX_INTERVAL ((uint64_t)(4U << 20)) + +/* + * Invalid elapsed time, for situations where elapsed time is not needed. 
See + * comments in thread_event.c for more info. + */ +#define TE_INVALID_ELAPSED UINT64_MAX + +typedef struct te_ctx_s { + bool is_alloc; + uint64_t *current; + uint64_t *last_event; + uint64_t *next_event; + uint64_t *next_event_fast; +} te_ctx_t; + +void te_assert_invariants_debug(tsd_t *tsd); +void te_event_trigger(tsd_t *tsd, te_ctx_t *ctx); +void te_recompute_fast_threshold(tsd_t *tsd); +void tsd_te_init(tsd_t *tsd); + +/* + * List of all events, in the following format: + * E(event, (condition), is_alloc_event) + */ +#define ITERATE_OVER_ALL_EVENTS \ + E(tcache_gc, (opt_tcache_gc_incr_bytes > 0), true) \ + E(prof_sample, (config_prof && opt_prof), true) \ + E(stats_interval, (opt_stats_interval >= 0), true) \ + E(tcache_gc_dalloc, (opt_tcache_gc_incr_bytes > 0), false) \ + E(peak_alloc, config_stats, true) \ + E(peak_dalloc, config_stats, false) + +#define E(event, condition_unused, is_alloc_event_unused) \ + C(event##_event_wait) + +/* List of all thread event counters. */ +#define ITERATE_OVER_ALL_COUNTERS \ + C(thread_allocated) \ + C(thread_allocated_last_event) \ + ITERATE_OVER_ALL_EVENTS \ + C(prof_sample_last_event) \ + C(stats_interval_last_event) + +/* Getters directly wrap TSD getters. */ +#define C(counter) \ +JEMALLOC_ALWAYS_INLINE uint64_t \ +counter##_get(tsd_t *tsd) { \ + return tsd_##counter##_get(tsd); \ +} + +ITERATE_OVER_ALL_COUNTERS +#undef C + +/* + * Setters call the TSD pointer getters rather than the TSD setters, so that + * the counters can be modified even when TSD state is reincarnated or + * minimal_initialized: if an event is triggered in such cases, we will + * temporarily delay the event and let it be immediately triggered at the next + * allocation call. + */ +#define C(counter) \ +JEMALLOC_ALWAYS_INLINE void \ +counter##_set(tsd_t *tsd, uint64_t v) { \ + *tsd_##counter##p_get(tsd) = v; \ +} + +ITERATE_OVER_ALL_COUNTERS +#undef C + +/* + * For generating _event_wait getter / setter functions for each individual + * event. + */ +#undef E + +/* + * The malloc and free fastpath getters -- use the unsafe getters since tsd may + * be non-nominal, in which case the fast_threshold will be set to 0. This + * allows checking for events and tsd non-nominal in a single branch. + * + * Note that these can only be used on the fastpath. + */ +JEMALLOC_ALWAYS_INLINE void +te_malloc_fastpath_ctx(tsd_t *tsd, uint64_t *allocated, uint64_t *threshold) { + *allocated = *tsd_thread_allocatedp_get_unsafe(tsd); + *threshold = *tsd_thread_allocated_next_event_fastp_get_unsafe(tsd); + assert(*threshold <= TE_NEXT_EVENT_FAST_MAX); +} + +JEMALLOC_ALWAYS_INLINE void +te_free_fastpath_ctx(tsd_t *tsd, uint64_t *deallocated, uint64_t *threshold) { + /* Unsafe getters since this may happen before tsd_init. */ + *deallocated = *tsd_thread_deallocatedp_get_unsafe(tsd); + *threshold = *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd); + assert(*threshold <= TE_NEXT_EVENT_FAST_MAX); +} + +JEMALLOC_ALWAYS_INLINE bool +te_ctx_is_alloc(te_ctx_t *ctx) { + return ctx->is_alloc; +} + +JEMALLOC_ALWAYS_INLINE uint64_t +te_ctx_current_bytes_get(te_ctx_t *ctx) { + return *ctx->current; +} + +JEMALLOC_ALWAYS_INLINE void +te_ctx_current_bytes_set(te_ctx_t *ctx, uint64_t v) { + *ctx->current = v; +} + +JEMALLOC_ALWAYS_INLINE uint64_t +te_ctx_last_event_get(te_ctx_t *ctx) { + return *ctx->last_event; +} + +JEMALLOC_ALWAYS_INLINE void +te_ctx_last_event_set(te_ctx_t *ctx, uint64_t v) { + *ctx->last_event = v; +} + +/* Below 3 for next_event_fast. 
*/ +JEMALLOC_ALWAYS_INLINE uint64_t +te_ctx_next_event_fast_get(te_ctx_t *ctx) { + uint64_t v = *ctx->next_event_fast; + assert(v <= TE_NEXT_EVENT_FAST_MAX); + return v; +} + +JEMALLOC_ALWAYS_INLINE void +te_ctx_next_event_fast_set(te_ctx_t *ctx, uint64_t v) { + assert(v <= TE_NEXT_EVENT_FAST_MAX); + *ctx->next_event_fast = v; +} + +JEMALLOC_ALWAYS_INLINE void +te_next_event_fast_set_non_nominal(tsd_t *tsd) { + /* + * Set the fast thresholds to zero when tsd is non-nominal. Use the + * unsafe getter as this may get called during tsd init and clean up. + */ + *tsd_thread_allocated_next_event_fastp_get_unsafe(tsd) = 0; + *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd) = 0; +} + +/* For next_event. Setter also updates the fast threshold. */ +JEMALLOC_ALWAYS_INLINE uint64_t +te_ctx_next_event_get(te_ctx_t *ctx) { + return *ctx->next_event; +} + +JEMALLOC_ALWAYS_INLINE void +te_ctx_next_event_set(tsd_t *tsd, te_ctx_t *ctx, uint64_t v) { + *ctx->next_event = v; + te_recompute_fast_threshold(tsd); +} + +/* + * The function checks in debug mode whether the thread event counters are in + * a consistent state, which forms the invariants before and after each round + * of thread event handling that we can rely on and need to promise. + * The invariants are only temporarily violated in the middle of + * te_event_advance() if an event is triggered (the te_event_trigger() call at + * the end will restore the invariants). + */ +JEMALLOC_ALWAYS_INLINE void +te_assert_invariants(tsd_t *tsd) { + if (config_debug) { + te_assert_invariants_debug(tsd); + } +} + +JEMALLOC_ALWAYS_INLINE void +te_ctx_get(tsd_t *tsd, te_ctx_t *ctx, bool is_alloc) { + ctx->is_alloc = is_alloc; + if (is_alloc) { + ctx->current = tsd_thread_allocatedp_get(tsd); + ctx->last_event = tsd_thread_allocated_last_eventp_get(tsd); + ctx->next_event = tsd_thread_allocated_next_eventp_get(tsd); + ctx->next_event_fast = + tsd_thread_allocated_next_event_fastp_get(tsd); + } else { + ctx->current = tsd_thread_deallocatedp_get(tsd); + ctx->last_event = tsd_thread_deallocated_last_eventp_get(tsd); + ctx->next_event = tsd_thread_deallocated_next_eventp_get(tsd); + ctx->next_event_fast = + tsd_thread_deallocated_next_event_fastp_get(tsd); + } +} + +/* + * The lookahead functionality facilitates events to be able to lookahead, i.e. + * without touching the event counters, to determine whether an event would be + * triggered. The event counters are not advanced until the end of the + * allocation / deallocation calls, so the lookahead can be useful if some + * preparation work for some event must be done early in the allocation / + * deallocation calls. + * + * Currently only the profiling sampling event needs the lookahead + * functionality, so we don't yet define general purpose lookahead functions. + * + * Surplus is a terminology referring to the amount of bytes beyond what's + * needed for triggering an event, which can be a useful quantity to have in + * general when lookahead is being called. + */ + +JEMALLOC_ALWAYS_INLINE bool +te_prof_sample_event_lookahead_surplus(tsd_t *tsd, size_t usize, + size_t *surplus) { + if (surplus != NULL) { + /* + * This is a dead store: the surplus will be overwritten before + * any read. The initialization suppresses compiler warnings. + * Meanwhile, using SIZE_MAX to initialize is good for + * debugging purpose, because a valid surplus value is strictly + * less than usize, which is at most SIZE_MAX. 
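Illustrative sketch, not part of the imported sources: the arithmetic behind te_prof_sample_event_lookahead_surplus(). Bytes accumulated since the last event plus the pending allocation are compared against the event's wait time, and anything beyond the wait is the surplus. The standalone lookahead() helper and its literal inputs are invented for the example.

```
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
lookahead(uint64_t allocated, uint64_t last_event, uint64_t wait,
    uint64_t usize, uint64_t *surplus) {
	/* Same underflow-tolerant subtraction as the real code. */
	uint64_t accumbytes = allocated + usize - last_event;
	if (accumbytes < wait) {
		return false;
	}
	*surplus = accumbytes - wait;
	return true;
}

int main(void) {
	uint64_t surplus;
	/* 1000 bytes since the last event, a 4096-byte wait, an 8192-byte alloc pending. */
	if (lookahead(/* allocated */ 5096, /* last_event */ 4096,
	    /* wait */ 4096, /* usize */ 8192, &surplus)) {
		/* Fires with surplus = 5096, which is < usize, as the real code asserts. */
		printf("event fires, surplus = %llu\n", (unsigned long long)surplus);
	}
	return 0;
}
```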
+ */ + *surplus = SIZE_MAX; + } + if (unlikely(!tsd_nominal(tsd) || tsd_reentrancy_level_get(tsd) > 0)) { + return false; + } + /* The subtraction is intentionally susceptible to underflow. */ + uint64_t accumbytes = tsd_thread_allocated_get(tsd) + usize - + tsd_thread_allocated_last_event_get(tsd); + uint64_t sample_wait = tsd_prof_sample_event_wait_get(tsd); + if (accumbytes < sample_wait) { + return false; + } + assert(accumbytes - sample_wait < (uint64_t)usize); + if (surplus != NULL) { + *surplus = (size_t)(accumbytes - sample_wait); + } + return true; +} + +JEMALLOC_ALWAYS_INLINE bool +te_prof_sample_event_lookahead(tsd_t *tsd, size_t usize) { + return te_prof_sample_event_lookahead_surplus(tsd, usize, NULL); +} + +JEMALLOC_ALWAYS_INLINE void +te_event_advance(tsd_t *tsd, size_t usize, bool is_alloc) { + te_assert_invariants(tsd); + + te_ctx_t ctx; + te_ctx_get(tsd, &ctx, is_alloc); + + uint64_t bytes_before = te_ctx_current_bytes_get(&ctx); + te_ctx_current_bytes_set(&ctx, bytes_before + usize); + + /* The subtraction is intentionally susceptible to underflow. */ + if (likely(usize < te_ctx_next_event_get(&ctx) - bytes_before)) { + te_assert_invariants(tsd); + } else { + te_event_trigger(tsd, &ctx); + } +} + +JEMALLOC_ALWAYS_INLINE void +thread_dalloc_event(tsd_t *tsd, size_t usize) { + te_event_advance(tsd, usize, false); +} + +JEMALLOC_ALWAYS_INLINE void +thread_alloc_event(tsd_t *tsd, size_t usize) { + te_event_advance(tsd, usize, true); +} + +#endif /* JEMALLOC_INTERNAL_THREAD_EVENT_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/ticker.h b/contrib/jemalloc/include/jemalloc/internal/ticker.h index 52d0db4c89c69c..6b51ddec43bf35 100644 --- a/contrib/jemalloc/include/jemalloc/internal/ticker.h +++ b/contrib/jemalloc/include/jemalloc/internal/ticker.h @@ -1,6 +1,7 @@ #ifndef JEMALLOC_INTERNAL_TICKER_H #define JEMALLOC_INTERNAL_TICKER_H +#include "jemalloc/internal/prng.h" #include "jemalloc/internal/util.h" /** @@ -10,11 +11,11 @@ * have occurred with a call to ticker_ticks), which will return true (and reset * the counter) if the countdown hit zero. */ - -typedef struct { +typedef struct ticker_s ticker_t; +struct ticker_s { int32_t tick; int32_t nticks; -} ticker_t; +}; static inline void ticker_init(ticker_t *ticker, int32_t nticks) { @@ -75,7 +76,7 @@ ticker_tick(ticker_t *ticker) { return ticker_ticks(ticker, 1); } -/* +/* * Try to tick. If ticker would fire, return true, but rely on * slowpath to reset ticker. */ @@ -88,4 +89,87 @@ ticker_trytick(ticker_t *ticker) { return false; } +/* + * The ticker_geom_t is much like the ticker_t, except that instead of ticker + * having a constant countdown, it has an approximate one; each tick has + * approximately a 1/nticks chance of triggering the count. + * + * The motivation is in triggering arena decay. With a naive strategy, each + * thread would maintain a ticker per arena, and check if decay is necessary + * each time that the arena's ticker fires. This has two costs: + * - Since under reasonable assumptions both threads and arenas can scale + * linearly with the number of CPUs, maintaining per-arena data in each thread + * scales quadratically with the number of CPUs. + * - These tickers are often a cache miss down tcache flush pathways. + * + * By giving each tick a 1/nticks chance of firing, we still maintain the same + * average number of ticks-until-firing per arena, with only a single ticker's + * worth of metadata. + */ + +/* See ticker.c for an explanation of these constants. 
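Illustrative sketch, not part of the imported sources: the idea behind ticker_geom_t described above. Instead of a fixed countdown, the countdown is reloaded with a random value whose mean is nticks, so firing still averages once per nticks ticks while only a single ticker's worth of state is kept per thread. rand() and the uniform reload below are stand-ins; the real code draws from jemalloc's PRNG and the precomputed ticker_geom_table.

```
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int32_t tick; int32_t nticks; } geom_ticker_t;

static void geom_init(geom_ticker_t *t, int32_t nticks) {
	t->tick = nticks;   /* first period just uses the plain average */
	t->nticks = nticks;
}

static bool geom_tick(geom_ticker_t *t) {
	if (--t->tick >= 0) {
		return false;
	}
	/* Reload with a value in [1, 2*nticks]; its mean is about nticks. */
	t->tick = 1 + rand() % (2 * t->nticks);
	return true;
}

int main(void) {
	geom_ticker_t t;
	geom_init(&t, 100);
	unsigned fired = 0;
	for (int i = 0; i < 1000000; i++) {
		fired += geom_tick(&t);
	}
	/* Expect roughly 1e6 / 100 firings, i.e. about 10000. */
	printf("fired %u times\n", fired);
	return 0;
}
```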
*/ +#define TICKER_GEOM_NBITS 6 +#define TICKER_GEOM_MUL 61 +extern const uint8_t ticker_geom_table[1 << TICKER_GEOM_NBITS]; + +/* Not actually any different from ticker_t; just for type safety. */ +typedef struct ticker_geom_s ticker_geom_t; +struct ticker_geom_s { + int32_t tick; + int32_t nticks; +}; + +/* + * Just pick the average delay for the first counter. We're more concerned with + * the behavior over long periods of time rather than the exact timing of the + * initial ticks. + */ +#define TICKER_GEOM_INIT(nticks) {nticks, nticks} + +static inline void +ticker_geom_init(ticker_geom_t *ticker, int32_t nticks) { + /* + * Make sure there's no overflow possible. This shouldn't really be a + * problem for reasonable nticks choices, which are all static and + * relatively small. + */ + assert((uint64_t)nticks * (uint64_t)255 / (uint64_t)TICKER_GEOM_MUL + <= (uint64_t)INT32_MAX); + ticker->tick = nticks; + ticker->nticks = nticks; +} + +static inline int32_t +ticker_geom_read(const ticker_geom_t *ticker) { + return ticker->tick; +} + +/* Same deal as above. */ +#if defined(__GNUC__) && !defined(__clang__) \ + && (defined(__x86_64__) || defined(__i386__)) +JEMALLOC_NOINLINE +#endif +static bool +ticker_geom_fixup(ticker_geom_t *ticker, uint64_t *prng_state) { + uint64_t idx = prng_lg_range_u64(prng_state, TICKER_GEOM_NBITS); + ticker->tick = (uint32_t)( + (uint64_t)ticker->nticks * (uint64_t)ticker_geom_table[idx] + / (uint64_t)TICKER_GEOM_MUL); + return true; +} + +static inline bool +ticker_geom_ticks(ticker_geom_t *ticker, uint64_t *prng_state, int32_t nticks) { + ticker->tick -= nticks; + if (unlikely(ticker->tick < 0)) { + return ticker_geom_fixup(ticker, prng_state); + } + return false; +} + +static inline bool +ticker_geom_tick(ticker_geom_t *ticker, uint64_t *prng_state) { + return ticker_geom_ticks(ticker, prng_state, 1); +} + #endif /* JEMALLOC_INTERNAL_TICKER_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/tsd.h b/contrib/jemalloc/include/jemalloc/internal/tsd.h index ecfda5d60df110..6cd52aeee962cd 100644 --- a/contrib/jemalloc/include/jemalloc/internal/tsd.h +++ b/contrib/jemalloc/include/jemalloc/internal/tsd.h @@ -1,10 +1,12 @@ #ifndef JEMALLOC_INTERNAL_TSD_H #define JEMALLOC_INTERNAL_TSD_H +#include "jemalloc/internal/activity_callback.h" #include "jemalloc/internal/arena_types.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/bin_types.h" #include "jemalloc/internal/jemalloc_internal_externs.h" +#include "jemalloc/internal/peak.h" #include "jemalloc/internal/prof_types.h" #include "jemalloc/internal/ql.h" #include "jemalloc/internal/rtree_tsd.h" @@ -15,39 +17,30 @@ /* * Thread-Specific-Data layout - * --- data accessed on tcache fast path: state, rtree_ctx, stats, prof --- - * s: state - * e: tcache_enabled - * m: thread_allocated (config_stats) - * f: thread_deallocated (config_stats) - * p: prof_tdata (config_prof) - * c: rtree_ctx (rtree cache accessed on deallocation) - * t: tcache - * --- data not accessed on tcache fast path: arena-related fields --- - * d: arenas_tdata_bypass - * r: reentrancy_level - * x: narenas_tdata - * i: iarena - * a: arena - * o: arenas_tdata - * Loading TSD data is on the critical path of basically all malloc operations. - * In particular, tcache and rtree_ctx rely on hot CPU cache to be effective. - * Use a compact layout to reduce cache footprint. - * +--- 64-bit and 64B cacheline; 1B each letter; First byte on the left. 
---+ - * |---------------------------- 1st cacheline ----------------------------| - * | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32 ........ ........ .......] | - * |---------------------------- 2nd cacheline ----------------------------| - * | [c * 64 ........ ........ ........ ........ ........ ........ .......] | - * |---------------------------- 3nd cacheline ----------------------------| - * | [c * 32 ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... | - * +-------------------------------------------------------------------------+ - * Note: the entire tcache is embedded into TSD and spans multiple cachelines. * - * The last 3 members (i, a and o) before tcache isn't really needed on tcache - * fast path. However we have a number of unused tcache bins and witnesses - * (never touched unless config_debug) at the end of tcache, so we place them - * there to avoid breaking the cachelines and possibly paging in an extra page. + * At least some thread-local data gets touched on the fast-path of almost all + * malloc operations. But much of it is only necessary down slow-paths, or + * testing. We want to colocate the fast-path data so that it can live on the + * same cacheline if possible. So we define three tiers of hotness: + * TSD_DATA_FAST: Touched on the alloc/dalloc fast paths. + * TSD_DATA_SLOW: Touched down slow paths. "Slow" here is sort of general; + * there are "semi-slow" paths like "not a sized deallocation, but can still + * live in the tcache". We'll want to keep these closer to the fast-path + * data. + * TSD_DATA_SLOWER: Only touched in test or debug modes, or not touched at all. + * + * An additional concern is that the larger tcache bins won't be used (we have a + * bin per size class, but by default only cache relatively small objects). So + * the earlier bins are in the TSD_DATA_FAST tier, but the later ones are in the + * TSD_DATA_SLOWER tier. + * + * As a result of all this, we put the slow data first, then the fast data, then + * the slower data, while keeping the tcache as the last element of the fast + * data (so that the fast -> slower transition happens midway through the + * tcache). While we don't yet play alignment tricks to guarantee it, this + * increases our odds of getting some cache/page locality on fast paths. 
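Illustrative sketch, not part of the imported sources: the layout idea the comment above describes, grouping the fields touched on every malloc/free so they share as few cache lines as possible, with slow and debug-only data pushed to either side. The struct and field counts are invented stand-ins, not the real tsd_s.

```
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	/* TSD_DATA_SLOW stand-in: only touched off the fast path. */
	uint64_t slow_fields[16];
	/* state sits between the slow and fast tiers, as in tsd_s. */
	uint8_t  state;
	/* TSD_DATA_FAST stand-in: touched on every malloc/free. */
	uint64_t thread_allocated;
	uint64_t thread_allocated_next_event_fast;
	uint64_t thread_deallocated;
	uint64_t thread_deallocated_next_event_fast;
	/* TSD_DATA_SLOWER stand-in: debug/test-only data. */
	uint64_t slower_fields[8];
} tsd_layout_sketch_t;

int main(void) {
	size_t fast_start = offsetof(tsd_layout_sketch_t, thread_allocated);
	size_t fast_end = offsetof(tsd_layout_sketch_t, slower_fields);
	/* The fast tier here spans only 32 bytes, comfortably within a cache line. */
	printf("fast tier: offset %zu, %zu bytes\n", fast_start, fast_end - fast_start);
	return 0;
}
```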
*/ + #ifdef JEMALLOC_JET typedef void (*test_callback_t)(int *); # define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10 @@ -60,50 +53,112 @@ typedef void (*test_callback_t)(int *); # define MALLOC_TEST_TSD_INITIALIZER #endif -/* O(name, type, nullable type */ -#define MALLOC_TSD \ +typedef ql_elm(tsd_t) tsd_link_t; + +/* O(name, type, nullable type) */ +#define TSD_DATA_SLOW \ O(tcache_enabled, bool, bool) \ - O(arenas_tdata_bypass, bool, bool) \ O(reentrancy_level, int8_t, int8_t) \ - O(narenas_tdata, uint32_t, uint32_t) \ - O(offset_state, uint64_t, uint64_t) \ - O(thread_allocated, uint64_t, uint64_t) \ - O(thread_deallocated, uint64_t, uint64_t) \ - O(bytes_until_sample, int64_t, int64_t) \ + O(thread_allocated_last_event, uint64_t, uint64_t) \ + O(thread_allocated_next_event, uint64_t, uint64_t) \ + O(thread_deallocated_last_event, uint64_t, uint64_t) \ + O(thread_deallocated_next_event, uint64_t, uint64_t) \ + O(tcache_gc_event_wait, uint64_t, uint64_t) \ + O(tcache_gc_dalloc_event_wait, uint64_t, uint64_t) \ + O(prof_sample_event_wait, uint64_t, uint64_t) \ + O(prof_sample_last_event, uint64_t, uint64_t) \ + O(stats_interval_event_wait, uint64_t, uint64_t) \ + O(stats_interval_last_event, uint64_t, uint64_t) \ + O(peak_alloc_event_wait, uint64_t, uint64_t) \ + O(peak_dalloc_event_wait, uint64_t, uint64_t) \ O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \ - O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \ + O(prng_state, uint64_t, uint64_t) \ + O(san_extents_until_guard_small, uint64_t, uint64_t) \ + O(san_extents_until_guard_large, uint64_t, uint64_t) \ O(iarena, arena_t *, arena_t *) \ O(arena, arena_t *, arena_t *) \ - O(arenas_tdata, arena_tdata_t *, arena_tdata_t *)\ + O(arena_decay_ticker, ticker_geom_t, ticker_geom_t) \ + O(sec_shard, uint8_t, uint8_t) \ O(binshards, tsd_binshards_t, tsd_binshards_t)\ - O(tcache, tcache_t, tcache_t) \ + O(tsd_link, tsd_link_t, tsd_link_t) \ + O(in_hook, bool, bool) \ + O(peak, peak_t, peak_t) \ + O(activity_callback_thunk, activity_callback_thunk_t, \ + activity_callback_thunk_t) \ + O(tcache_slow, tcache_slow_t, tcache_slow_t) \ + O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) + +#define TSD_DATA_SLOW_INITIALIZER \ + /* tcache_enabled */ TCACHE_ENABLED_ZERO_INITIALIZER, \ + /* reentrancy_level */ 0, \ + /* thread_allocated_last_event */ 0, \ + /* thread_allocated_next_event */ 0, \ + /* thread_deallocated_last_event */ 0, \ + /* thread_deallocated_next_event */ 0, \ + /* tcache_gc_event_wait */ 0, \ + /* tcache_gc_dalloc_event_wait */ 0, \ + /* prof_sample_event_wait */ 0, \ + /* prof_sample_last_event */ 0, \ + /* stats_interval_event_wait */ 0, \ + /* stats_interval_last_event */ 0, \ + /* peak_alloc_event_wait */ 0, \ + /* peak_dalloc_event_wait */ 0, \ + /* prof_tdata */ NULL, \ + /* prng_state */ 0, \ + /* san_extents_until_guard_small */ 0, \ + /* san_extents_until_guard_large */ 0, \ + /* iarena */ NULL, \ + /* arena */ NULL, \ + /* arena_decay_ticker */ \ + TICKER_GEOM_INIT(ARENA_DECAY_NTICKS_PER_UPDATE), \ + /* sec_shard */ (uint8_t)-1, \ + /* binshards */ TSD_BINSHARDS_ZERO_INITIALIZER, \ + /* tsd_link */ {NULL}, \ + /* in_hook */ false, \ + /* peak */ PEAK_INITIALIZER, \ + /* activity_callback_thunk */ \ + ACTIVITY_CALLBACK_THUNK_INITIALIZER, \ + /* tcache_slow */ TCACHE_SLOW_ZERO_INITIALIZER, \ + /* rtree_ctx */ RTREE_CTX_INITIALIZER, + +/* O(name, type, nullable type) */ +#define TSD_DATA_FAST \ + O(thread_allocated, uint64_t, uint64_t) \ + O(thread_allocated_next_event_fast, uint64_t, uint64_t) \ + O(thread_deallocated, uint64_t, uint64_t) \ + 
O(thread_deallocated_next_event_fast, uint64_t, uint64_t) \ + O(tcache, tcache_t, tcache_t) + +#define TSD_DATA_FAST_INITIALIZER \ + /* thread_allocated */ 0, \ + /* thread_allocated_next_event_fast */ 0, \ + /* thread_deallocated */ 0, \ + /* thread_deallocated_next_event_fast */ 0, \ + /* tcache */ TCACHE_ZERO_INITIALIZER, + +/* O(name, type, nullable type) */ +#define TSD_DATA_SLOWER \ O(witness_tsd, witness_tsd_t, witness_tsdn_t) \ MALLOC_TEST_TSD +#define TSD_DATA_SLOWER_INITIALIZER \ + /* witness */ WITNESS_TSD_INITIALIZER \ + /* test data */ MALLOC_TEST_TSD_INITIALIZER + + #define TSD_INITIALIZER { \ - ATOMIC_INIT(tsd_state_uninitialized), \ - TCACHE_ENABLED_ZERO_INITIALIZER, \ - false, \ - 0, \ - 0, \ - 0, \ - 0, \ - 0, \ - 0, \ - NULL, \ - RTREE_CTX_ZERO_INITIALIZER, \ - NULL, \ - NULL, \ - NULL, \ - TSD_BINSHARDS_ZERO_INITIALIZER, \ - TCACHE_ZERO_INITIALIZER, \ - WITNESS_TSD_INITIALIZER \ - MALLOC_TEST_TSD_INITIALIZER \ + TSD_DATA_SLOW_INITIALIZER \ + /* state */ ATOMIC_INIT(tsd_state_uninitialized), \ + TSD_DATA_FAST_INITIALIZER \ + TSD_DATA_SLOWER_INITIALIZER \ } +#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) +void _malloc_tsd_cleanup_register(bool (*f)(void)); +#endif + void *malloc_tsd_malloc(size_t size); void malloc_tsd_dalloc(void *wrapper); -void malloc_tsd_cleanup_register(bool (*f)(void)); tsd_t *malloc_tsd_boot0(void); void malloc_tsd_boot1(void); void tsd_cleanup(void *arg); @@ -189,14 +244,17 @@ struct tsd_s { * setters below. */ +#define O(n, t, nt) \ + t TSD_MANGLE(n); + + TSD_DATA_SLOW /* * We manually limit the state to just a single byte. Unless the 8-bit * atomics are unavailable (which is rare). */ tsd_state_t state; -#define O(n, t, nt) \ - t TSD_MANGLE(n); -MALLOC_TSD + TSD_DATA_FAST + TSD_DATA_SLOWER #undef O /* AddressSanitizer requires TLS data to be aligned to at least 8 bytes. */ } JEMALLOC_ALIGNED(16); @@ -263,7 +321,9 @@ JEMALLOC_ALWAYS_INLINE t * \ tsd_##n##p_get_unsafe(tsd_t *tsd) { \ return &tsd->TSD_MANGLE(n); \ } -MALLOC_TSD +TSD_DATA_SLOW +TSD_DATA_FAST +TSD_DATA_SLOWER #undef O /* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */ @@ -282,7 +342,9 @@ tsd_##n##p_get(tsd_t *tsd) { \ state == tsd_state_minimal_initialized); \ return tsd_##n##p_get_unsafe(tsd); \ } -MALLOC_TSD +TSD_DATA_SLOW +TSD_DATA_FAST +TSD_DATA_SLOWER #undef O /* @@ -298,7 +360,9 @@ tsdn_##n##p_get(tsdn_t *tsdn) { \ tsd_t *tsd = tsdn_tsd(tsdn); \ return (nt *)tsd_##n##p_get(tsd); \ } -MALLOC_TSD +TSD_DATA_SLOW +TSD_DATA_FAST +TSD_DATA_SLOWER #undef O /* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */ @@ -307,7 +371,9 @@ JEMALLOC_ALWAYS_INLINE t \ tsd_##n##_get(tsd_t *tsd) { \ return *tsd_##n##p_get(tsd); \ } -MALLOC_TSD +TSD_DATA_SLOW +TSD_DATA_FAST +TSD_DATA_SLOWER #undef O /* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. 
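Illustrative sketch, not part of the imported sources: the X-macro pattern used by the TSD_DATA_* lists and the O(...) definitions that follow, where one field list is expanded several times to generate both the struct members and a typed getter per field. The DEMO_* names are invented.

```
#include <stdint.h>
#include <stdio.h>

/* One list of (name, type) pairs, expanded multiple times below. */
#define DEMO_TSD_FIELDS \
    O(thread_allocated, uint64_t) \
    O(reentrancy_level, int8_t)

typedef struct {
#define O(n, t) t n;
DEMO_TSD_FIELDS
#undef O
} demo_tsd_t;

/* Generates demo_tsd_<field>_get() for every listed field. */
#define O(n, t) \
static inline t demo_tsd_##n##_get(demo_tsd_t *tsd) { return tsd->n; }
DEMO_TSD_FIELDS
#undef O

int main(void) {
	demo_tsd_t tsd = { .thread_allocated = 42, .reentrancy_level = 1 };
	printf("%llu %d\n",
	    (unsigned long long)demo_tsd_thread_allocated_get(&tsd),
	    (int)demo_tsd_reentrancy_level_get(&tsd));
	return 0;
}
```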
*/ @@ -318,7 +384,9 @@ tsd_##n##_set(tsd_t *tsd, t val) { \ tsd_state_get(tsd) != tsd_state_minimal_initialized); \ *tsd_##n##p_get(tsd) = val; \ } -MALLOC_TSD +TSD_DATA_SLOW +TSD_DATA_FAST +TSD_DATA_SLOWER #undef O JEMALLOC_ALWAYS_INLINE void @@ -383,7 +451,10 @@ tsd_fetch(void) { static inline bool tsd_nominal(tsd_t *tsd) { - return (tsd_state_get(tsd) <= tsd_state_nominal_max); + bool nominal = tsd_state_get(tsd) <= tsd_state_nominal_max; + assert(nominal || tsd_reentrancy_level_get(tsd) > 0); + + return nominal; } JEMALLOC_ALWAYS_INLINE tsdn_t * @@ -413,4 +484,36 @@ tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) { return tsd_rtree_ctx(tsdn_tsd(tsdn)); } +static inline bool +tsd_state_nocleanup(tsd_t *tsd) { + return tsd_state_get(tsd) == tsd_state_reincarnated || + tsd_state_get(tsd) == tsd_state_minimal_initialized; +} + +/* + * These "raw" tsd reentrancy functions don't have any debug checking to make + * sure that we're not touching arena 0. Better is to call pre_reentrancy and + * post_reentrancy if this is possible. + */ +static inline void +tsd_pre_reentrancy_raw(tsd_t *tsd) { + bool fast = tsd_fast(tsd); + assert(tsd_reentrancy_level_get(tsd) < INT8_MAX); + ++*tsd_reentrancy_levelp_get(tsd); + if (fast) { + /* Prepare slow path for reentrancy. */ + tsd_slow_update(tsd); + assert(tsd_state_get(tsd) == tsd_state_nominal_slow); + } +} + +static inline void +tsd_post_reentrancy_raw(tsd_t *tsd) { + int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd); + assert(*reentrancy_level > 0); + if (--*reentrancy_level == 0) { + tsd_slow_update(tsd); + } +} + #endif /* JEMALLOC_INTERNAL_TSD_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/tsd_generic.h b/contrib/jemalloc/include/jemalloc/internal/tsd_generic.h index cf73c0c715532b..a718472f32ef3e 100644 --- a/contrib/jemalloc/include/jemalloc/internal/tsd_generic.h +++ b/contrib/jemalloc/include/jemalloc/internal/tsd_generic.h @@ -52,6 +52,9 @@ tsd_cleanup_wrapper(void *arg) { JEMALLOC_ALWAYS_INLINE void tsd_wrapper_set(tsd_wrapper_t *wrapper) { + if (unlikely(!tsd_booted)) { + return; + } if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) { malloc_write(": Error setting TSD\n"); abort(); @@ -60,7 +63,13 @@ tsd_wrapper_set(tsd_wrapper_t *wrapper) { JEMALLOC_ALWAYS_INLINE tsd_wrapper_t * tsd_wrapper_get(bool init) { - tsd_wrapper_t *wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd); + tsd_wrapper_t *wrapper; + + if (unlikely(!tsd_booted)) { + return &tsd_boot_wrapper; + } + + wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd); if (init && unlikely(wrapper == NULL)) { tsd_init_block_t block; @@ -91,11 +100,21 @@ tsd_wrapper_get(bool init) { JEMALLOC_ALWAYS_INLINE bool tsd_boot0(void) { + tsd_wrapper_t *wrapper; + tsd_init_block_t block; + + wrapper = (tsd_wrapper_t *) + tsd_init_check_recursion(&tsd_init_head, &block); + if (wrapper) { + return false; + } + block.data = &tsd_boot_wrapper; if (pthread_key_create(&tsd_tsd, tsd_cleanup_wrapper) != 0) { return true; } - tsd_wrapper_set(&tsd_boot_wrapper); tsd_booted = true; + tsd_wrapper_set(&tsd_boot_wrapper); + tsd_init_finish(&tsd_init_head, &block); return false; } diff --git a/contrib/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h b/contrib/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h index 65852d5c1492f6..d8f3ef13c00fda 100644 --- a/contrib/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h +++ b/contrib/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h @@ -21,7 +21,7 @@ tsd_cleanup_wrapper(void) { 
JEMALLOC_ALWAYS_INLINE bool tsd_boot0(void) { - malloc_tsd_cleanup_register(&tsd_cleanup_wrapper); + _malloc_tsd_cleanup_register(&tsd_cleanup_wrapper); tsd_booted = true; return false; } diff --git a/contrib/jemalloc/include/jemalloc/internal/tsd_types.h b/contrib/jemalloc/include/jemalloc/internal/tsd_types.h index 6200af61f3dcc1..a6ae37da5a21e5 100644 --- a/contrib/jemalloc/include/jemalloc/internal/tsd_types.h +++ b/contrib/jemalloc/include/jemalloc/internal/tsd_types.h @@ -1,7 +1,7 @@ #ifndef JEMALLOC_INTERNAL_TSD_TYPES_H #define JEMALLOC_INTERNAL_TSD_TYPES_H -#define MALLOC_TSD_CLEANUPS_MAX 2 +#define MALLOC_TSD_CLEANUPS_MAX 4 typedef struct tsd_s tsd_t; typedef struct tsdn_s tsdn_t; diff --git a/contrib/jemalloc/include/jemalloc/internal/typed_list.h b/contrib/jemalloc/include/jemalloc/internal/typed_list.h new file mode 100644 index 00000000000000..6535055a1eca3d --- /dev/null +++ b/contrib/jemalloc/include/jemalloc/internal/typed_list.h @@ -0,0 +1,55 @@ +#ifndef JEMALLOC_INTERNAL_TYPED_LIST_H +#define JEMALLOC_INTERNAL_TYPED_LIST_H + +/* + * This wraps the ql module to implement a list class in a way that's a little + * bit easier to use; it handles ql_elm_new calls and provides type safety. + */ + +#define TYPED_LIST(list_type, el_type, linkage) \ +typedef struct { \ + ql_head(el_type) head; \ +} list_type##_t; \ +static inline void \ +list_type##_init(list_type##_t *list) { \ + ql_new(&list->head); \ +} \ +static inline el_type * \ +list_type##_first(const list_type##_t *list) { \ + return ql_first(&list->head); \ +} \ +static inline el_type * \ +list_type##_last(const list_type##_t *list) { \ + return ql_last(&list->head, linkage); \ +} \ +static inline void \ +list_type##_append(list_type##_t *list, el_type *item) { \ + ql_elm_new(item, linkage); \ + ql_tail_insert(&list->head, item, linkage); \ +} \ +static inline void \ +list_type##_prepend(list_type##_t *list, el_type *item) { \ + ql_elm_new(item, linkage); \ + ql_head_insert(&list->head, item, linkage); \ +} \ +static inline void \ +list_type##_replace(list_type##_t *list, el_type *to_remove, \ + el_type *to_insert) { \ + ql_elm_new(to_insert, linkage); \ + ql_after_insert(to_remove, to_insert, linkage); \ + ql_remove(&list->head, to_remove, linkage); \ +} \ +static inline void \ +list_type##_remove(list_type##_t *list, el_type *item) { \ + ql_remove(&list->head, item, linkage); \ +} \ +static inline bool \ +list_type##_empty(list_type##_t *list) { \ + return ql_empty(&list->head); \ +} \ +static inline void \ +list_type##_concat(list_type##_t *list_a, list_type##_t *list_b) { \ + ql_concat(&list_a->head, &list_b->head, linkage); \ +} + +#endif /* JEMALLOC_INTERNAL_TYPED_LIST_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/util.h b/contrib/jemalloc/include/jemalloc/internal/util.h index 304cb545afcbcb..dcb1c0a5d62b95 100644 --- a/contrib/jemalloc/include/jemalloc/internal/util.h +++ b/contrib/jemalloc/include/jemalloc/internal/util.h @@ -62,6 +62,62 @@ get_errno(void) { #endif } +JEMALLOC_ALWAYS_INLINE void +util_assume(bool b) { + if (!b) { + unreachable(); + } +} + +/* ptr should be valid. */ +JEMALLOC_ALWAYS_INLINE void +util_prefetch_read(void *ptr) { + /* + * This should arguably be a config check; but any version of GCC so old + * that it doesn't support __builtin_prefetch is also too old to build + * jemalloc. + */ +#ifdef __GNUC__ + if (config_debug) { + /* Enforce the "valid ptr" requirement. 
*/ + *(volatile char *)ptr; + } + __builtin_prefetch(ptr, /* read or write */ 0, /* locality hint */ 3); +#else + *(volatile char *)ptr; +#endif +} + +JEMALLOC_ALWAYS_INLINE void +util_prefetch_write(void *ptr) { +#ifdef __GNUC__ + if (config_debug) { + *(volatile char *)ptr; + } + /* + * The only difference from the read variant is that this has a 1 as the + * second argument (the write hint). + */ + __builtin_prefetch(ptr, 1, 3); +#else + *(volatile char *)ptr; +#endif +} + +JEMALLOC_ALWAYS_INLINE void +util_prefetch_read_range(void *ptr, size_t sz) { + for (size_t i = 0; i < sz; i += CACHELINE) { + util_prefetch_read((void *)((uintptr_t)ptr + i)); + } +} + +JEMALLOC_ALWAYS_INLINE void +util_prefetch_write_range(void *ptr, size_t sz) { + for (size_t i = 0; i < sz; i += CACHELINE) { + util_prefetch_write((void *)((uintptr_t)ptr + i)); + } +} + #undef UTIL_INLINE #endif /* JEMALLOC_INTERNAL_UTIL_H */ diff --git a/contrib/jemalloc/include/jemalloc/internal/witness.h b/contrib/jemalloc/include/jemalloc/internal/witness.h index fff9e98cb64fa5..e81b9a0069c04f 100644 --- a/contrib/jemalloc/include/jemalloc/internal/witness.h +++ b/contrib/jemalloc/include/jemalloc/internal/witness.h @@ -7,60 +7,76 @@ /* LOCK RANKS */ /******************************************************************************/ -/* - * Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the witness - * machinery. - */ - -#define WITNESS_RANK_OMIT 0U - -#define WITNESS_RANK_MIN 1U - -#define WITNESS_RANK_INIT 1U -#define WITNESS_RANK_CTL 1U -#define WITNESS_RANK_TCACHES 2U -#define WITNESS_RANK_ARENAS 3U - -#define WITNESS_RANK_BACKGROUND_THREAD_GLOBAL 4U - -#define WITNESS_RANK_PROF_DUMP 5U -#define WITNESS_RANK_PROF_BT2GCTX 6U -#define WITNESS_RANK_PROF_TDATAS 7U -#define WITNESS_RANK_PROF_TDATA 8U -#define WITNESS_RANK_PROF_LOG 9U -#define WITNESS_RANK_PROF_GCTX 10U -#define WITNESS_RANK_BACKGROUND_THREAD 11U - -/* - * Used as an argument to witness_assert_depth_to_rank() in order to validate - * depth excluding non-core locks with lower ranks. Since the rank argument to - * witness_assert_depth_to_rank() is inclusive rather than exclusive, this - * definition can have the same value as the minimally ranked core lock. - */ -#define WITNESS_RANK_CORE 12U - -#define WITNESS_RANK_DECAY 12U -#define WITNESS_RANK_TCACHE_QL 13U -#define WITNESS_RANK_EXTENT_GROW 14U -#define WITNESS_RANK_EXTENTS 15U -#define WITNESS_RANK_EXTENT_AVAIL 16U - -#define WITNESS_RANK_EXTENT_POOL 17U -#define WITNESS_RANK_RTREE 18U -#define WITNESS_RANK_BASE 19U -#define WITNESS_RANK_ARENA_LARGE 20U -#define WITNESS_RANK_HOOK 21U - -#define WITNESS_RANK_LEAF 0xffffffffU -#define WITNESS_RANK_BIN WITNESS_RANK_LEAF -#define WITNESS_RANK_ARENA_STATS WITNESS_RANK_LEAF -#define WITNESS_RANK_DSS WITNESS_RANK_LEAF -#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF -#define WITNESS_RANK_PROF_ACCUM WITNESS_RANK_LEAF -#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF -#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF -#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF -#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF +enum witness_rank_e { + /* + * Order matters within this enum listing -- higher valued locks can + * only be acquired after lower-valued ones. We use the + * auto-incrementing-ness of enum values to enforce this. + */ + + /* + * Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the + * witness machinery. 
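Illustrative sketch, not part of the imported sources: the lock-ordering discipline the witness ranks encode, namely that a thread should only acquire a lock whose rank is higher than every rank it already holds (the real machinery also permits equal ranks for leaf locks, with a comparator). The names and the single max_held field are simplifications.

```
#include <assert.h>

typedef enum {
	DEMO_RANK_INIT = 1,
	DEMO_RANK_ARENA,
	DEMO_RANK_RTREE,
	DEMO_RANK_LEAF = 0x1000
} demo_rank_t;

typedef struct { int max_held; } demo_witness_tsd_t;

static void demo_witness_lock(demo_witness_tsd_t *w, demo_rank_t r) {
	/* An out-of-order acquisition would trip this assertion. */
	assert((int)r > w->max_held);
	w->max_held = (int)r;
}

int main(void) {
	demo_witness_tsd_t w = { 0 };
	demo_witness_lock(&w, DEMO_RANK_ARENA);
	demo_witness_lock(&w, DEMO_RANK_RTREE); /* ok: rank increases */
	/* demo_witness_lock(&w, DEMO_RANK_INIT); would assert: rank decreased */
	return 0;
}
```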
+ */ + WITNESS_RANK_OMIT, + WITNESS_RANK_MIN, + WITNESS_RANK_INIT = WITNESS_RANK_MIN, + WITNESS_RANK_CTL, + WITNESS_RANK_TCACHES, + WITNESS_RANK_ARENAS, + WITNESS_RANK_BACKGROUND_THREAD_GLOBAL, + WITNESS_RANK_PROF_DUMP, + WITNESS_RANK_PROF_BT2GCTX, + WITNESS_RANK_PROF_TDATAS, + WITNESS_RANK_PROF_TDATA, + WITNESS_RANK_PROF_LOG, + WITNESS_RANK_PROF_GCTX, + WITNESS_RANK_PROF_RECENT_DUMP, + WITNESS_RANK_BACKGROUND_THREAD, + /* + * Used as an argument to witness_assert_depth_to_rank() in order to + * validate depth excluding non-core locks with lower ranks. Since the + * rank argument to witness_assert_depth_to_rank() is inclusive rather + * than exclusive, this definition can have the same value as the + * minimally ranked core lock. + */ + WITNESS_RANK_CORE, + WITNESS_RANK_DECAY = WITNESS_RANK_CORE, + WITNESS_RANK_TCACHE_QL, + + WITNESS_RANK_SEC_SHARD, + + WITNESS_RANK_EXTENT_GROW, + WITNESS_RANK_HPA_SHARD_GROW = WITNESS_RANK_EXTENT_GROW, + WITNESS_RANK_SAN_BUMP_ALLOC = WITNESS_RANK_EXTENT_GROW, + + WITNESS_RANK_EXTENTS, + WITNESS_RANK_HPA_SHARD = WITNESS_RANK_EXTENTS, + + WITNESS_RANK_HPA_CENTRAL_GROW, + WITNESS_RANK_HPA_CENTRAL, + + WITNESS_RANK_EDATA_CACHE, + + WITNESS_RANK_RTREE, + WITNESS_RANK_BASE, + WITNESS_RANK_ARENA_LARGE, + WITNESS_RANK_HOOK, + + WITNESS_RANK_LEAF=0x1000, + WITNESS_RANK_BIN = WITNESS_RANK_LEAF, + WITNESS_RANK_ARENA_STATS = WITNESS_RANK_LEAF, + WITNESS_RANK_COUNTER_ACCUM = WITNESS_RANK_LEAF, + WITNESS_RANK_DSS = WITNESS_RANK_LEAF, + WITNESS_RANK_PROF_ACTIVE = WITNESS_RANK_LEAF, + WITNESS_RANK_PROF_DUMP_FILENAME = WITNESS_RANK_LEAF, + WITNESS_RANK_PROF_GDUMP = WITNESS_RANK_LEAF, + WITNESS_RANK_PROF_NEXT_THR_UID = WITNESS_RANK_LEAF, + WITNESS_RANK_PROF_RECENT_ALLOC = WITNESS_RANK_LEAF, + WITNESS_RANK_PROF_STATS = WITNESS_RANK_LEAF, + WITNESS_RANK_PROF_THREAD_ACTIVE_INIT = WITNESS_RANK_LEAF, +}; +typedef enum witness_rank_e witness_rank_t; /******************************************************************************/ /* PER-WITNESS DATA */ @@ -72,7 +88,6 @@ #endif typedef struct witness_s witness_t; -typedef unsigned witness_rank_t; typedef ql_head(witness_t) witness_list_t; typedef int witness_comp_t (const witness_t *, void *, const witness_t *, void *); @@ -82,8 +97,8 @@ struct witness_s { const char *name; /* - * Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses - * must be acquired in order of increasing rank. + * Witness rank, where 0 is lowest and WITNESS_RANK_LEAF is highest. + * Witnesses must be acquired in order of increasing rank. */ witness_rank_t rank; @@ -228,26 +243,13 @@ witness_assert_not_owner(witness_tsdn_t *witness_tsdn, } } -static inline void -witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn, - witness_rank_t rank_inclusive, unsigned depth) { - witness_tsd_t *witness_tsd; - unsigned d; - witness_list_t *witnesses; - witness_t *w; - - if (!config_debug) { - return; - } +/* Returns depth. Not intended for direct use. 
*/ +static inline unsigned +witness_depth_to_rank(witness_list_t *witnesses, witness_rank_t rank_inclusive) +{ + unsigned d = 0; + witness_t *w = ql_last(witnesses, link); - if (witness_tsdn_null(witness_tsdn)) { - return; - } - witness_tsd = witness_tsdn_tsd(witness_tsdn); - - d = 0; - witnesses = &witness_tsd->witnesses; - w = ql_last(witnesses, link); if (w != NULL) { ql_reverse_foreach(w, witnesses, link) { if (w->rank < rank_inclusive) { @@ -256,6 +258,20 @@ witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn, d++; } } + + return d; +} + +static inline void +witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn, + witness_rank_t rank_inclusive, unsigned depth) { + if (!config_debug || witness_tsdn_null(witness_tsdn)) { + return; + } + + witness_list_t *witnesses = &witness_tsdn_tsd(witness_tsdn)->witnesses; + unsigned d = witness_depth_to_rank(witnesses, rank_inclusive); + if (d != depth) { witness_depth_error(witnesses, rank_inclusive, depth); } @@ -271,6 +287,21 @@ witness_assert_lockless(witness_tsdn_t *witness_tsdn) { witness_assert_depth(witness_tsdn, 0); } +static inline void +witness_assert_positive_depth_to_rank(witness_tsdn_t *witness_tsdn, + witness_rank_t rank_inclusive) { + if (!config_debug || witness_tsdn_null(witness_tsdn)) { + return; + } + + witness_list_t *witnesses = &witness_tsdn_tsd(witness_tsdn)->witnesses; + unsigned d = witness_depth_to_rank(witnesses, rank_inclusive); + + if (d == 0) { + witness_depth_error(witnesses, rank_inclusive, 1); + } +} + static inline void witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) { witness_tsd_t *witness_tsd; diff --git a/contrib/jemalloc/include/jemalloc/jemalloc.h b/contrib/jemalloc/include/jemalloc/jemalloc.h index c3fa08e83f2244..62403fae8b8b18 100644 --- a/contrib/jemalloc/include/jemalloc/jemalloc.h +++ b/contrib/jemalloc/include/jemalloc/jemalloc.h @@ -19,11 +19,17 @@ extern "C" { /* Defined if format(printf, ...) attribute is supported. */ #define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF +/* Defined if fallthrough attribute is supported. */ +#define JEMALLOC_HAVE_ATTR_FALLTHROUGH + +/* Defined if cold attribute is supported. */ +#define JEMALLOC_HAVE_ATTR_COLD + /* * Define overrides for non-standard allocator-related functions if they are * present on the system. 
*/ -/* #undef JEMALLOC_OVERRIDE_MEMALIGN */ +#undef JEMALLOC_OVERRIDE_MEMALIGN #define JEMALLOC_OVERRIDE_VALLOC /* @@ -68,11 +74,12 @@ extern "C" { # define je_mallctlnametomib mallctlnametomib # define je_malloc malloc # define je_malloc_conf malloc_conf +# define je_malloc_conf_2_conf_harder malloc_conf_2_conf_harder # define je_malloc_message malloc_message # define je_malloc_stats_print malloc_stats_print # define je_malloc_usable_size malloc_usable_size # define je_mallocx mallocx -# define je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 +# define je_smallocx_54eaed1d8b56b1aa528be3bdd1877e59c56fa90c smallocx_54eaed1d8b56b1aa528be3bdd1877e59c56fa90c # define je_nallocx nallocx # define je_posix_memalign posix_memalign # define je_rallocx rallocx @@ -80,6 +87,7 @@ extern "C" { # define je_sallocx sallocx # define je_sdallocx sdallocx # define je_xallocx xallocx +# define je_memalign memalign # define je_valloc valloc #endif @@ -91,13 +99,13 @@ extern "C" { #include #include -#define JEMALLOC_VERSION "5.2.1-0-gea6b3e973b477b8061e0076bb257dbd7f3faa756" +#define JEMALLOC_VERSION "5.3.0-0-g54eaed1d8b56b1aa528be3bdd1877e59c56fa90c" #define JEMALLOC_VERSION_MAJOR 5 -#define JEMALLOC_VERSION_MINOR 2 -#define JEMALLOC_VERSION_BUGFIX 1 +#define JEMALLOC_VERSION_MINOR 3 +#define JEMALLOC_VERSION_BUGFIX 0 #define JEMALLOC_VERSION_NREV 0 -#define JEMALLOC_VERSION_GID "ea6b3e973b477b8061e0076bb257dbd7f3faa756" -#define JEMALLOC_VERSION_GID_IDENT ea6b3e973b477b8061e0076bb257dbd7f3faa756 +#define JEMALLOC_VERSION_GID "54eaed1d8b56b1aa528be3bdd1877e59c56fa90c" +#define JEMALLOC_VERSION_GID_IDENT 54eaed1d8b56b1aa528be3bdd1877e59c56fa90c #define MALLOCX_LG_ALIGN(la) ((int)(la)) #if LG_SIZEOF_PTR == 2 @@ -158,6 +166,7 @@ extern "C" { # endif # define JEMALLOC_FORMAT_ARG(i) # define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_FALLTHROUGH # define JEMALLOC_NOINLINE __declspec(noinline) # ifdef __cplusplus # define JEMALLOC_NOTHROW __declspec(nothrow) @@ -171,6 +180,7 @@ extern "C" { # else # define JEMALLOC_ALLOCATOR # endif +# define JEMALLOC_COLD #elif defined(JEMALLOC_HAVE_ATTR) # define JEMALLOC_ATTR(s) __attribute__((s)) # define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) @@ -196,11 +206,21 @@ extern "C" { # else # define JEMALLOC_FORMAT_PRINTF(s, i) # endif +# ifdef JEMALLOC_HAVE_ATTR_FALLTHROUGH +# define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough) +# else +# define JEMALLOC_FALLTHROUGH +# endif # define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) # define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) # define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) # define JEMALLOC_RESTRICT_RETURN # define JEMALLOC_ALLOCATOR +# ifdef JEMALLOC_HAVE_ATTR_COLD +# define JEMALLOC_COLD JEMALLOC_ATTR(__cold__) +# else +# define JEMALLOC_COLD +# endif #else # define JEMALLOC_ATTR(s) # define JEMALLOC_ALIGNED(s) @@ -208,11 +228,19 @@ extern "C" { # define JEMALLOC_ALLOC_SIZE2(s1, s2) # define JEMALLOC_EXPORT # define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_FALLTHROUGH # define JEMALLOC_NOINLINE # define JEMALLOC_NOTHROW # define JEMALLOC_SECTION(s) # define JEMALLOC_RESTRICT_RETURN # define JEMALLOC_ALLOCATOR +# define JEMALLOC_COLD +#endif + +#if (defined(__APPLE__) || defined(__FreeBSD__)) && !defined(JEMALLOC_NO_RENAME) +# define JEMALLOC_SYS_NOTHROW +#else +# define JEMALLOC_SYS_NOTHROW JEMALLOC_NOTHROW #endif /* @@ -225,21 +253,22 @@ extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque, const char *s); JEMALLOC_EXPORT JEMALLOC_ALLOCATOR 
JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_malloc(size_t size) + void JEMALLOC_SYS_NOTHROW *je_malloc(size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size) + void JEMALLOC_SYS_NOTHROW *je_calloc(size_t num, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2); -JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr, - size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1)); +JEMALLOC_EXPORT int JEMALLOC_SYS_NOTHROW je_posix_memalign( + void **memptr, size_t alignment, size_t size) JEMALLOC_CXX_THROW + JEMALLOC_ATTR(nonnull(1)); JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment, + void JEMALLOC_SYS_NOTHROW *je_aligned_alloc(size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2); JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size) + void JEMALLOC_SYS_NOTHROW *je_realloc(void *ptr, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2); -JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr) +JEMALLOC_EXPORT void JEMALLOC_SYS_NOTHROW je_free(void *ptr) JEMALLOC_CXX_THROW; JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN @@ -269,16 +298,20 @@ JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print( const char *opts); JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size( JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW; +#ifdef JEMALLOC_HAVE_MALLOC_SIZE +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_size( + const void *ptr); +#endif #ifdef JEMALLOC_OVERRIDE_MEMALIGN JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size) + void JEMALLOC_SYS_NOTHROW *je_memalign(size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc); #endif #ifdef JEMALLOC_OVERRIDE_VALLOC JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN - void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW + void JEMALLOC_SYS_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc); #endif @@ -380,11 +413,12 @@ struct extent_hooks_s { # define mallctlnametomib je_mallctlnametomib # define malloc je_malloc # define malloc_conf je_malloc_conf +# define malloc_conf_2_conf_harder je_malloc_conf_2_conf_harder # define malloc_message je_malloc_message # define malloc_stats_print je_malloc_stats_print # define malloc_usable_size je_malloc_usable_size # define mallocx je_mallocx -# define smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 +# define smallocx_54eaed1d8b56b1aa528be3bdd1877e59c56fa90c je_smallocx_54eaed1d8b56b1aa528be3bdd1877e59c56fa90c # define nallocx je_nallocx # define posix_memalign je_posix_memalign # define rallocx je_rallocx @@ -392,6 +426,7 @@ struct extent_hooks_s { # define sallocx je_sallocx # define sdallocx je_sdallocx # define xallocx je_xallocx +# define memalign je_memalign # define valloc je_valloc #endif @@ -412,11 +447,12 @@ struct extent_hooks_s { # undef je_mallctlnametomib # undef je_malloc # undef je_malloc_conf +# undef je_malloc_conf_2_conf_harder # undef je_malloc_message # undef je_malloc_stats_print # undef je_malloc_usable_size # undef je_mallocx -# undef je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 +# 
undef je_smallocx_54eaed1d8b56b1aa528be3bdd1877e59c56fa90c # undef je_nallocx # undef je_posix_memalign # undef je_rallocx @@ -424,6 +460,7 @@ struct extent_hooks_s { # undef je_sallocx # undef je_sdallocx # undef je_xallocx +# undef je_memalign # undef je_valloc #endif diff --git a/contrib/jemalloc/include/jemalloc/jemalloc_FreeBSD.h b/contrib/jemalloc/include/jemalloc/jemalloc_FreeBSD.h index e733906bc146f0..e31ed0d8cdf382 100644 --- a/contrib/jemalloc/include/jemalloc/jemalloc_FreeBSD.h +++ b/contrib/jemalloc/include/jemalloc/jemalloc_FreeBSD.h @@ -157,6 +157,7 @@ extern int __isthreaded; #define pthread_cond_wait _pthread_cond_wait #define pthread_cond_timedwait _pthread_cond_timedwait #define pthread_cond_signal _pthread_cond_signal +#define pthread_getname_np _pthread_getname_np #ifdef JEMALLOC_C_ /* diff --git a/contrib/jemalloc/src/arena.c b/contrib/jemalloc/src/arena.c index ba50e41033ff75..857b27c52fb091 100644 --- a/contrib/jemalloc/src/arena.c +++ b/contrib/jemalloc/src/arena.c @@ -1,11 +1,12 @@ -#define JEMALLOC_ARENA_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" -#include "jemalloc/internal/div.h" +#include "jemalloc/internal/decay.h" +#include "jemalloc/internal/ehooks.h" #include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/san.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/safety_check.h" @@ -35,34 +36,37 @@ ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT; static atomic_zd_t dirty_decay_ms_default; static atomic_zd_t muzzy_decay_ms_default; -const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = { -#define STEP(step, h, x, y) \ - h, - SMOOTHSTEP -#undef STEP -}; +emap_t arena_emap_global; +pa_central_t arena_pa_central_global; -static div_info_t arena_binind_div_info[SC_NBINS]; +div_info_t arena_binind_div_info[SC_NBINS]; size_t opt_oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT; size_t oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT; + +uint32_t arena_bin_offsets[SC_NBINS]; +static unsigned nbins_total; + static unsigned huge_arena_ind; +const arena_config_t arena_config_default = { + /* .extent_hooks = */ (extent_hooks_t *)&ehooks_default_extent_hooks, + /* .metadata_use_hooks = */ true, +}; + /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition. 
*/ -static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, - arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit, - size_t npages_decay_max, bool is_background_thread); static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all); -static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, - bin_t *bin); -static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, +static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab, bin_t *bin); +static void +arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay, + size_t npages_new); /******************************************************************************/ @@ -72,19 +76,17 @@ arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, size_t *nactive, size_t *ndirty, size_t *nmuzzy) { *nthreads += arena_nthreads_get(arena, false); *dss = dss_prec_names[arena_dss_prec_get(arena)]; - *dirty_decay_ms = arena_dirty_decay_ms_get(arena); - *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); - *nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED); - *ndirty += extents_npages_get(&arena->extents_dirty); - *nmuzzy += extents_npages_get(&arena->extents_muzzy); + *dirty_decay_ms = arena_decay_ms_get(arena, extent_state_dirty); + *muzzy_decay_ms = arena_decay_ms_get(arena, extent_state_muzzy); + pa_shard_basic_stats_merge(&arena->pa_shard, nactive, ndirty, nmuzzy); } void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, - bin_stats_t *bstats, arena_stats_large_t *lstats, - arena_stats_extents_t *estats) { + bin_stats_data_t *bstats, arena_stats_large_t *lstats, + pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats) { cassert(config_stats); arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms, @@ -93,122 +95,74 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, size_t base_allocated, base_resident, base_mapped, metadata_thp; base_stats_get(tsdn, arena->base, &base_allocated, &base_resident, &base_mapped, &metadata_thp); + size_t pac_mapped_sz = pac_mapped(&arena->pa_shard.pac); + astats->mapped += base_mapped + pac_mapped_sz; + astats->resident += base_resident; - arena_stats_lock(tsdn, &arena->stats); - - arena_stats_accum_zu(&astats->mapped, base_mapped - + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped)); - arena_stats_accum_zu(&astats->retained, - extents_npages_get(&arena->extents_retained) << LG_PAGE); - - atomic_store_zu(&astats->extent_avail, - atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED), - ATOMIC_RELAXED); + LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx); - arena_stats_accum_u64(&astats->decay_dirty.npurge, - arena_stats_read_u64(tsdn, &arena->stats, - &arena->stats.decay_dirty.npurge)); - arena_stats_accum_u64(&astats->decay_dirty.nmadvise, - arena_stats_read_u64(tsdn, &arena->stats, - &arena->stats.decay_dirty.nmadvise)); - arena_stats_accum_u64(&astats->decay_dirty.purged, - arena_stats_read_u64(tsdn, &arena->stats, - &arena->stats.decay_dirty.purged)); - - arena_stats_accum_u64(&astats->decay_muzzy.npurge, - arena_stats_read_u64(tsdn, &arena->stats, - &arena->stats.decay_muzzy.npurge)); - arena_stats_accum_u64(&astats->decay_muzzy.nmadvise, - arena_stats_read_u64(tsdn, &arena->stats, - &arena->stats.decay_muzzy.nmadvise)); - 
arena_stats_accum_u64(&astats->decay_muzzy.purged, - arena_stats_read_u64(tsdn, &arena->stats, - &arena->stats.decay_muzzy.purged)); - - arena_stats_accum_zu(&astats->base, base_allocated); - arena_stats_accum_zu(&astats->internal, arena_internal_get(arena)); - arena_stats_accum_zu(&astats->metadata_thp, metadata_thp); - arena_stats_accum_zu(&astats->resident, base_resident + - (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) + - extents_npages_get(&arena->extents_dirty) + - extents_npages_get(&arena->extents_muzzy)) << LG_PAGE))); - arena_stats_accum_zu(&astats->abandoned_vm, atomic_load_zu( - &arena->stats.abandoned_vm, ATOMIC_RELAXED)); + astats->base += base_allocated; + atomic_load_add_store_zu(&astats->internal, arena_internal_get(arena)); + astats->metadata_thp += metadata_thp; for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) { - uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats, + uint64_t nmalloc = locked_read_u64(tsdn, + LOCKEDINT_MTX(arena->stats.mtx), &arena->stats.lstats[i].nmalloc); - arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc); - arena_stats_accum_u64(&astats->nmalloc_large, nmalloc); + locked_inc_u64_unsynchronized(&lstats[i].nmalloc, nmalloc); + astats->nmalloc_large += nmalloc; - uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats, + uint64_t ndalloc = locked_read_u64(tsdn, + LOCKEDINT_MTX(arena->stats.mtx), &arena->stats.lstats[i].ndalloc); - arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc); - arena_stats_accum_u64(&astats->ndalloc_large, ndalloc); + locked_inc_u64_unsynchronized(&lstats[i].ndalloc, ndalloc); + astats->ndalloc_large += ndalloc; - uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats, + uint64_t nrequests = locked_read_u64(tsdn, + LOCKEDINT_MTX(arena->stats.mtx), &arena->stats.lstats[i].nrequests); - arena_stats_accum_u64(&lstats[i].nrequests, - nmalloc + nrequests); - arena_stats_accum_u64(&astats->nrequests_large, + locked_inc_u64_unsynchronized(&lstats[i].nrequests, nmalloc + nrequests); + astats->nrequests_large += nmalloc + nrequests; /* nfill == nmalloc for large currently. 
*/ - arena_stats_accum_u64(&lstats[i].nfills, nmalloc); - arena_stats_accum_u64(&astats->nfills_large, nmalloc); + locked_inc_u64_unsynchronized(&lstats[i].nfills, nmalloc); + astats->nfills_large += nmalloc; - uint64_t nflush = arena_stats_read_u64(tsdn, &arena->stats, + uint64_t nflush = locked_read_u64(tsdn, + LOCKEDINT_MTX(arena->stats.mtx), &arena->stats.lstats[i].nflushes); - arena_stats_accum_u64(&lstats[i].nflushes, nflush); - arena_stats_accum_u64(&astats->nflushes_large, nflush); + locked_inc_u64_unsynchronized(&lstats[i].nflushes, nflush); + astats->nflushes_large += nflush; assert(nmalloc >= ndalloc); assert(nmalloc - ndalloc <= SIZE_T_MAX); size_t curlextents = (size_t)(nmalloc - ndalloc); lstats[i].curlextents += curlextents; - arena_stats_accum_zu(&astats->allocated_large, - curlextents * sz_index2size(SC_NBINS + i)); - } - - for (pszind_t i = 0; i < SC_NPSIZES; i++) { - size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes, - retained_bytes; - dirty = extents_nextents_get(&arena->extents_dirty, i); - muzzy = extents_nextents_get(&arena->extents_muzzy, i); - retained = extents_nextents_get(&arena->extents_retained, i); - dirty_bytes = extents_nbytes_get(&arena->extents_dirty, i); - muzzy_bytes = extents_nbytes_get(&arena->extents_muzzy, i); - retained_bytes = - extents_nbytes_get(&arena->extents_retained, i); - - atomic_store_zu(&estats[i].ndirty, dirty, ATOMIC_RELAXED); - atomic_store_zu(&estats[i].nmuzzy, muzzy, ATOMIC_RELAXED); - atomic_store_zu(&estats[i].nretained, retained, ATOMIC_RELAXED); - atomic_store_zu(&estats[i].dirty_bytes, dirty_bytes, - ATOMIC_RELAXED); - atomic_store_zu(&estats[i].muzzy_bytes, muzzy_bytes, - ATOMIC_RELAXED); - atomic_store_zu(&estats[i].retained_bytes, retained_bytes, - ATOMIC_RELAXED); - } - - arena_stats_unlock(tsdn, &arena->stats); - - /* tcache_bytes counts currently cached bytes. */ - atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED); + astats->allocated_large += + curlextents * sz_index2size(SC_NBINS + i); + } + + pa_shard_stats_merge(tsdn, &arena->pa_shard, &astats->pa_shard_stats, + estats, hpastats, secstats, &astats->resident); + + LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx); + + /* Currently cached bytes and sanitizer-stashed bytes in tcache. */ + astats->tcache_bytes = 0; + astats->tcache_stashed_bytes = 0; malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); cache_bin_array_descriptor_t *descriptor; ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) { - szind_t i = 0; - for (; i < SC_NBINS; i++) { - cache_bin_t *tbin = &descriptor->bins_small[i]; - arena_stats_accum_zu(&astats->tcache_bytes, - tbin->ncached * sz_index2size(i)); - } - for (; i < nhbins; i++) { - cache_bin_t *tbin = &descriptor->bins_large[i]; - arena_stats_accum_zu(&astats->tcache_bytes, - tbin->ncached * sz_index2size(i)); + for (szind_t i = 0; i < nhbins; i++) { + cache_bin_t *cache_bin = &descriptor->bins[i]; + cache_bin_sz_t ncached, nstashed; + cache_bin_nitems_get_remote(cache_bin, + &tcache_bin_info[i], &ncached, &nstashed); + + astats->tcache_bytes += ncached * sz_index2size(i); + astats->tcache_stashed_bytes += nstashed * + sz_index2size(i); } } malloc_mutex_prof_read(tsdn, @@ -224,21 +178,11 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, /* Gather per arena mutex profiling data. 
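Illustrative sketch, not part of the imported sources: the merge pattern used in arena_stats_merge() above, reading each per-arena counter while that arena's stats mutex is held and accumulating into caller-owned totals that need no extra synchronization. pthread_mutex here stands in for LOCKEDINT_MTX, and the counter names are simplified.

```
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	pthread_mutex_t mtx;
	uint64_t nmalloc;
	uint64_t ndalloc;
} demo_arena_stats_t;

typedef struct {
	uint64_t nmalloc;
	uint64_t ndalloc;
} demo_merged_stats_t;

static void
demo_stats_merge(demo_arena_stats_t *src, demo_merged_stats_t *dst) {
	pthread_mutex_lock(&src->mtx);       /* LOCKEDINT_MTX_LOCK */
	uint64_t nmalloc = src->nmalloc;     /* locked_read_u64 */
	uint64_t ndalloc = src->ndalloc;
	pthread_mutex_unlock(&src->mtx);     /* LOCKEDINT_MTX_UNLOCK */

	/* The destination is private to the merging thread: plain adds suffice. */
	dst->nmalloc += nmalloc;
	dst->ndalloc += ndalloc;
}

int main(void) {
	demo_arena_stats_t arena = { PTHREAD_MUTEX_INITIALIZER, 10, 4 };
	demo_merged_stats_t merged = { 0, 0 };
	demo_stats_merge(&arena, &merged);
	printf("nmalloc=%llu ndalloc=%llu\n",
	    (unsigned long long)merged.nmalloc,
	    (unsigned long long)merged.ndalloc);
	return 0;
}
```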
*/ READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large); - READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx, - arena_prof_mutex_extent_avail) - READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx, - arena_prof_mutex_extents_dirty) - READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx, - arena_prof_mutex_extents_muzzy) - READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx, - arena_prof_mutex_extents_retained) - READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx, - arena_prof_mutex_decay_dirty) - READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx, - arena_prof_mutex_decay_muzzy) READ_ARENA_MUTEX_PROF_DATA(base->mtx, - arena_prof_mutex_base) + arena_prof_mutex_base); #undef READ_ARENA_MUTEX_PROF_DATA + pa_shard_mtx_stats_read(tsdn, &arena->pa_shard, + astats->mutex_prof_data); nstime_copy(&astats->uptime, &arena->create_time); nstime_update(&astats->uptime); @@ -247,55 +191,67 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, for (szind_t i = 0; i < SC_NBINS; i++) { for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { bin_stats_merge(tsdn, &bstats[i], - &arena->bins[i].bin_shards[j]); + arena_get_bin(arena, i, j)); } } } -void -arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent) { +static void +arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena, + bool is_background_thread) { + if (!background_thread_enabled() || is_background_thread) { + return; + } + background_thread_info_t *info = + arena_background_thread_info_get(arena); + if (background_thread_indefinite_sleep(info)) { + arena_maybe_do_deferred_work(tsdn, arena, + &arena->pa_shard.pac.decay_dirty, 0); + } +} + +/* + * React to deferred work generated by a PAI function. + */ +void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); - extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty, - extent); - if (arena_dirty_decay_ms_get(arena) == 0) { + if (decay_immediately(&arena->pa_shard.pac.decay_dirty)) { arena_decay_dirty(tsdn, arena, false, true); - } else { - arena_background_thread_inactivity_check(tsdn, arena, false); } + arena_background_thread_inactivity_check(tsdn, arena, false); } static void * -arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) { +arena_slab_reg_alloc(edata_t *slab, const bin_info_t *bin_info) { void *ret; - arena_slab_data_t *slab_data = extent_slab_data_get(slab); + slab_data_t *slab_data = edata_slab_data_get(slab); size_t regind; - assert(extent_nfree_get(slab) > 0); + assert(edata_nfree_get(slab) > 0); assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info)); regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info); - ret = (void *)((uintptr_t)extent_addr_get(slab) + + ret = (void *)((uintptr_t)edata_addr_get(slab) + (uintptr_t)(bin_info->reg_size * regind)); - extent_nfree_dec(slab); + edata_nfree_dec(slab); return ret; } static void -arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info, +arena_slab_reg_alloc_batch(edata_t *slab, const bin_info_t *bin_info, unsigned cnt, void** ptrs) { - arena_slab_data_t *slab_data = extent_slab_data_get(slab); + slab_data_t *slab_data = edata_slab_data_get(slab); - assert(extent_nfree_get(slab) >= cnt); + assert(edata_nfree_get(slab) >= cnt); assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info)); #if (! 
defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE) for (unsigned i = 0; i < cnt; i++) { size_t regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info); - *(ptrs + i) = (void *)((uintptr_t)extent_addr_get(slab) + + *(ptrs + i) = (void *)((uintptr_t)edata_addr_get(slab) + (uintptr_t)(bin_info->reg_size * regind)); } #else @@ -316,7 +272,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info, * Load from memory locations only once, outside the * hot loop below. */ - uintptr_t base = (uintptr_t)extent_addr_get(slab); + uintptr_t base = (uintptr_t)edata_addr_get(slab); uintptr_t regsize = (uintptr_t)bin_info->reg_size; while (pop--) { size_t bit = cfs_lu(&g); @@ -328,56 +284,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info, slab_data->bitmap[group] = g; } #endif - extent_nfree_sub(slab, cnt); -} - -#ifndef JEMALLOC_JET -static -#endif -size_t -arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) { - size_t diff, regind; - - /* Freeing a pointer outside the slab can cause assertion failure. */ - assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab)); - assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab)); - /* Freeing an interior pointer can cause assertion failure. */ - assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) % - (uintptr_t)bin_infos[binind].reg_size == 0); - - diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)); - - /* Avoid doing division with a variable divisor. */ - regind = div_compute(&arena_binind_div_info[binind], diff); - - assert(regind < bin_infos[binind].nregs); - - return regind; -} - -static void -arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) { - szind_t binind = extent_szind_get(slab); - const bin_info_t *bin_info = &bin_infos[binind]; - size_t regind = arena_slab_regind(slab, binind, ptr); - - assert(extent_nfree_get(slab) < bin_info->nregs); - /* Freeing an unallocated pointer can cause assertion failure. */ - assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind)); - - bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind); - extent_nfree_inc(slab); -} - -static void -arena_nactive_add(arena_t *arena, size_t add_pages) { - atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED); -} - -static void -arena_nactive_sub(arena_t *arena, size_t sub_pages) { - assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages); - atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED); + edata_nfree_sub(slab, cnt); } static void @@ -392,7 +299,7 @@ arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { index = sz_size2index(usize); hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0; - arena_stats_add_u64(tsdn, &arena->stats, + locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx), &arena->stats.lstats[hindex].nmalloc, 1); } @@ -408,551 +315,118 @@ arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { index = sz_size2index(usize); hindex = (index >= SC_NBINS) ? 
index - SC_NBINS : 0; - arena_stats_add_u64(tsdn, &arena->stats, + locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx), &arena->stats.lstats[hindex].ndalloc, 1); } static void arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize, size_t usize) { - arena_large_dalloc_stats_update(tsdn, arena, oldusize); arena_large_malloc_stats_update(tsdn, arena, usize); + arena_large_dalloc_stats_update(tsdn, arena, oldusize); } -static bool -arena_may_have_muzzy(arena_t *arena) { - return (pages_can_purge_lazy && (arena_muzzy_decay_ms_get(arena) != 0)); -} - -extent_t * +edata_t * arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, - size_t alignment, bool *zero) { - extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + size_t alignment, bool zero) { + bool deferred_work_generated = false; + szind_t szind = sz_size2index(usize); + size_t esize = usize + sz_large_pad; - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); + bool guarded = san_large_extent_decide_guard(tsdn, + arena_get_ehooks(arena), esize, alignment); + edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment, + /* slab */ false, szind, zero, guarded, &deferred_work_generated); + assert(deferred_work_generated == false); - szind_t szind = sz_size2index(usize); - size_t mapped_add; - bool commit = true; - extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks, - &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false, - szind, zero, &commit); - if (extent == NULL && arena_may_have_muzzy(arena)) { - extent = extents_alloc(tsdn, arena, &extent_hooks, - &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment, - false, szind, zero, &commit); - } - size_t size = usize + sz_large_pad; - if (extent == NULL) { - extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL, - usize, sz_large_pad, alignment, false, szind, zero, - &commit); + if (edata != NULL) { if (config_stats) { - /* - * extent may be NULL on OOM, but in that case - * mapped_add isn't used below, so there's no need to - * conditionlly set it to 0 here. 
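The large-allocation stats in this hunk are now bumped under a single stats mutex (the LOCKEDINT_MTX/locked_inc_u64 calls) instead of the old arena_stats_add_u64 path. A minimal standalone sketch of that locked-counter pattern using plain pthreads; the names locked_u64_t and locked_inc_u64_demo are invented for the example and are not jemalloc API:

```
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t mtx;
    uint64_t val;
} locked_u64_t;

static void
locked_inc_u64_demo(locked_u64_t *ctr, uint64_t n) {
    pthread_mutex_lock(&ctr->mtx);
    ctr->val += n;              /* every update happens under ctr->mtx */
    pthread_mutex_unlock(&ctr->mtx);
}

int
main(void) {
    locked_u64_t nmalloc = { PTHREAD_MUTEX_INITIALIZER, 0 };
    locked_inc_u64_demo(&nmalloc, 1);
    printf("nmalloc = %llu\n", (unsigned long long)nmalloc.val);
    return 0;
}
```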
- */ - mapped_add = size; + LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx); + arena_large_malloc_stats_update(tsdn, arena, usize); + LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx); } - } else if (config_stats) { - mapped_add = 0; } - if (extent != NULL) { - if (config_stats) { - arena_stats_lock(tsdn, &arena->stats); - arena_large_malloc_stats_update(tsdn, arena, usize); - if (mapped_add != 0) { - arena_stats_add_zu(tsdn, &arena->stats, - &arena->stats.mapped, mapped_add); - } - arena_stats_unlock(tsdn, &arena->stats); - } - arena_nactive_add(arena, size >> LG_PAGE); + if (edata != NULL && sz_large_pad != 0) { + arena_cache_oblivious_randomize(tsdn, arena, edata, alignment); } - return extent; + return edata; } void -arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { +arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, edata_t *edata) { if (config_stats) { - arena_stats_lock(tsdn, &arena->stats); + LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx); arena_large_dalloc_stats_update(tsdn, arena, - extent_usize_get(extent)); - arena_stats_unlock(tsdn, &arena->stats); + edata_usize_get(edata)); + LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx); } - arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE); } void -arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent, +arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, edata_t *edata, size_t oldusize) { - size_t usize = extent_usize_get(extent); - size_t udiff = oldusize - usize; + size_t usize = edata_usize_get(edata); if (config_stats) { - arena_stats_lock(tsdn, &arena->stats); + LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx); arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); - arena_stats_unlock(tsdn, &arena->stats); + LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx); } - arena_nactive_sub(arena, udiff >> LG_PAGE); } void -arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent, +arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, edata_t *edata, size_t oldusize) { - size_t usize = extent_usize_get(extent); - size_t udiff = usize - oldusize; + size_t usize = edata_usize_get(edata); if (config_stats) { - arena_stats_lock(tsdn, &arena->stats); + LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx); arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); - arena_stats_unlock(tsdn, &arena->stats); - } - arena_nactive_add(arena, udiff >> LG_PAGE); -} - -static ssize_t -arena_decay_ms_read(arena_decay_t *decay) { - return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); -} - -static void -arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) { - atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED); -} - -static void -arena_decay_deadline_init(arena_decay_t *decay) { - /* - * Generate a new deadline that is uniformly random within the next - * epoch after the current one. 
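The deadline initialization being removed here picked the next decay deadline one interval past the epoch, plus a uniformly random jitter so that arenas do not purge in lockstep. A reduced sketch in nanoseconds, under the assumption that rand() stands in for jemalloc's internal PRNG and that jitter is applied whenever the interval is nonzero:

```
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Next decay deadline: epoch + interval + jitter in [0, interval). */
static uint64_t
decay_deadline_ns(uint64_t epoch_ns, uint64_t interval_ns) {
    uint64_t deadline = epoch_ns + interval_ns;
    if (interval_ns > 0) {
        deadline += (uint64_t)rand() % interval_ns;
    }
    return deadline;
}

int
main(void) {
    /* One 50ms decay epoch starting at t = 0. */
    printf("%llu\n", (unsigned long long)decay_deadline_ns(0, 50000000));
    return 0;
}
```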
- */ - nstime_copy(&decay->deadline, &decay->epoch); - nstime_add(&decay->deadline, &decay->interval); - if (arena_decay_ms_read(decay) > 0) { - nstime_t jitter; - - nstime_init(&jitter, prng_range_u64(&decay->jitter_state, - nstime_ns(&decay->interval))); - nstime_add(&decay->deadline, &jitter); - } -} - -static bool -arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) { - return (nstime_compare(&decay->deadline, time) <= 0); -} - -static size_t -arena_decay_backlog_npages_limit(const arena_decay_t *decay) { - uint64_t sum; - size_t npages_limit_backlog; - unsigned i; - - /* - * For each element of decay_backlog, multiply by the corresponding - * fixed-point smoothstep decay factor. Sum the products, then divide - * to round down to the nearest whole number of pages. - */ - sum = 0; - for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { - sum += decay->backlog[i] * h_steps[i]; - } - npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP); - - return npages_limit_backlog; -} - -static void -arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) { - size_t npages_delta = (current_npages > decay->nunpurged) ? - current_npages - decay->nunpurged : 0; - decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta; - - if (config_debug) { - if (current_npages > decay->ceil_npages) { - decay->ceil_npages = current_npages; - } - size_t npages_limit = arena_decay_backlog_npages_limit(decay); - assert(decay->ceil_npages >= npages_limit); - if (decay->ceil_npages > npages_limit) { - decay->ceil_npages = npages_limit; - } - } -} - -static void -arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64, - size_t current_npages) { - if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) { - memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) * - sizeof(size_t)); - } else { - size_t nadvance_z = (size_t)nadvance_u64; - - assert((uint64_t)nadvance_z == nadvance_u64); - - memmove(decay->backlog, &decay->backlog[nadvance_z], - (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t)); - if (nadvance_z > 1) { - memset(&decay->backlog[SMOOTHSTEP_NSTEPS - - nadvance_z], 0, (nadvance_z-1) * sizeof(size_t)); - } - } - - arena_decay_backlog_update_last(decay, current_npages); -} - -static void -arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, - extents_t *extents, size_t current_npages, size_t npages_limit, - bool is_background_thread) { - if (current_npages > npages_limit) { - arena_decay_to_limit(tsdn, arena, decay, extents, false, - npages_limit, current_npages - npages_limit, - is_background_thread); - } -} - -static void -arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time, - size_t current_npages) { - assert(arena_decay_deadline_reached(decay, time)); - - nstime_t delta; - nstime_copy(&delta, time); - nstime_subtract(&delta, &decay->epoch); - - uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval); - assert(nadvance_u64 > 0); - - /* Add nadvance_u64 decay intervals to epoch. */ - nstime_copy(&delta, &decay->interval); - nstime_imultiply(&delta, nadvance_u64); - nstime_add(&decay->epoch, &delta); - - /* Set a new deadline. */ - arena_decay_deadline_init(decay); - - /* Update the backlog. 
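The backlog updated here feeds arena_decay_backlog_npages_limit() (also removed in this hunk): each slot records how many pages turned unused during one past epoch, and a fixed-point weighted sum gives how many of those pages may still stay unpurged. A toy rendering; NSTEPS, BFP and the h[] table below are made-up values, not jemalloc's SMOOTHSTEP_* constants:

```
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NSTEPS 4
#define BFP    16   /* weights are scaled by 2^BFP */

static const uint64_t h[NSTEPS] = { 6553, 19660, 39321, 58982 };

static size_t
backlog_npages_limit(const size_t backlog[NSTEPS]) {
    uint64_t sum = 0;
    for (unsigned i = 0; i < NSTEPS; i++) {
        sum += (uint64_t)backlog[i] * h[i];
    }
    /* Divide by 2^BFP, rounding down to whole pages. */
    return (size_t)(sum >> BFP);
}

int
main(void) {
    size_t backlog[NSTEPS] = { 100, 100, 100, 100 };
    printf("%zu\n", backlog_npages_limit(backlog));  /* prints 189 */
    return 0;
}
```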
*/ - arena_decay_backlog_update(decay, nadvance_u64, current_npages); -} - -static void -arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, - extents_t *extents, const nstime_t *time, bool is_background_thread) { - size_t current_npages = extents_npages_get(extents); - arena_decay_epoch_advance_helper(decay, time, current_npages); - - size_t npages_limit = arena_decay_backlog_npages_limit(decay); - /* We may unlock decay->mtx when try_purge(). Finish logging first. */ - decay->nunpurged = (npages_limit > current_npages) ? npages_limit : - current_npages; - - if (!background_thread_enabled() || is_background_thread) { - arena_decay_try_purge(tsdn, arena, decay, extents, - current_npages, npages_limit, is_background_thread); - } -} - -static void -arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) { - arena_decay_ms_write(decay, decay_ms); - if (decay_ms > 0) { - nstime_init(&decay->interval, (uint64_t)decay_ms * - KQU(1000000)); - nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS); - } - - nstime_init(&decay->epoch, 0); - nstime_update(&decay->epoch); - decay->jitter_state = (uint64_t)(uintptr_t)decay; - arena_decay_deadline_init(decay); - decay->nunpurged = 0; - memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t)); -} - -static bool -arena_decay_init(arena_decay_t *decay, ssize_t decay_ms, - arena_stats_decay_t *stats) { - if (config_debug) { - for (size_t i = 0; i < sizeof(arena_decay_t); i++) { - assert(((char *)decay)[i] == 0); - } - decay->ceil_npages = 0; - } - if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY, - malloc_mutex_rank_exclusive)) { - return true; - } - decay->purging = false; - arena_decay_reinit(decay, decay_ms); - /* Memory is zeroed, so there is no need to clear stats. */ - if (config_stats) { - decay->stats = stats; - } - return false; -} - -static bool -arena_decay_ms_valid(ssize_t decay_ms) { - if (decay_ms < -1) { - return false; - } - if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX * - KQU(1000)) { - return true; + LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx); } - return false; } -static bool -arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, - extents_t *extents, bool is_background_thread) { - malloc_mutex_assert_owner(tsdn, &decay->mtx); - - /* Purge all or nothing if the option is disabled. */ - ssize_t decay_ms = arena_decay_ms_read(decay); - if (decay_ms <= 0) { - if (decay_ms == 0) { - arena_decay_to_limit(tsdn, arena, decay, extents, false, - 0, extents_npages_get(extents), - is_background_thread); - } - return false; - } - - nstime_t time; - nstime_init(&time, 0); - nstime_update(&time); - if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time) - > 0)) { - /* - * Time went backwards. Move the epoch back in time and - * generate a new deadline, with the expectation that time - * typically flows forward for long enough periods of time that - * epochs complete. Unfortunately, this strategy is susceptible - * to clock jitter triggering premature epoch advances, but - * clock jitter estimation and compensation isn't feasible here - * because calls into this code are event-driven. - */ - nstime_copy(&decay->epoch, &time); - arena_decay_deadline_init(decay); +/* + * In situations where we're not forcing a decay (i.e. because the user + * specifically requested it), should we purge ourselves, or wait for the + * background thread to get to it. 
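The decision this comment introduces (added just below as arena_decide_unforced_purge_eagerness()) is easy to misread in diff form, so here is the same logic as a standalone program. The redundant !is_background_thread test in the middle branch is dropped and the enum names are shortened; this is a rendering for clarity, not the vendored code:

```
#include <stdbool.h>
#include <stdio.h>

typedef enum {
    PURGE_ALWAYS,
    PURGE_NEVER,
    PURGE_ON_EPOCH_ADVANCE
} purge_eagerness_t;

static purge_eagerness_t
decide_unforced_purge_eagerness(bool is_background_thread,
    bool background_thread_enabled) {
    if (is_background_thread) {
        return PURGE_ALWAYS;
    } else if (background_thread_enabled) {
        return PURGE_NEVER;     /* leave the work to the bg thread */
    } else {
        return PURGE_ON_EPOCH_ADVANCE;
    }
}

int
main(void) {
    printf("%d\n", decide_unforced_purge_eagerness(false, true));
    return 0;
}
```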
+ */ +static pac_purge_eagerness_t +arena_decide_unforced_purge_eagerness(bool is_background_thread) { + if (is_background_thread) { + return PAC_PURGE_ALWAYS; + } else if (!is_background_thread && background_thread_enabled()) { + return PAC_PURGE_NEVER; } else { - /* Verify that time does not go backwards. */ - assert(nstime_compare(&decay->epoch, &time) <= 0); + return PAC_PURGE_ON_EPOCH_ADVANCE; } - - /* - * If the deadline has been reached, advance to the current epoch and - * purge to the new limit if necessary. Note that dirty pages created - * during the current epoch are not subject to purge until a future - * epoch, so as a result purging only happens during epoch advances, or - * being triggered by background threads (scheduled event). - */ - bool advance_epoch = arena_decay_deadline_reached(decay, &time); - if (advance_epoch) { - arena_decay_epoch_advance(tsdn, arena, decay, extents, &time, - is_background_thread); - } else if (is_background_thread) { - arena_decay_try_purge(tsdn, arena, decay, extents, - extents_npages_get(extents), - arena_decay_backlog_npages_limit(decay), - is_background_thread); - } - - return advance_epoch; -} - -static ssize_t -arena_decay_ms_get(arena_decay_t *decay) { - return arena_decay_ms_read(decay); -} - -ssize_t -arena_dirty_decay_ms_get(arena_t *arena) { - return arena_decay_ms_get(&arena->decay_dirty); -} - -ssize_t -arena_muzzy_decay_ms_get(arena_t *arena) { - return arena_decay_ms_get(&arena->decay_muzzy); -} - -static bool -arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, - extents_t *extents, ssize_t decay_ms) { - if (!arena_decay_ms_valid(decay_ms)) { - return true; - } - - malloc_mutex_lock(tsdn, &decay->mtx); - /* - * Restart decay backlog from scratch, which may cause many dirty pages - * to be immediately purged. It would conceptually be possible to map - * the old backlog onto the new backlog, but there is no justification - * for such complexity since decay_ms changes are intended to be - * infrequent, either between the {-1, 0, >0} states, or a one-time - * arbitrary change during initial arena configuration. - */ - arena_decay_reinit(decay, decay_ms); - arena_maybe_decay(tsdn, arena, decay, extents, false); - malloc_mutex_unlock(tsdn, &decay->mtx); - - return false; } bool -arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, +arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state, ssize_t decay_ms) { - return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty, - &arena->extents_dirty, decay_ms); -} - -bool -arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, - ssize_t decay_ms) { - return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy, - &arena->extents_muzzy, decay_ms); -} - -static size_t -arena_stash_decayed(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit, - size_t npages_decay_max, extent_list_t *decay_extents) { - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); - - /* Stash extents according to npages_limit. 
*/ - size_t nstashed = 0; - extent_t *extent; - while (nstashed < npages_decay_max && - (extent = extents_evict(tsdn, arena, r_extent_hooks, extents, - npages_limit)) != NULL) { - extent_list_append(decay_extents, extent); - nstashed += extent_size_get(extent) >> LG_PAGE; - } - return nstashed; + pac_purge_eagerness_t eagerness = arena_decide_unforced_purge_eagerness( + /* is_background_thread */ false); + return pa_decay_ms_set(tsdn, &arena->pa_shard, state, decay_ms, + eagerness); } -static size_t -arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents, - bool all, extent_list_t *decay_extents, bool is_background_thread) { - size_t nmadvise, nunmapped; - size_t npurged; - - if (config_stats) { - nmadvise = 0; - nunmapped = 0; - } - npurged = 0; - - ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); - for (extent_t *extent = extent_list_first(decay_extents); extent != - NULL; extent = extent_list_first(decay_extents)) { - if (config_stats) { - nmadvise++; - } - size_t npages = extent_size_get(extent) >> LG_PAGE; - npurged += npages; - extent_list_remove(decay_extents, extent); - switch (extents_state_get(extents)) { - case extent_state_active: - not_reached(); - case extent_state_dirty: - if (!all && muzzy_decay_ms != 0 && - !extent_purge_lazy_wrapper(tsdn, arena, - r_extent_hooks, extent, 0, - extent_size_get(extent))) { - extents_dalloc(tsdn, arena, r_extent_hooks, - &arena->extents_muzzy, extent); - arena_background_thread_inactivity_check(tsdn, - arena, is_background_thread); - break; - } - /* Fall through. */ - case extent_state_muzzy: - extent_dalloc_wrapper(tsdn, arena, r_extent_hooks, - extent); - if (config_stats) { - nunmapped += npages; - } - break; - case extent_state_retained: - default: - not_reached(); - } - } - - if (config_stats) { - arena_stats_lock(tsdn, &arena->stats); - arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge, - 1); - arena_stats_add_u64(tsdn, &arena->stats, - &decay->stats->nmadvise, nmadvise); - arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged, - npurged); - arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped, - nunmapped << LG_PAGE); - arena_stats_unlock(tsdn, &arena->stats); - } - - return npurged; -} - -/* - * npages_limit: Decay at most npages_decay_max pages without violating the - * invariant: (extents_npages_get(extents) >= npages_limit). We need an upper - * bound on number of pages in order to prevent unbounded growth (namely in - * stashed), otherwise unbounded new pages could be added to extents during the - * current decay run, so that the purging thread never finishes. 
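The invariant spelled out in the comment above (never drop below npages_limit, and never purge more than npages_decay_max pages in one pass, so concurrently added pages cannot keep the purging thread busy forever) reduces to a loop with two bounds. A counter-only sketch with the extent bookkeeping simulated by plain integers:

```
#include <stddef.h>
#include <stdio.h>

static size_t
decay_to_limit(size_t *npages, size_t npages_limit, size_t npages_decay_max) {
    size_t npurged = 0;
    while (*npages > npages_limit && npurged < npages_decay_max) {
        /* Evict and purge one page worth of extents. */
        (*npages)--;
        npurged++;
    }
    return npurged;
}

int
main(void) {
    size_t npages = 1000;
    size_t done = decay_to_limit(&npages, 200, 512);
    printf("purged %zu, %zu remain\n", done, npages);  /* 512, 488 */
    return 0;
}
```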
- */ -static void -arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, - extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max, - bool is_background_thread) { - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 1); - malloc_mutex_assert_owner(tsdn, &decay->mtx); - - if (decay->purging) { - return; - } - decay->purging = true; - malloc_mutex_unlock(tsdn, &decay->mtx); - - extent_hooks_t *extent_hooks = extent_hooks_get(arena); - - extent_list_t decay_extents; - extent_list_init(&decay_extents); - - size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents, - npages_limit, npages_decay_max, &decay_extents); - if (npurge != 0) { - size_t npurged = arena_decay_stashed(tsdn, arena, - &extent_hooks, decay, extents, all, &decay_extents, - is_background_thread); - assert(npurged == npurge); - } - - malloc_mutex_lock(tsdn, &decay->mtx); - decay->purging = false; +ssize_t +arena_decay_ms_get(arena_t *arena, extent_state_t state) { + return pa_decay_ms_get(&arena->pa_shard, state); } static bool -arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, - extents_t *extents, bool is_background_thread, bool all) { +arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay, + pac_decay_stats_t *decay_stats, ecache_t *ecache, + bool is_background_thread, bool all) { if (all) { malloc_mutex_lock(tsdn, &decay->mtx); - arena_decay_to_limit(tsdn, arena, decay, extents, all, 0, - extents_npages_get(extents), is_background_thread); + pac_decay_all(tsdn, &arena->pa_shard.pac, decay, decay_stats, + ecache, /* fully_decay */ all); malloc_mutex_unlock(tsdn, &decay->mtx); - return false; } @@ -960,20 +434,20 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, /* No need to wait if another thread is in progress. */ return true; } - - bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents, - is_background_thread); + pac_purge_eagerness_t eagerness = + arena_decide_unforced_purge_eagerness(is_background_thread); + bool epoch_advanced = pac_maybe_decay_purge(tsdn, &arena->pa_shard.pac, + decay, decay_stats, ecache, eagerness); size_t npages_new; if (epoch_advanced) { /* Backlog is updated on epoch advance. 
*/ - npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1]; + npages_new = decay_epoch_npages_delta(decay); } malloc_mutex_unlock(tsdn, &decay->mtx); if (have_background_thread && background_thread_enabled() && epoch_advanced && !is_background_thread) { - background_thread_interval_check(tsdn, arena, decay, - npages_new); + arena_maybe_do_deferred_work(tsdn, arena, decay, npages_new); } return false; @@ -982,53 +456,143 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { - return arena_decay_impl(tsdn, arena, &arena->decay_dirty, - &arena->extents_dirty, is_background_thread, all); + return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_dirty, + &arena->pa_shard.pac.stats->decay_dirty, + &arena->pa_shard.pac.ecache_dirty, is_background_thread, all); } static bool arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { - return arena_decay_impl(tsdn, arena, &arena->decay_muzzy, - &arena->extents_muzzy, is_background_thread, all); + if (pa_shard_dont_decay_muzzy(&arena->pa_shard)) { + return false; + } + return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_muzzy, + &arena->pa_shard.pac.stats->decay_muzzy, + &arena->pa_shard.pac.ecache_muzzy, is_background_thread, all); } void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { + if (all) { + /* + * We should take a purge of "all" to mean "save as much memory + * as possible", including flushing any caches (for situations + * like thread death, or manual purge calls). + */ + sec_flush(tsdn, &arena->pa_shard.hpa_sec); + } if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) { return; } arena_decay_muzzy(tsdn, arena, is_background_thread, all); } +static bool +arena_should_decay_early(tsdn_t *tsdn, arena_t *arena, decay_t *decay, + background_thread_info_t *info, nstime_t *remaining_sleep, + size_t npages_new) { + malloc_mutex_assert_owner(tsdn, &info->mtx); + + if (malloc_mutex_trylock(tsdn, &decay->mtx)) { + return false; + } + + if (!decay_gradually(decay)) { + malloc_mutex_unlock(tsdn, &decay->mtx); + return false; + } + + nstime_init(remaining_sleep, background_thread_wakeup_time_get(info)); + if (nstime_compare(remaining_sleep, &decay->epoch) <= 0) { + malloc_mutex_unlock(tsdn, &decay->mtx); + return false; + } + nstime_subtract(remaining_sleep, &decay->epoch); + if (npages_new > 0) { + uint64_t npurge_new = decay_npages_purge_in(decay, + remaining_sleep, npages_new); + info->npages_to_purge_new += npurge_new; + } + malloc_mutex_unlock(tsdn, &decay->mtx); + return info->npages_to_purge_new > + ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD; +} + +/* + * Check if deferred work needs to be done sooner than planned. + * For decay we might want to wake up earlier because of an influx of dirty + * pages. Rather than waiting for previously estimated time, we proactively + * purge those pages. + * If background thread sleeps indefinitely, always wake up because some + * deferred work has been generated. + */ static void -arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) { - arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE); +arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay, + size_t npages_new) { + background_thread_info_t *info = arena_background_thread_info_get( + arena); + if (malloc_mutex_trylock(tsdn, &info->mtx)) { + /* + * Background thread may hold the mutex for a long period of + * time. 
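The trylock above is the point of this comment: an application thread must never block behind the background thread's mutex, so on contention the early wake-up is simply skipped and left for a later epoch. A generic pthread rendition of that non-blocking shape; do_deferred_work() is a hypothetical stand-in, not jemalloc code:

```
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t info_mtx = PTHREAD_MUTEX_INITIALIZER;

static void
do_deferred_work(void) {
    printf("woke the background thread early\n");
}

static void
maybe_do_deferred_work(void) {
    if (pthread_mutex_trylock(&info_mtx) != 0) {
        /* Contended: stay non-blocking, retry on a future event. */
        return;
    }
    do_deferred_work();
    pthread_mutex_unlock(&info_mtx);
}

int
main(void) {
    maybe_do_deferred_work();
    return 0;
}
```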
We'd like to avoid the variance on application + * threads. So keep this non-blocking, and leave the work to a + * future epoch. + */ + return; + } + if (!background_thread_is_started(info)) { + goto label_done; + } + + nstime_t remaining_sleep; + if (background_thread_indefinite_sleep(info)) { + background_thread_wakeup_early(info, NULL); + } else if (arena_should_decay_early(tsdn, arena, decay, info, + &remaining_sleep, npages_new)) { + info->npages_to_purge_new = 0; + background_thread_wakeup_early(info, &remaining_sleep); + } +label_done: + malloc_mutex_unlock(tsdn, &info->mtx); +} - extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; - arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab); +/* Called from background threads. */ +void +arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena) { + arena_decay(tsdn, arena, true, false); + pa_shard_do_deferred_work(tsdn, &arena->pa_shard); +} + +void +arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) { + bool deferred_work_generated = false; + pa_dalloc(tsdn, &arena->pa_shard, slab, &deferred_work_generated); + if (deferred_work_generated) { + arena_handle_deferred_work(tsdn, arena); + } } static void -arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) { - assert(extent_nfree_get(slab) > 0); - extent_heap_insert(&bin->slabs_nonfull, slab); +arena_bin_slabs_nonfull_insert(bin_t *bin, edata_t *slab) { + assert(edata_nfree_get(slab) > 0); + edata_heap_insert(&bin->slabs_nonfull, slab); if (config_stats) { bin->stats.nonfull_slabs++; } } static void -arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) { - extent_heap_remove(&bin->slabs_nonfull, slab); +arena_bin_slabs_nonfull_remove(bin_t *bin, edata_t *slab) { + edata_heap_remove(&bin->slabs_nonfull, slab); if (config_stats) { bin->stats.nonfull_slabs--; } } -static extent_t * +static edata_t * arena_bin_slabs_nonfull_tryget(bin_t *bin) { - extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull); + edata_t *slab = edata_heap_remove_first(&bin->slabs_nonfull); if (slab == NULL) { return NULL; } @@ -1040,30 +604,30 @@ arena_bin_slabs_nonfull_tryget(bin_t *bin) { } static void -arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) { - assert(extent_nfree_get(slab) == 0); +arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, edata_t *slab) { + assert(edata_nfree_get(slab) == 0); /* * Tracking extents is required by arena_reset, which is not allowed - * for auto arenas. Bypass this step to avoid touching the extent + * for auto arenas. Bypass this step to avoid touching the edata * linkage (often results in cache misses) for auto arenas. 
*/ if (arena_is_auto(arena)) { return; } - extent_list_append(&bin->slabs_full, slab); + edata_list_active_append(&bin->slabs_full, slab); } static void -arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) { +arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, edata_t *slab) { if (arena_is_auto(arena)) { return; } - extent_list_remove(&bin->slabs_full, slab); + edata_list_active_remove(&bin->slabs_full, slab); } static void arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) { - extent_t *slab; + edata_t *slab; malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); if (bin->slabcur != NULL) { @@ -1073,13 +637,13 @@ arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) { arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); } - while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) { + while ((slab = edata_heap_remove_first(&bin->slabs_nonfull)) != NULL) { malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); } - for (slab = extent_list_first(&bin->slabs_full); slab != NULL; - slab = extent_list_first(&bin->slabs_full)) { + for (slab = edata_list_active_first(&bin->slabs_full); slab != NULL; + slab = edata_list_active_first(&bin->slabs_full)) { arena_bin_slabs_full_remove(arena, bin, slab); malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); @@ -1111,16 +675,15 @@ arena_reset(tsd_t *tsd, arena_t *arena) { /* Large allocations. */ malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); - for (extent_t *extent = extent_list_first(&arena->large); extent != - NULL; extent = extent_list_first(&arena->large)) { - void *ptr = extent_base_get(extent); + for (edata_t *edata = edata_list_active_first(&arena->large); + edata != NULL; edata = edata_list_active_first(&arena->large)) { + void *ptr = edata_base_get(edata); size_t usize; malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); - alloc_ctx_t alloc_ctx; - rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); - rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, - (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + emap_alloc_ctx_t alloc_ctx; + emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr, + &alloc_ctx); assert(alloc_ctx.szind != SC_NSIZES); if (config_stats || (config_prof && opt_prof)) { @@ -1131,7 +694,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) { if (config_prof && opt_prof) { prof_free(tsd, ptr, usize, &alloc_ctx); } - large_dalloc(tsd_tsdn(tsd), extent); + large_dalloc(tsd_tsdn(tsd), edata); malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); } malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); @@ -1139,32 +702,95 @@ arena_reset(tsd_t *tsd, arena_t *arena) { /* Bins. 
*/ for (unsigned i = 0; i < SC_NBINS; i++) { for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { - arena_bin_reset(tsd, arena, - &arena->bins[i].bin_shards[j]); + arena_bin_reset(tsd, arena, arena_get_bin(arena, i, j)); } } + pa_shard_reset(tsd_tsdn(tsd), &arena->pa_shard); +} - atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); +static void +arena_prepare_base_deletion_sync_finish(tsd_t *tsd, malloc_mutex_t **mutexes, + unsigned n_mtx) { + for (unsigned i = 0; i < n_mtx; i++) { + malloc_mutex_lock(tsd_tsdn(tsd), mutexes[i]); + malloc_mutex_unlock(tsd_tsdn(tsd), mutexes[i]); + } } +#define ARENA_DESTROY_MAX_DELAYED_MTX 32 static void -arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) { +arena_prepare_base_deletion_sync(tsd_t *tsd, malloc_mutex_t *mtx, + malloc_mutex_t **delayed_mtx, unsigned *n_delayed) { + if (!malloc_mutex_trylock(tsd_tsdn(tsd), mtx)) { + /* No contention. */ + malloc_mutex_unlock(tsd_tsdn(tsd), mtx); + return; + } + unsigned n = *n_delayed; + assert(n < ARENA_DESTROY_MAX_DELAYED_MTX); + /* Add another to the batch. */ + delayed_mtx[n++] = mtx; + + if (n == ARENA_DESTROY_MAX_DELAYED_MTX) { + arena_prepare_base_deletion_sync_finish(tsd, delayed_mtx, n); + n = 0; + } + *n_delayed = n; +} + +static void +arena_prepare_base_deletion(tsd_t *tsd, base_t *base_to_destroy) { /* - * Iterate over the retained extents and destroy them. This gives the - * extent allocator underlying the extent hooks an opportunity to unmap - * all retained memory without having to keep its own metadata - * structures. In practice, virtual memory for dss-allocated extents is - * leaked here, so best practice is to avoid dss for arenas to be - * destroyed, or provide custom extent hooks that track retained - * dss-based extents for later reuse. + * In order to coalesce, emap_try_acquire_edata_neighbor will attempt to + * check neighbor edata's state to determine eligibility. This means + * under certain conditions, the metadata from an arena can be accessed + * w/o holding any locks from that arena. In order to guarantee safe + * memory access, the metadata and the underlying base allocator needs + * to be kept alive, until all pending accesses are done. + * + * 1) with opt_retain, the arena boundary implies the is_head state + * (tracked in the rtree leaf), and the coalesce flow will stop at the + * head state branch. Therefore no cross arena metadata access + * possible. + * + * 2) w/o opt_retain, the arena id needs to be read from the edata_t, + * meaning read only cross-arena metadata access is possible. The + * coalesce attempt will stop at the arena_id mismatch, and is always + * under one of the ecache locks. To allow safe passthrough of such + * metadata accesses, the loop below will iterate through all manual + * arenas' ecache locks. As all the metadata from this base allocator + * have been unlinked from the rtree, after going through all the + * relevant ecache locks, it's safe to say that a) pending accesses are + * all finished, and b) no new access will be generated. 
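The sync helpers added above lean on a standard drain idiom: once a mutex has been acquired and released, every critical section that was already inside it has finished. A one-mutex pthread sketch of that idea (the real code batches contended mutexes and drains the batch together, which this sketch does not model):

```
#include <pthread.h>

static void
drain_mutex(pthread_mutex_t *mtx) {
    if (pthread_mutex_trylock(mtx) == 0) {
        /* Uncontended: nobody was inside, nothing to wait for. */
        pthread_mutex_unlock(mtx);
        return;
    }
    /* Contended: wait for the current holder to leave, then let go. */
    pthread_mutex_lock(mtx);
    pthread_mutex_unlock(mtx);
}

int
main(void) {
    pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    drain_mutex(&m);
    return 0;
}
```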
*/ - extent_hooks_t *extent_hooks = extent_hooks_get(arena); - extent_t *extent; - while ((extent = extents_evict(tsdn, arena, &extent_hooks, - &arena->extents_retained, 0)) != NULL) { - extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent); + if (opt_retain) { + return; + } + unsigned destroy_ind = base_ind_get(base_to_destroy); + assert(destroy_ind >= manual_arena_base); + + tsdn_t *tsdn = tsd_tsdn(tsd); + malloc_mutex_t *delayed_mtx[ARENA_DESTROY_MAX_DELAYED_MTX]; + unsigned n_delayed = 0, total = narenas_total_get(); + for (unsigned i = 0; i < total; i++) { + if (i == destroy_ind) { + continue; + } + arena_t *arena = arena_get(tsdn, i, false); + if (arena == NULL) { + continue; + } + pac_t *pac = &arena->pa_shard.pac; + arena_prepare_base_deletion_sync(tsd, &pac->ecache_dirty.mtx, + delayed_mtx, &n_delayed); + arena_prepare_base_deletion_sync(tsd, &pac->ecache_muzzy.mtx, + delayed_mtx, &n_delayed); + arena_prepare_base_deletion_sync(tsd, &pac->ecache_retained.mtx, + delayed_mtx, &n_delayed); } + arena_prepare_base_deletion_sync_finish(tsd, delayed_mtx, n_delayed); } +#undef ARENA_DESTROY_MAX_DELAYED_MTX void arena_destroy(tsd_t *tsd, arena_t *arena) { @@ -1175,13 +801,10 @@ arena_destroy(tsd_t *tsd, arena_t *arena) { /* * No allocations have occurred since arena_reset() was called. * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached - * extents, so only retained extents may remain. + * extents, so only retained extents may remain and it's safe to call + * pa_shard_destroy_retained. */ - assert(extents_npages_get(&arena->extents_dirty) == 0); - assert(extents_npages_get(&arena->extents_muzzy) == 0); - - /* Deallocate retained memory. */ - arena_destroy_retained(tsd_tsdn(tsd), arena); + pa_shard_destroy(tsd_tsdn(tsd), &arena->pa_shard); /* * Remove the arena pointer from the arenas array. We rely on the fact @@ -1197,316 +820,370 @@ arena_destroy(tsd_t *tsd, arena_t *arena) { /* * Destroy the base allocator, which manages all metadata ever mapped by - * this arena. + * this arena. The prepare function will make sure no pending access to + * the metadata in this base anymore. 
*/ + arena_prepare_base_deletion(tsd, arena->base); base_delete(tsd_tsdn(tsd), arena->base); } -static extent_t * -arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info, - szind_t szind) { - extent_t *slab; - bool zero, commit; - - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); - - zero = false; - commit = true; - slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL, - bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit); - - if (config_stats && slab != NULL) { - arena_stats_mapped_add(tsdn, &arena->stats, - bin_info->slab_size); - } - - return slab; -} - -static extent_t * +static edata_t * arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard, const bin_info_t *bin_info) { + bool deferred_work_generated = false; witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); - extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; - szind_t szind = sz_size2index(bin_info->reg_size); - bool zero = false; - bool commit = true; - extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks, - &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true, - binind, &zero, &commit); - if (slab == NULL && arena_may_have_muzzy(arena)) { - slab = extents_alloc(tsdn, arena, &extent_hooks, - &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE, - true, binind, &zero, &commit); + bool guarded = san_slab_extent_decide_guard(tsdn, + arena_get_ehooks(arena)); + edata_t *slab = pa_alloc(tsdn, &arena->pa_shard, bin_info->slab_size, + /* alignment */ PAGE, /* slab */ true, /* szind */ binind, + /* zero */ false, guarded, &deferred_work_generated); + + if (deferred_work_generated) { + arena_handle_deferred_work(tsdn, arena); } + if (slab == NULL) { - slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks, - bin_info, szind); - if (slab == NULL) { - return NULL; - } + return NULL; } - assert(extent_slab_get(slab)); + assert(edata_slab_get(slab)); /* Initialize slab internals. */ - arena_slab_data_t *slab_data = extent_slab_data_get(slab); - extent_nfree_binshard_set(slab, bin_info->nregs, binshard); + slab_data_t *slab_data = edata_slab_data_get(slab); + edata_nfree_binshard_set(slab, bin_info->nregs, binshard); bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false); - arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE); - return slab; } -static extent_t * -arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin, - szind_t binind, unsigned binshard) { - extent_t *slab; - const bin_info_t *bin_info; - - /* Look for a usable slab. */ - slab = arena_bin_slabs_nonfull_tryget(bin); - if (slab != NULL) { - return slab; - } - /* No existing slabs have any space available. */ - - bin_info = &bin_infos[binind]; - - /* Allocate a new slab. */ - malloc_mutex_unlock(tsdn, &bin->lock); - /******************************/ - slab = arena_slab_alloc(tsdn, arena, binind, binshard, bin_info); - /********************************/ - malloc_mutex_lock(tsdn, &bin->lock); - if (slab != NULL) { - if (config_stats) { - bin->stats.nslabs++; - bin->stats.curslabs++; - } - return slab; +/* + * Before attempting the _with_fresh_slab approaches below, the _no_fresh_slab + * variants (i.e. through slabcur and nonfull) must be tried first. 
+ */ +static void +arena_bin_refill_slabcur_with_fresh_slab(tsdn_t *tsdn, arena_t *arena, + bin_t *bin, szind_t binind, edata_t *fresh_slab) { + malloc_mutex_assert_owner(tsdn, &bin->lock); + /* Only called after slabcur and nonfull both failed. */ + assert(bin->slabcur == NULL); + assert(edata_heap_first(&bin->slabs_nonfull) == NULL); + assert(fresh_slab != NULL); + + /* A new slab from arena_slab_alloc() */ + assert(edata_nfree_get(fresh_slab) == bin_infos[binind].nregs); + if (config_stats) { + bin->stats.nslabs++; + bin->stats.curslabs++; } + bin->slabcur = fresh_slab; +} - /* - * arena_slab_alloc() failed, but another thread may have made - * sufficient memory available while this one dropped bin->lock above, - * so search one more time. - */ - slab = arena_bin_slabs_nonfull_tryget(bin); - if (slab != NULL) { - return slab; - } +/* Refill slabcur and then alloc using the fresh slab */ +static void * +arena_bin_malloc_with_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin, + szind_t binind, edata_t *fresh_slab) { + malloc_mutex_assert_owner(tsdn, &bin->lock); + arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena, bin, binind, + fresh_slab); - return NULL; + return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]); } -/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */ -static void * -arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin, - szind_t binind, unsigned binshard) { - const bin_info_t *bin_info; - extent_t *slab; +static bool +arena_bin_refill_slabcur_no_fresh_slab(tsdn_t *tsdn, arena_t *arena, + bin_t *bin) { + malloc_mutex_assert_owner(tsdn, &bin->lock); + /* Only called after arena_slab_reg_alloc[_batch] failed. */ + assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0); - bin_info = &bin_infos[binind]; - if (!arena_is_auto(arena) && bin->slabcur != NULL) { - arena_bin_slabs_full_insert(arena, bin, bin->slabcur); - bin->slabcur = NULL; - } - slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind, binshard); if (bin->slabcur != NULL) { - /* - * Another thread updated slabcur while this one ran without the - * bin lock in arena_bin_nonfull_slab_get(). - */ - if (extent_nfree_get(bin->slabcur) > 0) { - void *ret = arena_slab_reg_alloc(bin->slabcur, - bin_info); - if (slab != NULL) { - /* - * arena_slab_alloc() may have allocated slab, - * or it may have been pulled from - * slabs_nonfull. Therefore it is unsafe to - * make any assumptions about how slab has - * previously been used, and - * arena_bin_lower_slab() must be called, as if - * a region were just deallocated from the slab. - */ - if (extent_nfree_get(slab) == bin_info->nregs) { - arena_dalloc_bin_slab(tsdn, arena, slab, - bin); - } else { - arena_bin_lower_slab(tsdn, arena, slab, - bin); - } - } - return ret; - } - arena_bin_slabs_full_insert(arena, bin, bin->slabcur); - bin->slabcur = NULL; - } - - if (slab == NULL) { - return NULL; } - bin->slabcur = slab; - assert(extent_nfree_get(bin->slabcur) > 0); + /* Look for a usable slab. */ + bin->slabcur = arena_bin_slabs_nonfull_tryget(bin); + assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) > 0); - return arena_slab_reg_alloc(slab, bin_info); + return (bin->slabcur == NULL); } -/* Choose a bin shard and return the locked bin. 
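The replacement arena_bin_choose() just below only resolves the shard and leaves locking to the caller; the lookup shape is a per-thread shard id indexing a small array of bin shards, which spreads lock contention across threads. The following is a schematic of that shape only. jemalloc assigns the shard when a thread first binds to the arena, not by the modulo used here, and bin_shard_t below is an invented placeholder:

```
#include <stddef.h>
#include <stdio.h>

#define N_SHARDS 4

typedef struct bin_shard_s {
    /* the real shard carries its own lock, slab lists and stats */
    size_t curregs;
} bin_shard_t;

static bin_shard_t shards[N_SHARDS];
static _Thread_local unsigned my_shard_seed;    /* set once per thread */

static bin_shard_t *
bin_choose(void) {
    return &shards[my_shard_seed % N_SHARDS];
}

int
main(void) {
    my_shard_seed = 3;
    bin_choose()->curregs++;
    printf("using shard %u\n", my_shard_seed % N_SHARDS);
    return 0;
}
```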
*/ bin_t * -arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind, - unsigned *binshard) { - bin_t *bin; +arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind, + unsigned *binshard_p) { + unsigned binshard; if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) { - *binshard = 0; + binshard = 0; } else { - *binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind]; + binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind]; } - assert(*binshard < bin_infos[binind].n_shards); - bin = &arena->bins[binind].bin_shards[*binshard]; - malloc_mutex_lock(tsdn, &bin->lock); - - return bin; + assert(binshard < bin_infos[binind].n_shards); + if (binshard_p != NULL) { + *binshard_p = binshard; + } + return arena_get_bin(arena, binind, binshard); } void -arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, - cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) { - unsigned i, nfill, cnt; +arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena, + cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind, + const unsigned nfill) { + assert(cache_bin_ncached_get_local(cache_bin, cache_bin_info) == 0); + + const bin_info_t *bin_info = &bin_infos[binind]; + + CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nfill); + cache_bin_init_ptr_array_for_fill(cache_bin, cache_bin_info, &ptrs, + nfill); + /* + * Bin-local resources are used first: 1) bin->slabcur, and 2) nonfull + * slabs. After both are exhausted, new slabs will be allocated through + * arena_slab_alloc(). + * + * Bin lock is only taken / released right before / after the while(...) + * refill loop, with new slab allocation (which has its own locking) + * kept outside of the loop. This setup facilitates flat combining, at + * the cost of the nested loop (through goto label_refill). + * + * To optimize for cases with contention and limited resources + * (e.g. hugepage-backed or non-overcommit arenas), each fill-iteration + * gets one chance of slab_alloc, and a retry of bin local resources + * after the slab allocation (regardless if slab_alloc failed, because + * the bin lock is dropped during the slab allocation). + * + * In other words, new slab allocation is allowed, as long as there was + * progress since the previous slab_alloc. This is tracked with + * made_progress below, initialized to true to jump start the first + * iteration. + * + * In other words (again), the loop will only terminate early (i.e. stop + * with filled < nfill) after going through the three steps: a) bin + * local exhausted, b) unlock and slab_alloc returns null, c) re-lock + * and bin local fails again. + */ + bool made_progress = true; + edata_t *fresh_slab = NULL; + bool alloc_and_retry = false; + unsigned filled = 0; + unsigned binshard; + bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard); + +label_refill: + malloc_mutex_lock(tsdn, &bin->lock); + + while (filled < nfill) { + /* Try batch-fill from slabcur first. */ + edata_t *slabcur = bin->slabcur; + if (slabcur != NULL && edata_nfree_get(slabcur) > 0) { + unsigned tofill = nfill - filled; + unsigned nfree = edata_nfree_get(slabcur); + unsigned cnt = tofill < nfree ? tofill : nfree; + + arena_slab_reg_alloc_batch(slabcur, bin_info, cnt, + &ptrs.ptr[filled]); + made_progress = true; + filled += cnt; + continue; + } + /* Next try refilling slabcur from nonfull slabs. */ + if (!arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) { + assert(bin->slabcur != NULL); + continue; + } + + /* Then see if a new slab was reserved already. 
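The long comment above describes a loop that drains bin-local resources first and only pays for a slab allocation with the bin lock dropped, gated by made_progress. Below is a self-contained toy that keeps just that control flow; regions and slabs are simulated with counters, and try_bin_local()/alloc_fresh_slab() are invented helpers standing in for the slabcur/nonfull steps and arena_slab_alloc():

```
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t slabcur_nfree = 0;   /* free regions in the current slab */
static unsigned slabs_left = 2;    /* fresh slabs still available */

/* Stand-in for the slabcur/nonfull steps: hand out up to `want` regions. */
static size_t
try_bin_local(void **ptrs, size_t want) {
    size_t n = want < slabcur_nfree ? want : slabcur_nfree;
    for (size_t i = 0; i < n; i++) {
        ptrs[i] = (void *)(uintptr_t)(0x1000 + i);   /* fake regions */
    }
    slabcur_nfree -= n;
    return n;
}

/* Stand-in for arena_slab_alloc(); done with the bin lock dropped. */
static bool
alloc_fresh_slab(void) {
    if (slabs_left == 0) {
        return false;
    }
    slabs_left--;
    slabcur_nfree = 8;   /* a fresh slab holds 8 regions in this toy */
    return true;
}

static size_t
fill_small(void **ptrs, size_t nfill) {
    size_t filled = 0;
    bool made_progress = true;   /* jump-starts the first iteration */

    for (;;) {
        /* (bin lock held here in the real code) */
        while (filled < nfill) {
            size_t n = try_bin_local(&ptrs[filled], nfill - filled);
            if (n == 0) {
                break;   /* bin-local resources exhausted */
            }
            filled += n;
            made_progress = true;
        }
        /* (bin lock dropped here) */
        if (filled == nfill || !made_progress) {
            break;   /* done, or no progress since the last slab_alloc */
        }
        made_progress = false;
        alloc_fresh_slab();   /* one chance per iteration; may fail */
    }
    return filled;
}

int
main(void) {
    void *ptrs[20];
    size_t filled = fill_small(ptrs, 20);
    printf("filled %zu of 20\n", filled);   /* 16: two slabs of 8 */
    return 0;
}
```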
*/ + if (fresh_slab != NULL) { + arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena, + bin, binind, fresh_slab); + assert(bin->slabcur != NULL); + fresh_slab = NULL; + continue; + } + + /* Try slab_alloc if made progress (or never did slab_alloc). */ + if (made_progress) { + assert(bin->slabcur == NULL); + assert(fresh_slab == NULL); + alloc_and_retry = true; + /* Alloc a new slab then come back. */ + break; + } + + /* OOM. */ + + assert(fresh_slab == NULL); + assert(!alloc_and_retry); + break; + } /* while (filled < nfill) loop. */ + + if (config_stats && !alloc_and_retry) { + bin->stats.nmalloc += filled; + bin->stats.nrequests += cache_bin->tstats.nrequests; + bin->stats.curregs += filled; + bin->stats.nfills++; + cache_bin->tstats.nrequests = 0; + } + + malloc_mutex_unlock(tsdn, &bin->lock); - assert(tbin->ncached == 0); + if (alloc_and_retry) { + assert(fresh_slab == NULL); + assert(filled < nfill); + assert(made_progress); - if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) { - prof_idump(tsdn); + fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard, + bin_info); + /* fresh_slab NULL case handled in the for loop. */ + + alloc_and_retry = false; + made_progress = false; + goto label_refill; + } + assert(filled == nfill || (fresh_slab == NULL && !made_progress)); + + /* Release if allocated but not used. */ + if (fresh_slab != NULL) { + assert(edata_nfree_get(fresh_slab) == bin_info->nregs); + arena_slab_dalloc(tsdn, arena, fresh_slab); + fresh_slab = NULL; } + cache_bin_finish_fill(cache_bin, cache_bin_info, &ptrs, filled); + arena_decay_tick(tsdn, arena); +} + +size_t +arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind, + void **ptrs, size_t nfill, bool zero) { + assert(binind < SC_NBINS); + const bin_info_t *bin_info = &bin_infos[binind]; + const size_t nregs = bin_info->nregs; + assert(nregs > 0); + const size_t usize = bin_info->reg_size; + + const bool manual_arena = !arena_is_auto(arena); unsigned binshard; - bin_t *bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard); - - for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> - tcache->lg_fill_div[binind]); i < nfill; i += cnt) { - extent_t *slab; - if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > - 0) { - unsigned tofill = nfill - i; - cnt = tofill < extent_nfree_get(slab) ? - tofill : extent_nfree_get(slab); - arena_slab_reg_alloc_batch( - slab, &bin_infos[binind], cnt, - tbin->avail - nfill + i); - } else { - cnt = 1; - void *ptr = arena_bin_malloc_hard(tsdn, arena, bin, - binind, binshard); - /* - * OOM. tbin->avail isn't yet filled down to its first - * element, so the successful allocations (if any) must - * be moved just before tbin->avail before bailing out. - */ - if (ptr == NULL) { - if (i > 0) { - memmove(tbin->avail - i, - tbin->avail - nfill, - i * sizeof(void *)); - } - break; - } - /* Insert such that low regions get used first. 
*/ - *(tbin->avail - nfill + i) = ptr; + bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard); + + size_t nslab = 0; + size_t filled = 0; + edata_t *slab = NULL; + edata_list_active_t fulls; + edata_list_active_init(&fulls); + + while (filled < nfill && (slab = arena_slab_alloc(tsdn, arena, binind, + binshard, bin_info)) != NULL) { + assert((size_t)edata_nfree_get(slab) == nregs); + ++nslab; + size_t batch = nfill - filled; + if (batch > nregs) { + batch = nregs; } - if (config_fill && unlikely(opt_junk_alloc)) { - for (unsigned j = 0; j < cnt; j++) { - void* ptr = *(tbin->avail - nfill + i + j); - arena_alloc_junk_small(ptr, &bin_infos[binind], - true); + assert(batch > 0); + arena_slab_reg_alloc_batch(slab, bin_info, (unsigned)batch, + &ptrs[filled]); + assert(edata_addr_get(slab) == ptrs[filled]); + if (zero) { + memset(ptrs[filled], 0, batch * usize); + } + filled += batch; + if (batch == nregs) { + if (manual_arena) { + edata_list_active_append(&fulls, slab); } + slab = NULL; } } + + malloc_mutex_lock(tsdn, &bin->lock); + /* + * Only the last slab can be non-empty, and the last slab is non-empty + * iff slab != NULL. + */ + if (slab != NULL) { + arena_bin_lower_slab(tsdn, arena, slab, bin); + } + if (manual_arena) { + edata_list_active_concat(&bin->slabs_full, &fulls); + } + assert(edata_list_active_empty(&fulls)); if (config_stats) { - bin->stats.nmalloc += i; - bin->stats.nrequests += tbin->tstats.nrequests; - bin->stats.curregs += i; - bin->stats.nfills++; - tbin->tstats.nrequests = 0; + bin->stats.nslabs += nslab; + bin->stats.curslabs += nslab; + bin->stats.nmalloc += filled; + bin->stats.nrequests += filled; + bin->stats.curregs += filled; } malloc_mutex_unlock(tsdn, &bin->lock); - tbin->ncached = i; + arena_decay_tick(tsdn, arena); + return filled; } -void -arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) { - if (!zero) { - memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size); +/* + * Without allocating a new slab, try arena_slab_reg_alloc() and re-fill + * bin->slabcur if necessary. 
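arena_malloc_small() below follows the same discipline: it drops the bin lock for the expensive slab allocation, retries the bin-local path after re-acquiring it (another thread may have refilled the bin in the meantime), and releases a fresh slab that turned out to be unnecessary. A generic pthread sketch of that unlock/allocate/relock/recheck shape; try_fast(), slow_alloc() and install() are invented stubs, not jemalloc functions:

```
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t bin_lock = PTHREAD_MUTEX_INITIALIZER;

static void *try_fast(void) { return NULL; }            /* bin-local path */
static void *slow_alloc(void) { return malloc(4096); }  /* "new slab" */
static void *install(void *fresh) { return fresh; }     /* adopt the slab */

static void *
alloc_small(void) {
    pthread_mutex_lock(&bin_lock);
    void *ret = try_fast();
    if (ret != NULL) {
        pthread_mutex_unlock(&bin_lock);
        return ret;
    }
    pthread_mutex_unlock(&bin_lock);
    void *fresh = slow_alloc();     /* slow path, lock dropped */
    pthread_mutex_lock(&bin_lock);
    ret = try_fast();               /* someone may have refilled the bin */
    if (ret == NULL && fresh != NULL) {
        ret = install(fresh);
        fresh = NULL;
    }
    pthread_mutex_unlock(&bin_lock);
    free(fresh);    /* allocated but ended up unused (or NULL) */
    return ret;
}

int
main(void) {
    free(alloc_small());
    return 0;
}
```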
+ */ +static void * +arena_bin_malloc_no_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin, + szind_t binind) { + malloc_mutex_assert_owner(tsdn, &bin->lock); + if (bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0) { + if (arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) { + return NULL; + } } -} -static void -arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) { - memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size); + assert(bin->slabcur != NULL && edata_nfree_get(bin->slabcur) > 0); + return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]); } -arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small = - arena_dalloc_junk_small_impl; static void * arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { - void *ret; - bin_t *bin; - size_t usize; - extent_t *slab; - assert(binind < SC_NBINS); - usize = sz_index2size(binind); + const bin_info_t *bin_info = &bin_infos[binind]; + size_t usize = sz_index2size(binind); unsigned binshard; - bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard); - - if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) { - ret = arena_slab_reg_alloc(slab, &bin_infos[binind]); - } else { - ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard); - } + bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard); + malloc_mutex_lock(tsdn, &bin->lock); + edata_t *fresh_slab = NULL; + void *ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind); if (ret == NULL) { malloc_mutex_unlock(tsdn, &bin->lock); - return NULL; + /******************************/ + fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard, + bin_info); + /********************************/ + malloc_mutex_lock(tsdn, &bin->lock); + /* Retry since the lock was dropped. */ + ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind); + if (ret == NULL) { + if (fresh_slab == NULL) { + /* OOM */ + malloc_mutex_unlock(tsdn, &bin->lock); + return NULL; + } + ret = arena_bin_malloc_with_fresh_slab(tsdn, arena, bin, + binind, fresh_slab); + fresh_slab = NULL; + } } - if (config_stats) { bin->stats.nmalloc++; bin->stats.nrequests++; bin->stats.curregs++; } malloc_mutex_unlock(tsdn, &bin->lock); - if (config_prof && arena_prof_accum(tsdn, arena, usize)) { - prof_idump(tsdn); - } - if (!zero) { - if (config_fill) { - if (unlikely(opt_junk_alloc)) { - arena_alloc_junk_small(ret, - &bin_infos[binind], false); - } else if (unlikely(opt_zero)) { - memset(ret, 0, usize); - } - } - } else { - if (config_fill && unlikely(opt_junk_alloc)) { - arena_alloc_junk_small(ret, &bin_infos[binind], - true); - } + if (fresh_slab != NULL) { + arena_slab_dalloc(tsdn, arena, fresh_slab); + } + if (zero) { memset(ret, 0, usize); } - arena_decay_tick(tsdn, arena); + return ret; } @@ -1533,10 +1210,17 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache) { void *ret; - if (usize <= SC_SMALL_MAXCLASS - && (alignment < PAGE - || (alignment == PAGE && (usize & PAGE_MASK) == 0))) { + if (usize <= SC_SMALL_MAXCLASS) { /* Small; alignment doesn't require special slab placement. */ + + /* usize should be a result of sz_sa2u() */ + assert((usize & (alignment - 1)) == 0); + + /* + * Small usize can't come from an alignment larger than a page. 
+ */ + assert(alignment <= PAGE); + ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize), zero, tcache, true); } else { @@ -1560,33 +1244,22 @@ arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) { safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS); } - rtree_ctx_t rtree_ctx_fallback; - rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); - - extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, - (uintptr_t)ptr, true); - arena_t *arena = extent_arena_get(extent); + edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); szind_t szind = sz_size2index(usize); - extent_szind_set(extent, szind); - rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, - szind, false); - - prof_accum_cancel(tsdn, &arena->prof_accum, usize); + edata_szind_set(edata, szind); + emap_remap(tsdn, &arena_emap_global, edata, szind, /* slab */ false); assert(isalloc(tsdn, ptr) == usize); } static size_t -arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) { +arena_prof_demote(tsdn_t *tsdn, edata_t *edata, const void *ptr) { cassert(config_prof); assert(ptr != NULL); - extent_szind_set(extent, SC_NBINS); - rtree_ctx_t rtree_ctx_fallback; - rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); - rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, - SC_NBINS, false); + edata_szind_set(edata, SC_NBINS); + emap_remap(tsdn, &arena_emap_global, edata, SC_NBINS, /* slab */ false); assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS); @@ -1599,9 +1272,9 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, cassert(config_prof); assert(opt_prof); - extent_t *extent = iealloc(tsdn, ptr); - size_t usize = extent_usize_get(extent); - size_t bumped_usize = arena_prof_demote(tsdn, extent, ptr); + edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); + size_t usize = edata_usize_get(edata); + size_t bumped_usize = arena_prof_demote(tsdn, edata, ptr); if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) { /* * Currently, we only do redzoning for small sampled @@ -1614,17 +1287,17 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, sz_size2index(bumped_usize), slow_path); } else { - large_dalloc(tsdn, extent); + large_dalloc(tsdn, edata); } } static void -arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) { +arena_dissociate_bin_slab(arena_t *arena, edata_t *slab, bin_t *bin) { /* Dissociate slab from bin. 
*/ if (slab == bin->slabcur) { bin->slabcur = NULL; } else { - szind_t binind = extent_szind_get(slab); + szind_t binind = edata_szind_get(slab); const bin_info_t *bin_info = &bin_infos[binind]; /* @@ -1641,24 +1314,9 @@ arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) { } static void -arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, - bin_t *bin) { - assert(slab != bin->slabcur); - - malloc_mutex_unlock(tsdn, &bin->lock); - /******************************/ - arena_slab_dalloc(tsdn, arena, slab); - /****************************/ - malloc_mutex_lock(tsdn, &bin->lock); - if (config_stats) { - bin->stats.curslabs--; - } -} - -static void -arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, +arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab, bin_t *bin) { - assert(extent_nfree_get(slab) > 0); + assert(edata_nfree_get(slab) > 0); /* * Make sure that if bin->slabcur is non-NULL, it refers to the @@ -1666,9 +1324,9 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, * than proactively keeping it pointing at the oldest/lowest non-full * slab. */ - if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) { + if (bin->slabcur != NULL && edata_snad_comp(bin->slabcur, slab) > 0) { /* Switch slabcur. */ - if (extent_nfree_get(bin->slabcur) > 0) { + if (edata_nfree_get(bin->slabcur) > 0) { arena_bin_slabs_nonfull_insert(bin, bin->slabcur); } else { arena_bin_slabs_full_insert(arena, bin, bin->slabcur); @@ -1683,56 +1341,54 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, } static void -arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin, - szind_t binind, extent_t *slab, void *ptr, bool junked) { - arena_slab_data_t *slab_data = extent_slab_data_get(slab); - const bin_info_t *bin_info = &bin_infos[binind]; - - if (!junked && config_fill && unlikely(opt_junk_free)) { - arena_dalloc_junk_small(ptr, bin_info); - } - - arena_slab_reg_dalloc(slab, slab_data, ptr); - unsigned nfree = extent_nfree_get(slab); - if (nfree == bin_info->nregs) { - arena_dissociate_bin_slab(arena, slab, bin); - arena_dalloc_bin_slab(tsdn, arena, slab, bin); - } else if (nfree == 1 && slab != bin->slabcur) { - arena_bin_slabs_full_remove(arena, bin, slab); - arena_bin_lower_slab(tsdn, arena, slab, bin); - } +arena_dalloc_bin_slab_prepare(tsdn_t *tsdn, edata_t *slab, bin_t *bin) { + malloc_mutex_assert_owner(tsdn, &bin->lock); + assert(slab != bin->slabcur); if (config_stats) { - bin->stats.ndalloc++; - bin->stats.curregs--; + bin->stats.curslabs--; } } void -arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin, - szind_t binind, extent_t *extent, void *ptr) { - arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr, - true); +arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena, + edata_t *slab, bin_t *bin) { + arena_dissociate_bin_slab(arena, slab, bin); + arena_dalloc_bin_slab_prepare(tsdn, slab, bin); +} + +void +arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena, + edata_t *slab, bin_t *bin) { + arena_bin_slabs_full_remove(arena, bin, slab); + arena_bin_lower_slab(tsdn, arena, slab, bin); } static void -arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) { - szind_t binind = extent_szind_get(extent); - unsigned binshard = extent_binshard_get(extent); - bin_t *bin = &arena->bins[binind].bin_shards[binshard]; +arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, edata_t *edata, void *ptr) { + szind_t 
binind = edata_szind_get(edata); + unsigned binshard = edata_binshard_get(edata); + bin_t *bin = arena_get_bin(arena, binind, binshard); malloc_mutex_lock(tsdn, &bin->lock); - arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr, - false); + arena_dalloc_bin_locked_info_t info; + arena_dalloc_bin_locked_begin(&info, binind); + bool ret = arena_dalloc_bin_locked_step(tsdn, arena, bin, + &info, binind, edata, ptr); + arena_dalloc_bin_locked_finish(tsdn, arena, bin, &info); malloc_mutex_unlock(tsdn, &bin->lock); + + if (ret) { + arena_slab_dalloc(tsdn, arena, edata); + } } void arena_dalloc_small(tsdn_t *tsdn, void *ptr) { - extent_t *extent = iealloc(tsdn, ptr); - arena_t *arena = extent_arena_get(extent); + edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); + arena_t *arena = arena_get_from_edata(edata); - arena_dalloc_bin(tsdn, arena, extent, ptr); + arena_dalloc_bin(tsdn, arena, edata, ptr); arena_decay_tick(tsdn, arena); } @@ -1743,7 +1399,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, /* Calls with non-zero extra had to clamp extra. */ assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS); - extent_t *extent = iealloc(tsdn, ptr); + edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); if (unlikely(size > SC_LARGE_MAXCLASS)) { ret = true; goto done; @@ -1766,18 +1422,19 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, goto done; } - arena_decay_tick(tsdn, extent_arena_get(extent)); + arena_t *arena = arena_get_from_edata(edata); + arena_decay_tick(tsdn, arena); ret = false; } else if (oldsize >= SC_LARGE_MINCLASS && usize_max >= SC_LARGE_MINCLASS) { - ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max, + ret = large_ralloc_no_move(tsdn, edata, usize_min, usize_max, zero); } else { ret = true; } done: - assert(extent == iealloc(tsdn, ptr)); - *newsize = extent_usize_get(extent); + assert(edata == emap_edata_lookup(tsdn, &arena_emap_global, ptr)); + *newsize = edata_usize_get(edata); return ret; } @@ -1800,7 +1457,7 @@ void * arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache, hook_ralloc_args_t *hook_args) { - size_t usize = sz_s2u(size); + size_t usize = alignment == 0 ? sz_s2u(size) : sz_sa2u(size, alignment); if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) { return NULL; } @@ -1850,6 +1507,29 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, return ret; } +ehooks_t * +arena_get_ehooks(arena_t *arena) { + return base_ehooks_get(arena->base); +} + +extent_hooks_t * +arena_set_extent_hooks(tsd_t *tsd, arena_t *arena, + extent_hooks_t *extent_hooks) { + background_thread_info_t *info; + if (have_background_thread) { + info = arena_background_thread_info_get(arena); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + } + /* No using the HPA now that we have the custom hooks. 
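The rewritten arena_dalloc_bin() above keeps only bookkeeping under bin->lock (the begin/step/finish helpers) and returns the emptied slab to the page allocator after the lock is dropped, instead of unlocking and relocking inside the old arena_dalloc_bin_slab(). A minimal sketch of that defer-past-unlock pattern, using plain pthreads rather than the jemalloc helpers:

```
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct {
	pthread_mutex_t lock;
	int nfree;	/* free regions in the current slab */
	int nregs;	/* regions per slab */
} toy_bin_t;

/* Expensive work; never call this while holding bin->lock. */
static void toy_slab_dalloc(void *slab) { free(slab); }

static void
toy_dalloc(toy_bin_t *bin, void *slab) {
	bool slab_now_empty;

	pthread_mutex_lock(&bin->lock);
	bin->nfree++;	/* the "locked step": pure bookkeeping */
	slab_now_empty = (bin->nfree == bin->nregs);
	pthread_mutex_unlock(&bin->lock);

	/* Deferred work, analogous to arena_slab_dalloc() after unlock. */
	if (slab_now_empty) {
		toy_slab_dalloc(slab);
	}
}

int
main(void) {
	toy_bin_t bin = {PTHREAD_MUTEX_INITIALIZER, 3, 4};
	toy_dalloc(&bin, malloc(64));	/* nfree reaches nregs: freed after unlock */
	return 0;
}
```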
*/ + pa_shard_disable_hpa(tsd_tsdn(tsd), &arena->pa_shard); + extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks); + if (have_background_thread) { + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + } + + return ret; +} + dss_prec_t arena_dss_prec_get(arena_t *arena) { return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE); @@ -1871,7 +1551,7 @@ arena_dirty_decay_ms_default_get(void) { bool arena_dirty_decay_ms_default_set(ssize_t decay_ms) { - if (!arena_decay_ms_valid(decay_ms)) { + if (!decay_ms_valid(decay_ms)) { return true; } atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED); @@ -1885,7 +1565,7 @@ arena_muzzy_decay_ms_default_get(void) { bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms) { - if (!arena_decay_ms_valid(decay_ms)) { + if (!decay_ms_valid(decay_ms)) { return true; } atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED); @@ -1896,26 +1576,8 @@ bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit, size_t *new_limit) { assert(opt_retain); - - pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0); - if (new_limit != NULL) { - size_t limit = *new_limit; - /* Grow no more than the new limit. */ - if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) { - return true; - } - } - - malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx); - if (old_limit != NULL) { - *old_limit = sz_pind2sz(arena->retain_grow_limit); - } - if (new_limit != NULL) { - arena->retain_grow_limit = new_ind; - } - malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx); - - return false; + return pac_retain_grow_limit_get_set(tsd_tsdn(tsd), + &arena->pa_shard.pac, old_limit, new_limit); } unsigned @@ -1933,13 +1595,8 @@ arena_nthreads_dec(arena_t *arena, bool internal) { atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); } -size_t -arena_extent_sn_next(arena_t *arena) { - return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED); -} - arena_t * -arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { +arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) { arena_t *arena; base_t *base; unsigned i; @@ -1947,16 +1604,13 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { if (ind == 0) { base = b0get(); } else { - base = base_new(tsdn, ind, extent_hooks); + base = base_new(tsdn, ind, config->extent_hooks, + config->metadata_use_hooks); if (base == NULL) { return NULL; } } - unsigned nbins_total = 0; - for (i = 0; i < SC_NBINS; i++) { - nbins_total += bin_infos[i].n_shards; - } size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total; arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE); if (arena == NULL) { @@ -1980,110 +1634,56 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { } } - if (config_prof) { - if (prof_accum_init(tsdn, &arena->prof_accum)) { - goto label_error; - } - } - - if (config_cache_oblivious) { - /* - * A nondeterministic seed based on the address of arena reduces - * the likelihood of lockstep non-uniform cache index - * utilization among identical concurrent processes, but at the - * cost of test repeatability. For debug builds, instead use a - * deterministic seed. - */ - atomic_store_zu(&arena->offset_state, config_debug ? 
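arena_set_extent_hooks() above is the backend of the "arena.<i>.extent_hooks" mallctl, and in 5.3.0 installing custom hooks also takes the arena's pa_shard off the HPA. A hedged example of inspecting the currently installed hooks through the public API; it assumes mallctl() as declared by FreeBSD's <malloc_np.h> and treats the returned extent_hooks_t pointer as opaque:

```
#include <malloc_np.h>	/* mallctl() on FreeBSD; <jemalloc/jemalloc.h> elsewhere */
#include <stdio.h>

int
main(void) {
	/* The value is an extent_hooks_t *; a void * is enough to look at it. */
	void *hooks;
	size_t sz = sizeof(hooks);

	if (mallctl("arena.0.extent_hooks", &hooks, &sz, NULL, 0) != 0) {
		perror("mallctl");
		return 1;
	}
	printf("arena 0 extent hooks at %p\n", hooks);
	return 0;
}
```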
ind : - (size_t)(uintptr_t)arena, ATOMIC_RELAXED); - } - - atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED); - atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(), ATOMIC_RELAXED); - atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); - - extent_list_init(&arena->large); + edata_list_active_init(&arena->large); if (malloc_mutex_init(&arena->large_mtx, "arena_large", WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) { goto label_error; } - /* - * Delay coalescing for dirty extents despite the disruptive effect on - * memory layout for best-fit extent allocation, since cached extents - * are likely to be reused soon after deallocation, and the cost of - * merging/splitting extents is non-trivial. - */ - if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty, - true)) { - goto label_error; - } - /* - * Coalesce muzzy extents immediately, because operations on them are in - * the critical path much less often than for dirty extents. - */ - if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy, - false)) { - goto label_error; - } - /* - * Coalesce retained extents immediately, in part because they will - * never be evicted (and therefore there's no opportunity for delayed - * coalescing), but also because operations on retained extents are not - * in the critical path. - */ - if (extents_init(tsdn, &arena->extents_retained, extent_state_retained, - false)) { - goto label_error; - } - - if (arena_decay_init(&arena->decay_dirty, - arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) { - goto label_error; - } - if (arena_decay_init(&arena->decay_muzzy, - arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) { - goto label_error; - } - - arena->extent_grow_next = sz_psz2ind(HUGEPAGE); - arena->retain_grow_limit = sz_psz2ind(SC_LARGE_MAXCLASS); - if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow", - WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) { - goto label_error; - } - - extent_avail_new(&arena->extent_avail); - if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail", - WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) { + nstime_t cur_time; + nstime_init_update(&cur_time); + if (pa_shard_init(tsdn, &arena->pa_shard, &arena_pa_central_global, + &arena_emap_global, base, ind, &arena->stats.pa_shard_stats, + LOCKEDINT_MTX(arena->stats.mtx), &cur_time, oversize_threshold, + arena_dirty_decay_ms_default_get(), + arena_muzzy_decay_ms_default_get())) { goto label_error; } /* Initialize bins. */ - uintptr_t bin_addr = (uintptr_t)arena + sizeof(arena_t); atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE); - for (i = 0; i < SC_NBINS; i++) { - unsigned nshards = bin_infos[i].n_shards; - arena->bins[i].bin_shards = (bin_t *)bin_addr; - bin_addr += nshards * sizeof(bin_t); - for (unsigned j = 0; j < nshards; j++) { - bool err = bin_init(&arena->bins[i].bin_shards[j]); - if (err) { - goto label_error; - } + for (i = 0; i < nbins_total; i++) { + bool err = bin_init(&arena->bins[i]); + if (err) { + goto label_error; } } - assert(bin_addr == (uintptr_t)arena + arena_size); arena->base = base; /* Set arena before creating background threads. */ arena_set(ind, arena); + arena->ind = ind; - nstime_init(&arena->create_time, 0); - nstime_update(&arena->create_time); + nstime_init_update(&arena->create_time); + + /* + * We turn on the HPA if set to. There are two exceptions: + * - Custom extent hooks (we should only return memory allocated from + * them in that case). + * - Arena 0 initialization. 
In this case, we're mid-bootstrapping, and + * so arena_hpa_global is not yet initialized. + */ + if (opt_hpa && ehooks_are_default(base_ehooks_get(base)) && ind != 0) { + hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts; + hpa_shard_opts.deferral_allowed = background_thread_enabled(); + if (pa_shard_enable_hpa(tsdn, &arena->pa_shard, + &hpa_shard_opts, &opt_hpa_sec_opts)) { + goto label_error; + } + } /* We don't support reentrancy for arena 0 bootstrapping. */ if (ind != 0) { @@ -2129,10 +1729,12 @@ arena_choose_huge(tsd_t *tsd) { * expected for huge allocations. */ if (arena_dirty_decay_ms_default_get() > 0) { - arena_dirty_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0); + arena_decay_ms_set(tsd_tsdn(tsd), huge_arena, + extent_state_dirty, 0); } if (arena_muzzy_decay_ms_default_get() > 0) { - arena_muzzy_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0); + arena_decay_ms_set(tsd_tsdn(tsd), huge_arena, + extent_state_muzzy, 0); } } @@ -2167,8 +1769,8 @@ arena_is_huge(unsigned arena_ind) { return (arena_ind == huge_arena_ind); } -void -arena_boot(sc_data_t *sc_data) { +bool +arena_boot(sc_data_t *sc_data, base_t *base, bool hpa) { arena_dirty_decay_ms_default_set(opt_dirty_decay_ms); arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms); for (unsigned i = 0; i < SC_NBINS; i++) { @@ -2176,12 +1778,20 @@ arena_boot(sc_data_t *sc_data) { div_init(&arena_binind_div_info[i], (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta)); } + + uint32_t cur_offset = (uint32_t)offsetof(arena_t, bins); + for (szind_t i = 0; i < SC_NBINS; i++) { + arena_bin_offsets[i] = cur_offset; + nbins_total += bin_infos[i].n_shards; + cur_offset += (uint32_t)(bin_infos[i].n_shards * sizeof(bin_t)); + } + return pa_central_init(&arena_pa_central_global, base, hpa, + &hpa_hooks_default); } void arena_prefork0(tsdn_t *tsdn, arena_t *arena) { - malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx); - malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx); + pa_shard_prefork0(tsdn, &arena->pa_shard); } void @@ -2193,59 +1803,50 @@ arena_prefork1(tsdn_t *tsdn, arena_t *arena) { void arena_prefork2(tsdn_t *tsdn, arena_t *arena) { - malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx); + pa_shard_prefork2(tsdn, &arena->pa_shard); } void arena_prefork3(tsdn_t *tsdn, arena_t *arena) { - extents_prefork(tsdn, &arena->extents_dirty); - extents_prefork(tsdn, &arena->extents_muzzy); - extents_prefork(tsdn, &arena->extents_retained); + pa_shard_prefork3(tsdn, &arena->pa_shard); } void arena_prefork4(tsdn_t *tsdn, arena_t *arena) { - malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx); + pa_shard_prefork4(tsdn, &arena->pa_shard); } void arena_prefork5(tsdn_t *tsdn, arena_t *arena) { - base_prefork(tsdn, arena->base); + pa_shard_prefork5(tsdn, &arena->pa_shard); } void arena_prefork6(tsdn_t *tsdn, arena_t *arena) { - malloc_mutex_prefork(tsdn, &arena->large_mtx); + base_prefork(tsdn, arena->base); } void arena_prefork7(tsdn_t *tsdn, arena_t *arena) { - for (unsigned i = 0; i < SC_NBINS; i++) { - for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { - bin_prefork(tsdn, &arena->bins[i].bin_shards[j]); - } + malloc_mutex_prefork(tsdn, &arena->large_mtx); +} + +void +arena_prefork8(tsdn_t *tsdn, arena_t *arena) { + for (unsigned i = 0; i < nbins_total; i++) { + bin_prefork(tsdn, &arena->bins[i]); } } void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) { - unsigned i; - - for (i = 0; i < SC_NBINS; i++) { - for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { - bin_postfork_parent(tsdn, - &arena->bins[i].bin_shards[j]); - } + for (unsigned i = 0; i < 
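The new block in arena_new() above is where opt_hpa takes effect: an HPA shard is attached per arena, but only when the arena still uses the default extent hooks, and arena 0 is skipped because it is created mid-bootstrap. A hedged example of turning the option on from an application; it assumes the documented malloc_conf global and the "opt.hpa" mallctl name, and whether the HPA is actually usable depends on the platform build:

```
#include <malloc_np.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Assumed mechanism: jemalloc reads this global during initialization, so it
 * must be set before the first allocation (MALLOC_CONF in the environment
 * works as well).
 */
const char *malloc_conf = "hpa:true";

int
main(void) {
	bool hpa;
	size_t sz = sizeof(hpa);

	if (mallctl("opt.hpa", &hpa, &sz, NULL, 0) == 0) {
		printf("opt.hpa = %s\n", hpa ? "true" : "false");
	}
	return 0;
}
```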
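Also in this hunk, arena_boot() now lays all bin shards out as one flat bin_t array and records a byte offset per size class (arena_bin_offsets), replacing the old per-bin bin_shards pointer array. A standalone sketch of that offset scheme; the shard counts and struct contents below are made up:

```
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define NBINS 3

typedef struct { int dummy[4]; } toy_bin_t;

static uint32_t bin_offsets[NBINS];
static unsigned n_shards[NBINS] = {2, 1, 3};	/* made-up shard counts */
static unsigned nbins_total;

/* Compute per-bin byte offsets into one flat shard array, as arena_boot does. */
static void
boot_offsets(void) {
	uint32_t cur = 0;
	for (unsigned i = 0; i < NBINS; i++) {
		bin_offsets[i] = cur;
		nbins_total += n_shards[i];
		cur += (uint32_t)(n_shards[i] * sizeof(toy_bin_t));
	}
}

/* Locate shard `shard` of bin `binind` inside the flat allocation. */
static toy_bin_t *
get_bin(void *base, unsigned binind, unsigned shard) {
	return (toy_bin_t *)((uintptr_t)base + bin_offsets[binind]
	    + shard * sizeof(toy_bin_t));
}

int
main(void) {
	static toy_bin_t storage[6];	/* 2 + 1 + 3 shards */
	boot_offsets();
	assert(nbins_total == 6);
	assert(get_bin(storage, 2, 1) == &storage[4]);	/* skip 2 + 1 + 1 shards */
	return 0;
}
```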
nbins_total; i++) { + bin_postfork_parent(tsdn, &arena->bins[i]); } + malloc_mutex_postfork_parent(tsdn, &arena->large_mtx); base_postfork_parent(tsdn, arena->base); - malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx); - extents_postfork_parent(tsdn, &arena->extents_dirty); - extents_postfork_parent(tsdn, &arena->extents_muzzy); - extents_postfork_parent(tsdn, &arena->extents_retained); - malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx); - malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx); - malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx); + pa_shard_postfork_parent(tsdn, &arena->pa_shard); if (config_stats) { malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx); } @@ -2253,8 +1854,6 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) { void arena_postfork_child(tsdn_t *tsdn, arena_t *arena) { - unsigned i; - atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) { @@ -2266,32 +1865,26 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) { if (config_stats) { ql_new(&arena->tcache_ql); ql_new(&arena->cache_bin_array_descriptor_ql); - tcache_t *tcache = tcache_get(tsdn_tsd(tsdn)); - if (tcache != NULL && tcache->arena == arena) { - ql_elm_new(tcache, link); - ql_tail_insert(&arena->tcache_ql, tcache, link); + tcache_slow_t *tcache_slow = tcache_slow_get(tsdn_tsd(tsdn)); + if (tcache_slow != NULL && tcache_slow->arena == arena) { + tcache_t *tcache = tcache_slow->tcache; + ql_elm_new(tcache_slow, link); + ql_tail_insert(&arena->tcache_ql, tcache_slow, link); cache_bin_array_descriptor_init( - &tcache->cache_bin_array_descriptor, - tcache->bins_small, tcache->bins_large); + &tcache_slow->cache_bin_array_descriptor, + tcache->bins); ql_tail_insert(&arena->cache_bin_array_descriptor_ql, - &tcache->cache_bin_array_descriptor, link); + &tcache_slow->cache_bin_array_descriptor, link); } } - for (i = 0; i < SC_NBINS; i++) { - for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { - bin_postfork_child(tsdn, &arena->bins[i].bin_shards[j]); - } + for (unsigned i = 0; i < nbins_total; i++) { + bin_postfork_child(tsdn, &arena->bins[i]); } + malloc_mutex_postfork_child(tsdn, &arena->large_mtx); base_postfork_child(tsdn, arena->base); - malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx); - extents_postfork_child(tsdn, &arena->extents_dirty); - extents_postfork_child(tsdn, &arena->extents_muzzy); - extents_postfork_child(tsdn, &arena->extents_retained); - malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx); - malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx); - malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx); + pa_shard_postfork_child(tsdn, &arena->pa_shard); if (config_stats) { malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx); } diff --git a/contrib/jemalloc/src/background_thread.c b/contrib/jemalloc/src/background_thread.c index 57b9b256bb9b40..3bb8d26cd3d6f2 100644 --- a/contrib/jemalloc/src/background_thread.c +++ b/contrib/jemalloc/src/background_thread.c @@ -1,4 +1,3 @@ -#define JEMALLOC_BACKGROUND_THREAD_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" @@ -54,8 +53,9 @@ pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr, bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED bool background_threads_enable(tsd_t *tsd) NOT_REACHED bool background_threads_disable(tsd_t *tsd) NOT_REACHED -void 
background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, - arena_decay_t *decay, size_t npages_new) NOT_REACHED +bool background_thread_is_started(background_thread_info_t *info) NOT_REACHED +void background_thread_wakeup_early(background_thread_info_t *info, + nstime_t *remaining_sleep) NOT_REACHED void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED @@ -74,7 +74,7 @@ background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) { info->npages_to_purge_new = 0; if (config_stats) { info->tot_n_runs = 0; - nstime_init(&info->tot_sleep_time, 0); + nstime_init_zero(&info->tot_sleep_time); } } @@ -82,136 +82,40 @@ static inline bool set_current_thread_affinity(int cpu) { #if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) cpu_set_t cpuset; +#else +# ifndef __NetBSD__ + cpuset_t cpuset; +# else + cpuset_t *cpuset; +# endif +#endif + +#ifndef __NetBSD__ CPU_ZERO(&cpuset); CPU_SET(cpu, &cpuset); - int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset); +#else + cpuset = cpuset_create(); +#endif - return (ret != 0); +#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) + return (sched_setaffinity(0, sizeof(cpu_set_t), &cpuset) != 0); #else - return false; +# ifndef __NetBSD__ + int ret = pthread_setaffinity_np(pthread_self(), sizeof(cpuset_t), + &cpuset); +# else + int ret = pthread_setaffinity_np(pthread_self(), cpuset_size(cpuset), + cpuset); + cpuset_destroy(cpuset); +# endif + return ret != 0; #endif } -/* Threshold for determining when to wake up the background thread. */ -#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024) #define BILLION UINT64_C(1000000000) /* Minimal sleep interval 100 ms. */ #define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10) -static inline size_t -decay_npurge_after_interval(arena_decay_t *decay, size_t interval) { - size_t i; - uint64_t sum = 0; - for (i = 0; i < interval; i++) { - sum += decay->backlog[i] * h_steps[i]; - } - for (; i < SMOOTHSTEP_NSTEPS; i++) { - sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]); - } - - return (size_t)(sum >> SMOOTHSTEP_BFP); -} - -static uint64_t -arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay, - extents_t *extents) { - if (malloc_mutex_trylock(tsdn, &decay->mtx)) { - /* Use minimal interval if decay is contended. */ - return BACKGROUND_THREAD_MIN_INTERVAL_NS; - } - - uint64_t interval; - ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); - if (decay_time <= 0) { - /* Purging is eagerly done or disabled currently. */ - interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; - goto label_done; - } - - uint64_t decay_interval_ns = nstime_ns(&decay->interval); - assert(decay_interval_ns > 0); - size_t npages = extents_npages_get(extents); - if (npages == 0) { - unsigned i; - for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { - if (decay->backlog[i] > 0) { - break; - } - } - if (i == SMOOTHSTEP_NSTEPS) { - /* No dirty pages recorded. Sleep indefinitely. */ - interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; - goto label_done; - } - } - if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) { - /* Use max interval. */ - interval = decay_interval_ns * SMOOTHSTEP_NSTEPS; - goto label_done; - } - - size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns; - size_t ub = SMOOTHSTEP_NSTEPS; - /* Minimal 2 intervals to ensure reaching next epoch deadline. */ - lb = (lb < 2) ? 
2 : lb; - if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) || - (lb + 2 > ub)) { - interval = BACKGROUND_THREAD_MIN_INTERVAL_NS; - goto label_done; - } - - assert(lb + 2 <= ub); - size_t npurge_lb, npurge_ub; - npurge_lb = decay_npurge_after_interval(decay, lb); - if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) { - interval = decay_interval_ns * lb; - goto label_done; - } - npurge_ub = decay_npurge_after_interval(decay, ub); - if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) { - interval = decay_interval_ns * ub; - goto label_done; - } - - unsigned n_search = 0; - size_t target, npurge; - while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub) - && (lb + 2 < ub)) { - target = (lb + ub) / 2; - npurge = decay_npurge_after_interval(decay, target); - if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) { - ub = target; - npurge_ub = npurge; - } else { - lb = target; - npurge_lb = npurge; - } - assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1); - } - interval = decay_interval_ns * (ub + lb) / 2; -label_done: - interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ? - BACKGROUND_THREAD_MIN_INTERVAL_NS : interval; - malloc_mutex_unlock(tsdn, &decay->mtx); - - return interval; -} - -/* Compute purge interval for background threads. */ -static uint64_t -arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) { - uint64_t i1, i2; - i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty, - &arena->extents_dirty); - if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) { - return i1; - } - i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy, - &arena->extents_muzzy); - - return i1 < i2 ? i1 : i2; -} - static void background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info, uint64_t interval) { @@ -228,7 +132,8 @@ background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info, int ret; if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) { - assert(background_thread_indefinite_sleep(info)); + background_thread_wakeup_time_set(tsdn, info, + BACKGROUND_THREAD_INDEFINITE_SLEEP); ret = pthread_cond_wait(&info->cond, &info->mtx.lock); assert(ret == 0); } else { @@ -236,8 +141,7 @@ background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info, interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP); /* We need malloc clock (can be different from tv). 
*/ nstime_t next_wakeup; - nstime_init(&next_wakeup, 0); - nstime_update(&next_wakeup); + nstime_init_update(&next_wakeup); nstime_iadd(&next_wakeup, interval); assert(nstime_ns(&next_wakeup) < BACKGROUND_THREAD_INDEFINITE_SLEEP); @@ -254,8 +158,6 @@ background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info, assert(!background_thread_indefinite_sleep(info)); ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts); assert(ret == ETIMEDOUT || ret == 0); - background_thread_wakeup_time_set(tsdn, info, - BACKGROUND_THREAD_INDEFINITE_SLEEP); } if (config_stats) { gettimeofday(&tv, NULL); @@ -283,28 +185,48 @@ background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) { } static inline void -background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, unsigned ind) { - uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; +background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, + unsigned ind) { + uint64_t ns_until_deferred = BACKGROUND_THREAD_DEFERRED_MAX; unsigned narenas = narenas_total_get(); + bool slept_indefinitely = background_thread_indefinite_sleep(info); for (unsigned i = ind; i < narenas; i += max_background_threads) { arena_t *arena = arena_get(tsdn, i, false); if (!arena) { continue; } - arena_decay(tsdn, arena, true, false); - if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) { + /* + * If thread was woken up from the indefinite sleep, don't + * do the work instantly, but rather check when the deferred + * work that caused this thread to wake up is scheduled for. + */ + if (!slept_indefinitely) { + arena_do_deferred_work(tsdn, arena); + } + if (ns_until_deferred <= BACKGROUND_THREAD_MIN_INTERVAL_NS) { /* Min interval will be used. */ continue; } - uint64_t interval = arena_decay_compute_purge_interval(tsdn, - arena); - assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS); - if (min_interval > interval) { - min_interval = interval; + uint64_t ns_arena_deferred = pa_shard_time_until_deferred_work( + tsdn, &arena->pa_shard); + if (ns_arena_deferred < ns_until_deferred) { + ns_until_deferred = ns_arena_deferred; } } - background_thread_sleep(tsdn, info, min_interval); + + uint64_t sleep_ns; + if (ns_until_deferred == BACKGROUND_THREAD_DEFERRED_MAX) { + sleep_ns = BACKGROUND_THREAD_INDEFINITE_SLEEP; + } else { + sleep_ns = + (ns_until_deferred < BACKGROUND_THREAD_MIN_INTERVAL_NS) + ? BACKGROUND_THREAD_MIN_INTERVAL_NS + : ns_until_deferred; + + } + + background_thread_sleep(tsdn, info, sleep_ns); } static bool @@ -508,7 +430,7 @@ background_thread_entry(void *ind_arg) { assert(thread_ind < max_background_threads); #ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP pthread_setname_np(pthread_self(), "jemalloc_bg_thd"); -#elif defined(__FreeBSD__) +#elif defined(__FreeBSD__) || defined(__DragonFly__) pthread_set_name_np(pthread_self(), "jemalloc_bg_thd"); #endif if (opt_percpu_arena != percpu_arena_disabled) { @@ -608,16 +530,16 @@ background_threads_enable(tsd_t *tsd) { malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); VARIABLE_ARRAY(bool, marked, max_background_threads); - unsigned i, nmarked; - for (i = 0; i < max_background_threads; i++) { + unsigned nmarked; + for (unsigned i = 0; i < max_background_threads; i++) { marked[i] = false; } nmarked = 0; /* Thread 0 is required and created at the end. */ marked[0] = true; /* Mark the threads we need to create for thread 0. 
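The rewritten background_work_sleep_once() above no longer computes decay purge intervals itself; each arena's pa_shard reports the time until its next deferred work, and the minimum across arenas is clamped into the sleep window. A compact restatement of just that clamping rule; the 100 ms minimum matches the diff, while the two sentinel values are placeholders for constants defined elsewhere in jemalloc:

```
#include <assert.h>
#include <stdint.h>

#define BILLION UINT64_C(1000000000)
#define MIN_INTERVAL_NS (BILLION / 10)	/* 100 ms minimum, as in the diff */
#define DEFERRED_MAX UINT64_MAX		/* assumed sentinel: no deferred work */
#define INDEFINITE_SLEEP UINT64_MAX	/* assumed sentinel: sleep until signaled */

static uint64_t
sleep_ns_for(uint64_t ns_until_deferred) {
	if (ns_until_deferred == DEFERRED_MAX) {
		return INDEFINITE_SLEEP;
	}
	return ns_until_deferred < MIN_INTERVAL_NS
	    ? MIN_INTERVAL_NS : ns_until_deferred;
}

int
main(void) {
	assert(sleep_ns_for(DEFERRED_MAX) == INDEFINITE_SLEEP);
	assert(sleep_ns_for(1) == MIN_INTERVAL_NS);	/* never wake more than 10x/s */
	assert(sleep_ns_for(5 * BILLION) == 5 * BILLION);
	return 0;
}
```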
*/ - unsigned n = narenas_total_get(); - for (i = 1; i < n; i++) { + unsigned narenas = narenas_total_get(); + for (unsigned i = 1; i < narenas; i++) { if (marked[i % max_background_threads] || arena_get(tsd_tsdn(tsd), i, false) == NULL) { continue; @@ -634,7 +556,18 @@ background_threads_enable(tsd_t *tsd) { } } - return background_thread_create_locked(tsd, 0); + bool err = background_thread_create_locked(tsd, 0); + if (err) { + return true; + } + for (unsigned i = 0; i < narenas; i++) { + arena_t *arena = arena_get(tsd_tsdn(tsd), i, false); + if (arena != NULL) { + pa_shard_set_deferral_allowed(tsd_tsdn(tsd), + &arena->pa_shard, true); + } + } + return false; } bool @@ -648,92 +581,36 @@ background_threads_disable(tsd_t *tsd) { return true; } assert(n_background_threads == 0); + unsigned narenas = narenas_total_get(); + for (unsigned i = 0; i < narenas; i++) { + arena_t *arena = arena_get(tsd_tsdn(tsd), i, false); + if (arena != NULL) { + pa_shard_set_deferral_allowed(tsd_tsdn(tsd), + &arena->pa_shard, false); + } + } return false; } -/* Check if we need to signal the background thread early. */ +bool +background_thread_is_started(background_thread_info_t *info) { + return info->state == background_thread_started; +} + void -background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, - arena_decay_t *decay, size_t npages_new) { - background_thread_info_t *info = arena_background_thread_info_get( - arena); - if (malloc_mutex_trylock(tsdn, &info->mtx)) { - /* - * Background thread may hold the mutex for a long period of - * time. We'd like to avoid the variance on application - * threads. So keep this non-blocking, and leave the work to a - * future epoch. - */ +background_thread_wakeup_early(background_thread_info_t *info, + nstime_t *remaining_sleep) { + /* + * This is an optimization to increase batching. At this point + * we know that background thread wakes up soon, so the time to cache + * the just freed memory is bounded and low. + */ + if (remaining_sleep != NULL && nstime_ns(remaining_sleep) < + BACKGROUND_THREAD_MIN_INTERVAL_NS) { return; } - - if (info->state != background_thread_started) { - goto label_done; - } - if (malloc_mutex_trylock(tsdn, &decay->mtx)) { - goto label_done; - } - - ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); - if (decay_time <= 0) { - /* Purging is eagerly done or disabled currently. */ - goto label_done_unlock2; - } - uint64_t decay_interval_ns = nstime_ns(&decay->interval); - assert(decay_interval_ns > 0); - - nstime_t diff; - nstime_init(&diff, background_thread_wakeup_time_get(info)); - if (nstime_compare(&diff, &decay->epoch) <= 0) { - goto label_done_unlock2; - } - nstime_subtract(&diff, &decay->epoch); - if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) { - goto label_done_unlock2; - } - - if (npages_new > 0) { - size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns); - /* - * Compute how many new pages we would need to purge by the next - * wakeup, which is used to determine if we should signal the - * background thread. 
- */ - uint64_t npurge_new; - if (n_epoch >= SMOOTHSTEP_NSTEPS) { - npurge_new = npages_new; - } else { - uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1]; - assert(h_steps_max >= - h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]); - npurge_new = npages_new * (h_steps_max - - h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]); - npurge_new >>= SMOOTHSTEP_BFP; - } - info->npages_to_purge_new += npurge_new; - } - - bool should_signal; - if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) { - should_signal = true; - } else if (unlikely(background_thread_indefinite_sleep(info)) && - (extents_npages_get(&arena->extents_dirty) > 0 || - extents_npages_get(&arena->extents_muzzy) > 0 || - info->npages_to_purge_new > 0)) { - should_signal = true; - } else { - should_signal = false; - } - - if (should_signal) { - info->npages_to_purge_new = 0; - pthread_cond_signal(&info->cond); - } -label_done_unlock2: - malloc_mutex_unlock(tsdn, &decay->mtx); -label_done: - malloc_mutex_unlock(tsdn, &info->mtx); + pthread_cond_signal(&info->cond); } void @@ -794,9 +671,11 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) { return true; } - stats->num_threads = n_background_threads; + nstime_init_zero(&stats->run_interval); + memset(&stats->max_counter_per_bg_thd, 0, sizeof(mutex_prof_data_t)); + uint64_t num_runs = 0; - nstime_init(&stats->run_interval, 0); + stats->num_threads = n_background_threads; for (unsigned i = 0; i < max_background_threads; i++) { background_thread_info_t *info = &background_thread_info[i]; if (malloc_mutex_trylock(tsdn, &info->mtx)) { @@ -809,6 +688,8 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) { if (info->state != background_thread_stopped) { num_runs += info->tot_n_runs; nstime_add(&stats->run_interval, &info->tot_sleep_time); + malloc_mutex_prof_max_update(tsdn, + &stats->max_counter_per_bg_thd, &info->mtx); } malloc_mutex_unlock(tsdn, &info->mtx); } @@ -892,7 +773,7 @@ background_thread_boot0(void) { } bool -background_thread_boot1(tsdn_t *tsdn) { +background_thread_boot1(tsdn_t *tsdn, base_t *base) { #ifdef JEMALLOC_BACKGROUND_THREAD assert(have_background_thread); assert(narenas_total_get() > 0); @@ -911,7 +792,7 @@ background_thread_boot1(tsdn_t *tsdn) { } background_thread_info = (background_thread_info_t *)base_alloc(tsdn, - b0get(), opt_max_background_threads * + base, opt_max_background_threads * sizeof(background_thread_info_t), CACHELINE); if (background_thread_info == NULL) { return true; diff --git a/contrib/jemalloc/src/base.c b/contrib/jemalloc/src/base.c index f3c61661a20a86..7f4d67564b7466 100644 --- a/contrib/jemalloc/src/base.c +++ b/contrib/jemalloc/src/base.c @@ -1,4 +1,3 @@ -#define JEMALLOC_BASE_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" @@ -7,6 +6,15 @@ #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/sz.h" +/* + * In auto mode, arenas switch to huge pages for the base allocator on the + * second base block. a0 switches to thp on the 5th block (after 20 megabytes + * of metadata), since more metadata (e.g. rtree nodes) come from a0's base. + */ + +#define BASE_AUTO_THP_THRESHOLD 2 +#define BASE_AUTO_THP_THRESHOLD_A0 5 + /******************************************************************************/ /* Data. 
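The new comment and BASE_AUTO_THP_THRESHOLD defines in base.c above describe when metadata blocks are switched to transparent huge pages under metadata_thp:auto. Restated as a single decision function; the thresholds come from the comment, the rest is illustrative:

```
#include <assert.h>
#include <stdbool.h>

#define BASE_AUTO_THP_THRESHOLD		2	/* ordinary arenas */
#define BASE_AUTO_THP_THRESHOLD_A0	5	/* a0 carries extra metadata (rtree nodes) */

/* Switch the base allocator to huge pages once enough blocks exist. */
static bool
should_switch_to_thp(unsigned base_ind, unsigned n_blocks) {
	unsigned threshold = (base_ind == 0) ?
	    BASE_AUTO_THP_THRESHOLD_A0 : BASE_AUTO_THP_THRESHOLD;
	return n_blocks >= threshold;
}

int
main(void) {
	assert(!should_switch_to_thp(0, 4) && should_switch_to_thp(0, 5));
	assert(!should_switch_to_thp(3, 1) && should_switch_to_thp(3, 2));
	return 0;
}
```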
*/ @@ -29,7 +37,7 @@ metadata_thp_madvise(void) { } static void * -base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) { +base_map(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, size_t size) { void *addr; bool zero = true; bool commit = true; @@ -37,22 +45,21 @@ base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) /* Use huge page sizes and alignment regardless of opt_metadata_thp. */ assert(size == HUGEPAGE_CEILING(size)); size_t alignment = HUGEPAGE; - if (extent_hooks == &extent_hooks_default) { + if (ehooks_are_default(ehooks)) { addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit); + if (have_madvise_huge && addr) { + pages_set_thp_state(addr, size); + } } else { - /* No arena context as we are creating new arenas. */ - tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); - pre_reentrancy(tsd, NULL); - addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment, - &zero, &commit, ind); - post_reentrancy(tsd); + addr = ehooks_alloc(tsdn, ehooks, NULL, size, alignment, &zero, + &commit); } return addr; } static void -base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr, +base_unmap(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, void *addr, size_t size) { /* * Cascade through dalloc, decommit, purge_forced, and purge_lazy, @@ -64,7 +71,7 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr, * may in fact want the end state of all associated virtual memory to be * in some consistent-but-allocated state. */ - if (extent_hooks == &extent_hooks_default) { + if (ehooks_are_default(ehooks)) { if (!extent_dalloc_mmap(addr, size)) { goto label_done; } @@ -80,31 +87,19 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr, /* Nothing worked. This should never happen. */ not_reached(); } else { - tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); - pre_reentrancy(tsd, NULL); - if (extent_hooks->dalloc != NULL && - !extent_hooks->dalloc(extent_hooks, addr, size, true, - ind)) { - goto label_post_reentrancy; + if (!ehooks_dalloc(tsdn, ehooks, addr, size, true)) { + goto label_done; } - if (extent_hooks->decommit != NULL && - !extent_hooks->decommit(extent_hooks, addr, size, 0, size, - ind)) { - goto label_post_reentrancy; + if (!ehooks_decommit(tsdn, ehooks, addr, size, 0, size)) { + goto label_done; } - if (extent_hooks->purge_forced != NULL && - !extent_hooks->purge_forced(extent_hooks, addr, size, 0, - size, ind)) { - goto label_post_reentrancy; + if (!ehooks_purge_forced(tsdn, ehooks, addr, size, 0, size)) { + goto label_done; } - if (extent_hooks->purge_lazy != NULL && - !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size, - ind)) { - goto label_post_reentrancy; + if (!ehooks_purge_lazy(tsdn, ehooks, addr, size, 0, size)) { + goto label_done; } /* Nothing worked. That's the application's problem. 
*/ - label_post_reentrancy: - post_reentrancy(tsd); } label_done: if (metadata_thp_madvise()) { @@ -116,14 +111,14 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr, } static void -base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr, +base_edata_init(size_t *extent_sn_next, edata_t *edata, void *addr, size_t size) { size_t sn; sn = *extent_sn_next; (*extent_sn_next)++; - extent_binit(extent, addr, size, sn); + edata_binit(edata, addr, size, sn); } static size_t @@ -169,7 +164,7 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) { pages_huge(block, block->size); if (config_stats) { base->n_thp += HUGEPAGE_CEILING(block->size - - extent_bsize_get(&block->extent)) >> LG_HUGEPAGE; + edata_bsize_get(&block->edata)) >> LG_HUGEPAGE; } block = block->next; assert(block == NULL || (base_ind_get(base) == 0)); @@ -177,34 +172,34 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) { } static void * -base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size, +base_extent_bump_alloc_helper(edata_t *edata, size_t *gap_size, size_t size, size_t alignment) { void *ret; assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM)); assert(size == ALIGNMENT_CEILING(size, alignment)); - *gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent), - alignment) - (uintptr_t)extent_addr_get(extent); - ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size); - assert(extent_bsize_get(extent) >= *gap_size + size); - extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) + - *gap_size + size), extent_bsize_get(extent) - *gap_size - size, - extent_sn_get(extent)); + *gap_size = ALIGNMENT_CEILING((uintptr_t)edata_addr_get(edata), + alignment) - (uintptr_t)edata_addr_get(edata); + ret = (void *)((uintptr_t)edata_addr_get(edata) + *gap_size); + assert(edata_bsize_get(edata) >= *gap_size + size); + edata_binit(edata, (void *)((uintptr_t)edata_addr_get(edata) + + *gap_size + size), edata_bsize_get(edata) - *gap_size - size, + edata_sn_get(edata)); return ret; } static void -base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size, +base_extent_bump_alloc_post(base_t *base, edata_t *edata, size_t gap_size, void *addr, size_t size) { - if (extent_bsize_get(extent) > 0) { + if (edata_bsize_get(edata) > 0) { /* * Compute the index for the largest size class that does not * exceed extent's size. */ szind_t index_floor = - sz_size2index(extent_bsize_get(extent) + 1) - 1; - extent_heap_insert(&base->avail[index_floor], extent); + sz_size2index(edata_bsize_get(edata) + 1) - 1; + edata_heap_insert(&base->avail[index_floor], edata); } if (config_stats) { @@ -229,13 +224,13 @@ base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size, } static void * -base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size, +base_extent_bump_alloc(base_t *base, edata_t *edata, size_t size, size_t alignment) { void *ret; size_t gap_size; - ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment); - base_extent_bump_alloc_post(base, extent, gap_size, ret, size); + ret = base_extent_bump_alloc_helper(edata, &gap_size, size, alignment); + base_extent_bump_alloc_post(base, edata, gap_size, ret, size); return ret; } @@ -245,8 +240,8 @@ base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size, * On success a pointer to the initialized base_block_t header is returned. 
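base_extent_bump_alloc_helper() above carves an aligned chunk off the front of an edata and re-initializes the edata to cover only the remainder. The arithmetic is easy to lose in the rename noise, so here it is in isolation, with plain pointers standing in for edata_t:

```
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Round addr up to a power-of-two alignment. */
static uintptr_t
align_up(uintptr_t addr, size_t alignment) {
	return (addr + alignment - 1) & ~((uintptr_t)alignment - 1);
}

/*
 * Carve `size` bytes, aligned to `alignment`, from the front of the block
 * [*addr, *addr + *bsize); the block shrinks to the remainder.
 */
static void *
bump_alloc(uintptr_t *addr, size_t *bsize, size_t size, size_t alignment) {
	size_t gap = align_up(*addr, alignment) - *addr;
	assert(*bsize >= gap + size);
	void *ret = (void *)(*addr + gap);
	*addr += gap + size;
	*bsize -= gap + size;
	return ret;
}

int
main(void) {
	static char block[256];
	uintptr_t addr = (uintptr_t)block + 1;	/* deliberately misaligned */
	size_t bsize = sizeof(block) - 1;
	void *p = bump_alloc(&addr, &bsize, 64, 16);
	assert(((uintptr_t)p & 15) == 0);	/* the gap absorbed the misalignment */
	assert(addr == (uintptr_t)p + 64);	/* remainder starts right after */
	return 0;
}
```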
*/ static base_block_t * -base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks, - unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size, +base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind, + pszind_t *pind_last, size_t *extent_sn_next, size_t size, size_t alignment) { alignment = ALIGNMENT_CEILING(alignment, QUANTUM); size_t usize = ALIGNMENT_CEILING(size, alignment); @@ -267,7 +262,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks, size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next)); size_t block_size = (min_block_size > next_block_size) ? min_block_size : next_block_size; - base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind, + base_block_t *block = (base_block_t *)base_map(tsdn, ehooks, ind, block_size); if (block == NULL) { return NULL; @@ -295,7 +290,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks, block->size = block_size; block->next = NULL; assert(block_size >= header_size); - base_extent_init(extent_sn_next, &block->extent, + base_edata_init(extent_sn_next, &block->edata, (void *)((uintptr_t)block + header_size), block_size - header_size); return block; } @@ -304,17 +299,17 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks, * Allocate an extent that is at least as large as specified size, with * specified alignment. */ -static extent_t * +static edata_t * base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { malloc_mutex_assert_owner(tsdn, &base->mtx); - extent_hooks_t *extent_hooks = base_extent_hooks_get(base); + ehooks_t *ehooks = base_ehooks_get_for_metadata(base); /* * Drop mutex during base_block_alloc(), because an extent hook will be * called. */ malloc_mutex_unlock(tsdn, &base->mtx); - base_block_t *block = base_block_alloc(tsdn, base, extent_hooks, + base_block_t *block = base_block_alloc(tsdn, base, ehooks, base_ind_get(base), &base->pind_last, &base->extent_sn_next, size, alignment); malloc_mutex_lock(tsdn, &base->mtx); @@ -338,7 +333,7 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { assert(base->resident <= base->mapped); assert(base->n_thp << LG_HUGEPAGE <= base->mapped); } - return &block->extent; + return &block->edata; } base_t * @@ -347,10 +342,22 @@ b0get(void) { } base_t * -base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { +base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks, + bool metadata_use_hooks) { pszind_t pind_last = 0; size_t extent_sn_next = 0; - base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind, + + /* + * The base will contain the ehooks eventually, but it itself is + * allocated using them. So we use some stack ehooks to bootstrap its + * memory, and then initialize the ehooks within the base_t. + */ + ehooks_t fake_ehooks; + ehooks_init(&fake_ehooks, metadata_use_hooks ? 
+ (extent_hooks_t *)extent_hooks : + (extent_hooks_t *)&ehooks_default_extent_hooks, ind); + + base_block_t *block = base_block_alloc(tsdn, NULL, &fake_ehooks, ind, &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM); if (block == NULL) { return NULL; @@ -359,13 +366,15 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { size_t gap_size; size_t base_alignment = CACHELINE; size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment); - base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent, + base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->edata, &gap_size, base_size, base_alignment); - base->ind = ind; - atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED); + ehooks_init(&base->ehooks, (extent_hooks_t *)extent_hooks, ind); + ehooks_init(&base->ehooks_base, metadata_use_hooks ? + (extent_hooks_t *)extent_hooks : + (extent_hooks_t *)&ehooks_default_extent_hooks, ind); if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE, malloc_mutex_rank_exclusive)) { - base_unmap(tsdn, extent_hooks, ind, block, block->size); + base_unmap(tsdn, &fake_ehooks, ind, block, block->size); return NULL; } base->pind_last = pind_last; @@ -373,7 +382,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { base->blocks = block; base->auto_thp_switched = false; for (szind_t i = 0; i < SC_NSIZES; i++) { - extent_heap_new(&base->avail[i]); + edata_heap_new(&base->avail[i]); } if (config_stats) { base->allocated = sizeof(base_block_t); @@ -386,7 +395,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { assert(base->resident <= base->mapped); assert(base->n_thp << LG_HUGEPAGE <= base->mapped); } - base_extent_bump_alloc_post(base, &block->extent, gap_size, base, + base_extent_bump_alloc_post(base, &block->edata, gap_size, base, base_size); return base; @@ -394,26 +403,31 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { void base_delete(tsdn_t *tsdn, base_t *base) { - extent_hooks_t *extent_hooks = base_extent_hooks_get(base); + ehooks_t *ehooks = base_ehooks_get_for_metadata(base); base_block_t *next = base->blocks; do { base_block_t *block = next; next = block->next; - base_unmap(tsdn, extent_hooks, base_ind_get(base), block, + base_unmap(tsdn, ehooks, base_ind_get(base), block, block->size); } while (next != NULL); } -extent_hooks_t * -base_extent_hooks_get(base_t *base) { - return (extent_hooks_t *)atomic_load_p(&base->extent_hooks, - ATOMIC_ACQUIRE); +ehooks_t * +base_ehooks_get(base_t *base) { + return &base->ehooks; +} + +ehooks_t * +base_ehooks_get_for_metadata(base_t *base) { + return &base->ehooks_base; } extent_hooks_t * base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) { - extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base); - atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE); + extent_hooks_t *old_extent_hooks = + ehooks_get_extent_hooks_ptr(&base->ehooks); + ehooks_init(&base->ehooks, extent_hooks, ehooks_ind_get(&base->ehooks)); return old_extent_hooks; } @@ -424,28 +438,28 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment, size_t usize = ALIGNMENT_CEILING(size, alignment); size_t asize = usize + alignment - QUANTUM; - extent_t *extent = NULL; + edata_t *edata = NULL; malloc_mutex_lock(tsdn, &base->mtx); for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) { - extent = extent_heap_remove_first(&base->avail[i]); - if (extent != NULL) { + edata = edata_heap_remove_first(&base->avail[i]); + if 
(edata != NULL) { /* Use existing space. */ break; } } - if (extent == NULL) { + if (edata == NULL) { /* Try to allocate more space. */ - extent = base_extent_alloc(tsdn, base, usize, alignment); + edata = base_extent_alloc(tsdn, base, usize, alignment); } void *ret; - if (extent == NULL) { + if (edata == NULL) { ret = NULL; goto label_return; } - ret = base_extent_bump_alloc(base, extent, usize, alignment); + ret = base_extent_bump_alloc(base, edata, usize, alignment); if (esn != NULL) { - *esn = extent_sn_get(extent); + *esn = (size_t)edata_sn_get(edata); } label_return: malloc_mutex_unlock(tsdn, &base->mtx); @@ -465,16 +479,16 @@ base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { return base_alloc_impl(tsdn, base, size, alignment, NULL); } -extent_t * -base_alloc_extent(tsdn_t *tsdn, base_t *base) { +edata_t * +base_alloc_edata(tsdn_t *tsdn, base_t *base) { size_t esn; - extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t), - CACHELINE, &esn); - if (extent == NULL) { + edata_t *edata = base_alloc_impl(tsdn, base, sizeof(edata_t), + EDATA_ALIGNMENT, &esn); + if (edata == NULL) { return NULL; } - extent_esn_set(extent, esn); - return extent; + edata_esn_set(edata, esn); + return edata; } void @@ -509,6 +523,7 @@ base_postfork_child(tsdn_t *tsdn, base_t *base) { bool base_boot(tsdn_t *tsdn) { - b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default); + b0 = base_new(tsdn, 0, (extent_hooks_t *)&ehooks_default_extent_hooks, + /* metadata_use_hooks */ true); return (b0 == NULL); } diff --git a/contrib/jemalloc/src/bin.c b/contrib/jemalloc/src/bin.c index bca6b12c352b40..fa20458705ac8c 100644 --- a/contrib/jemalloc/src/bin.c +++ b/contrib/jemalloc/src/bin.c @@ -6,26 +6,6 @@ #include "jemalloc/internal/sc.h" #include "jemalloc/internal/witness.h" -bin_info_t bin_infos[SC_NBINS]; - -static void -bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], - bin_info_t bin_infos[SC_NBINS]) { - for (unsigned i = 0; i < SC_NBINS; i++) { - bin_info_t *bin_info = &bin_infos[i]; - sc_t *sc = &sc_data->sc[i]; - bin_info->reg_size = ((size_t)1U << sc->lg_base) - + ((size_t)sc->ndelta << sc->lg_delta); - bin_info->slab_size = (sc->pgs << LG_PAGE); - bin_info->nregs = - (uint32_t)(bin_info->slab_size / bin_info->reg_size); - bin_info->n_shards = bin_shard_sizes[i]; - bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER( - bin_info->nregs); - bin_info->bitmap_info = bitmap_info; - } -} - bool bin_update_shard_size(unsigned bin_shard_sizes[SC_NBINS], size_t start_size, size_t end_size, size_t nshards) { @@ -58,12 +38,6 @@ bin_shard_sizes_boot(unsigned bin_shard_sizes[SC_NBINS]) { } } -void -bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { - assert(sc_data->initialized); - bin_infos_init(sc_data, bin_shard_sizes, bin_infos); -} - bool bin_init(bin_t *bin) { if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN, @@ -71,8 +45,8 @@ bin_init(bin_t *bin) { return true; } bin->slabcur = NULL; - extent_heap_new(&bin->slabs_nonfull); - extent_list_init(&bin->slabs_full); + edata_heap_new(&bin->slabs_nonfull); + edata_list_active_init(&bin->slabs_full); if (config_stats) { memset(&bin->stats, 0, sizeof(bin_stats_t)); } diff --git a/contrib/jemalloc/src/bin_info.c b/contrib/jemalloc/src/bin_info.c new file mode 100644 index 00000000000000..8629ef8817dfbd --- /dev/null +++ b/contrib/jemalloc/src/bin_info.c @@ -0,0 +1,30 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include 
"jemalloc/internal/bin_info.h" + +bin_info_t bin_infos[SC_NBINS]; + +static void +bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], + bin_info_t infos[SC_NBINS]) { + for (unsigned i = 0; i < SC_NBINS; i++) { + bin_info_t *bin_info = &infos[i]; + sc_t *sc = &sc_data->sc[i]; + bin_info->reg_size = ((size_t)1U << sc->lg_base) + + ((size_t)sc->ndelta << sc->lg_delta); + bin_info->slab_size = (sc->pgs << LG_PAGE); + bin_info->nregs = + (uint32_t)(bin_info->slab_size / bin_info->reg_size); + bin_info->n_shards = bin_shard_sizes[i]; + bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER( + bin_info->nregs); + bin_info->bitmap_info = bitmap_info; + } +} + +void +bin_info_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { + assert(sc_data->initialized); + bin_infos_init(sc_data, bin_shard_sizes, bin_infos); +} diff --git a/contrib/jemalloc/src/bitmap.c b/contrib/jemalloc/src/bitmap.c index 468b3178ebfa62..0ccedc5db5bc8f 100644 --- a/contrib/jemalloc/src/bitmap.c +++ b/contrib/jemalloc/src/bitmap.c @@ -1,4 +1,3 @@ -#define JEMALLOC_BITMAP_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" diff --git a/contrib/jemalloc/src/buf_writer.c b/contrib/jemalloc/src/buf_writer.c new file mode 100644 index 00000000000000..7c6f79403c0016 --- /dev/null +++ b/contrib/jemalloc/src/buf_writer.c @@ -0,0 +1,144 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/buf_writer.h" +#include "jemalloc/internal/malloc_io.h" + +static void * +buf_writer_allocate_internal_buf(tsdn_t *tsdn, size_t buf_len) { +#ifdef JEMALLOC_JET + if (buf_len > SC_LARGE_MAXCLASS) { + return NULL; + } +#else + assert(buf_len <= SC_LARGE_MAXCLASS); +#endif + return iallocztm(tsdn, buf_len, sz_size2index(buf_len), false, NULL, + true, arena_get(tsdn, 0, false), true); +} + +static void +buf_writer_free_internal_buf(tsdn_t *tsdn, void *buf) { + if (buf != NULL) { + idalloctm(tsdn, buf, NULL, NULL, true, true); + } +} + +static void +buf_writer_assert(buf_writer_t *buf_writer) { + assert(buf_writer != NULL); + assert(buf_writer->write_cb != NULL); + if (buf_writer->buf != NULL) { + assert(buf_writer->buf_size > 0); + } else { + assert(buf_writer->buf_size == 0); + assert(buf_writer->internal_buf); + } + assert(buf_writer->buf_end <= buf_writer->buf_size); +} + +bool +buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer, write_cb_t *write_cb, + void *cbopaque, char *buf, size_t buf_len) { + if (write_cb != NULL) { + buf_writer->write_cb = write_cb; + } else { + buf_writer->write_cb = je_malloc_message != NULL ? + je_malloc_message : wrtmessage; + } + buf_writer->cbopaque = cbopaque; + assert(buf_len >= 2); + if (buf != NULL) { + buf_writer->buf = buf; + buf_writer->internal_buf = false; + } else { + buf_writer->buf = buf_writer_allocate_internal_buf(tsdn, + buf_len); + buf_writer->internal_buf = true; + } + if (buf_writer->buf != NULL) { + buf_writer->buf_size = buf_len - 1; /* Allowing for '\0'. 
*/ + } else { + buf_writer->buf_size = 0; + } + buf_writer->buf_end = 0; + buf_writer_assert(buf_writer); + return buf_writer->buf == NULL; +} + +void +buf_writer_flush(buf_writer_t *buf_writer) { + buf_writer_assert(buf_writer); + if (buf_writer->buf == NULL) { + return; + } + buf_writer->buf[buf_writer->buf_end] = '\0'; + buf_writer->write_cb(buf_writer->cbopaque, buf_writer->buf); + buf_writer->buf_end = 0; + buf_writer_assert(buf_writer); +} + +void +buf_writer_cb(void *buf_writer_arg, const char *s) { + buf_writer_t *buf_writer = (buf_writer_t *)buf_writer_arg; + buf_writer_assert(buf_writer); + if (buf_writer->buf == NULL) { + buf_writer->write_cb(buf_writer->cbopaque, s); + return; + } + size_t i, slen, n; + for (i = 0, slen = strlen(s); i < slen; i += n) { + if (buf_writer->buf_end == buf_writer->buf_size) { + buf_writer_flush(buf_writer); + } + size_t s_remain = slen - i; + size_t buf_remain = buf_writer->buf_size - buf_writer->buf_end; + n = s_remain < buf_remain ? s_remain : buf_remain; + memcpy(buf_writer->buf + buf_writer->buf_end, s + i, n); + buf_writer->buf_end += n; + buf_writer_assert(buf_writer); + } + assert(i == slen); +} + +void +buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer) { + buf_writer_assert(buf_writer); + buf_writer_flush(buf_writer); + if (buf_writer->internal_buf) { + buf_writer_free_internal_buf(tsdn, buf_writer->buf); + } +} + +void +buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb, + void *read_cbopaque) { + /* + * A tiny local buffer in case the buffered writer failed to allocate + * at init. + */ + static char backup_buf[16]; + static buf_writer_t backup_buf_writer; + + buf_writer_assert(buf_writer); + assert(read_cb != NULL); + if (buf_writer->buf == NULL) { + buf_writer_init(TSDN_NULL, &backup_buf_writer, + buf_writer->write_cb, buf_writer->cbopaque, backup_buf, + sizeof(backup_buf)); + buf_writer = &backup_buf_writer; + } + assert(buf_writer->buf != NULL); + ssize_t nread = 0; + do { + buf_writer->buf_end += nread; + buf_writer_assert(buf_writer); + if (buf_writer->buf_end == buf_writer->buf_size) { + buf_writer_flush(buf_writer); + } + nread = read_cb(read_cbopaque, + buf_writer->buf + buf_writer->buf_end, + buf_writer->buf_size - buf_writer->buf_end); + } while (nread > 0); + buf_writer_flush(buf_writer); +} diff --git a/contrib/jemalloc/src/cache_bin.c b/contrib/jemalloc/src/cache_bin.c new file mode 100644 index 00000000000000..9ae072a0ee6e53 --- /dev/null +++ b/contrib/jemalloc/src/cache_bin.c @@ -0,0 +1,99 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/bit_util.h" +#include "jemalloc/internal/cache_bin.h" +#include "jemalloc/internal/safety_check.h" + +void +cache_bin_info_init(cache_bin_info_t *info, + cache_bin_sz_t ncached_max) { + assert(ncached_max <= CACHE_BIN_NCACHED_MAX); + size_t stack_size = (size_t)ncached_max * sizeof(void *); + assert(stack_size < ((size_t)1 << (sizeof(cache_bin_sz_t) * 8))); + info->ncached_max = (cache_bin_sz_t)ncached_max; +} + +void +cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos, + size_t *size, size_t *alignment) { + /* For the total bin stack region (per tcache), reserve 2 more slots so + * that + * 1) the empty position can be safely read on the fast path before + * checking "is_empty"; and + * 2) the cur_ptr can go beyond the empty position by 1 step safely on + * the fast path (i.e. no overflow). 
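The new buf_writer above batches output so stats and profile dumps do not issue one write callback per fragment; the interesting part is the copy loop in buf_writer_cb(), which fills the buffer and flushes whenever it runs out of room. A self-contained miniature of that loop, using stdio instead of jemalloc's write_cb plumbing:

```
#include <stdio.h>
#include <string.h>

typedef struct {
	char buf[16];
	size_t end;	/* bytes currently buffered */
} mini_writer_t;

static void
mini_flush(mini_writer_t *w) {
	fwrite(w->buf, 1, w->end, stdout);
	w->end = 0;
}

/* Append s, flushing whenever the buffer fills, as buf_writer_cb() does. */
static void
mini_write(mini_writer_t *w, const char *s) {
	size_t slen = strlen(s);
	for (size_t i = 0; i < slen; ) {
		if (w->end == sizeof(w->buf)) {
			mini_flush(w);
		}
		size_t n = slen - i;
		size_t room = sizeof(w->buf) - w->end;
		if (n > room) {
			n = room;
		}
		memcpy(w->buf + w->end, s + i, n);
		w->end += n;
		i += n;
	}
}

int
main(void) {
	mini_writer_t w = {{0}, 0};
	mini_write(&w, "hello, ");
	mini_write(&w, "buffered world\n");
	mini_flush(&w);	/* like buf_writer_terminate(): flush the tail */
	return 0;
}
```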
+ */ + *size = sizeof(void *) * 2; + for (szind_t i = 0; i < ninfos; i++) { + assert(infos[i].ncached_max > 0); + *size += infos[i].ncached_max * sizeof(void *); + } + + /* + * Align to at least PAGE, to minimize the # of TLBs needed by the + * smaller sizes; also helps if the larger sizes don't get used at all. + */ + *alignment = PAGE; +} + +void +cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc, + size_t *cur_offset) { + if (config_debug) { + size_t computed_size; + size_t computed_alignment; + + /* Pointer should be as aligned as we asked for. */ + cache_bin_info_compute_alloc(infos, ninfos, &computed_size, + &computed_alignment); + assert(((uintptr_t)alloc & (computed_alignment - 1)) == 0); + } + + *(uintptr_t *)((uintptr_t)alloc + *cur_offset) = + cache_bin_preceding_junk; + *cur_offset += sizeof(void *); +} + +void +cache_bin_postincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc, + size_t *cur_offset) { + *(uintptr_t *)((uintptr_t)alloc + *cur_offset) = + cache_bin_trailing_junk; + *cur_offset += sizeof(void *); +} + +void +cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc, + size_t *cur_offset) { + /* + * The full_position points to the lowest available space. Allocations + * will access the slots toward higher addresses (for the benefit of + * adjacent prefetch). + */ + void *stack_cur = (void *)((uintptr_t)alloc + *cur_offset); + void *full_position = stack_cur; + uint16_t bin_stack_size = info->ncached_max * sizeof(void *); + + *cur_offset += bin_stack_size; + void *empty_position = (void *)((uintptr_t)alloc + *cur_offset); + + /* Init to the empty position. */ + bin->stack_head = (void **)empty_position; + bin->low_bits_low_water = (uint16_t)(uintptr_t)bin->stack_head; + bin->low_bits_full = (uint16_t)(uintptr_t)full_position; + bin->low_bits_empty = (uint16_t)(uintptr_t)empty_position; + cache_bin_sz_t free_spots = cache_bin_diff(bin, + bin->low_bits_full, (uint16_t)(uintptr_t)bin->stack_head, + /* racy */ false); + assert(free_spots == bin_stack_size); + assert(cache_bin_ncached_get_local(bin, info) == 0); + assert(cache_bin_empty_position_get(bin) == empty_position); + + assert(bin_stack_size > 0 || empty_position == full_position); +} + +bool +cache_bin_still_zero_initialized(cache_bin_t *bin) { + return bin->stack_head == NULL; +} diff --git a/contrib/jemalloc/src/ckh.c b/contrib/jemalloc/src/ckh.c index 1bf6df5a115ebe..8db4319c51d6fe 100644 --- a/contrib/jemalloc/src/ckh.c +++ b/contrib/jemalloc/src/ckh.c @@ -34,7 +34,6 @@ * respectively. 
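cache_bin_init() above stores only the low 16 bits of the full and empty stack positions; since the per-bin stack is asserted to be smaller than 64 KiB, the fast path can derive occupancy from 16-bit subtraction of truncated pointers. A sketch of that trick in isolation; the names and layout below are simplified stand-ins for the cache_bin internals:

```
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Distance in bytes between two stack positions, using only their low
 * 16 bits.  Valid as long as the real distance is under 64 KiB, which is
 * exactly what the cache_bin code asserts about its stack size.
 */
static uint16_t
low_bits_diff(uint16_t hi_pos, uint16_t lo_pos) {
	return (uint16_t)(hi_pos - lo_pos);	/* mod-2^16 wraparound is fine */
}

int
main(void) {
	void *stack[8];				/* toy bin stack */
	void **head = &stack[3];		/* cached items occupy indices 3..7 */
	uint16_t low_empty = (uint16_t)(uintptr_t)&stack[8];
	uint16_t low_head = (uint16_t)(uintptr_t)head;

	/* Items are cached between head (inclusive) and the empty position. */
	size_t ncached = low_bits_diff(low_empty, low_head) / sizeof(void *);
	assert(ncached == 5);
	return 0;
}
```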
* ******************************************************************************/ -#define JEMALLOC_CKH_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/ckh.h" @@ -357,14 +356,14 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) { } bool -ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, +ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *ckh_hash, ckh_keycomp_t *keycomp) { bool ret; size_t mincells, usize; unsigned lg_mincells; assert(minitems > 0); - assert(hash != NULL); + assert(ckh_hash != NULL); assert(keycomp != NULL); #ifdef CKH_COUNT @@ -393,7 +392,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, } ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; - ckh->hash = hash; + ckh->hash = ckh_hash; ckh->keycomp = keycomp; usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); diff --git a/contrib/jemalloc/src/counter.c b/contrib/jemalloc/src/counter.c new file mode 100644 index 00000000000000..8f1ae3af45edc8 --- /dev/null +++ b/contrib/jemalloc/src/counter.c @@ -0,0 +1,30 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/counter.h" + +bool +counter_accum_init(counter_accum_t *counter, uint64_t interval) { + if (LOCKEDINT_MTX_INIT(counter->mtx, "counter_accum", + WITNESS_RANK_COUNTER_ACCUM, malloc_mutex_rank_exclusive)) { + return true; + } + locked_init_u64_unsynchronized(&counter->accumbytes, 0); + counter->interval = interval; + return false; +} + +void +counter_prefork(tsdn_t *tsdn, counter_accum_t *counter) { + LOCKEDINT_MTX_PREFORK(tsdn, counter->mtx); +} + +void +counter_postfork_parent(tsdn_t *tsdn, counter_accum_t *counter) { + LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, counter->mtx); +} + +void +counter_postfork_child(tsdn_t *tsdn, counter_accum_t *counter) { + LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, counter->mtx); +} diff --git a/contrib/jemalloc/src/ctl.c b/contrib/jemalloc/src/ctl.c index 48afaa61f4ee5a..135271bafae78f 100644 --- a/contrib/jemalloc/src/ctl.c +++ b/contrib/jemalloc/src/ctl.c @@ -1,4 +1,3 @@ -#define JEMALLOC_CTL_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" @@ -6,8 +5,16 @@ #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/inspect.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/nstime.h" +#include "jemalloc/internal/peak_event.h" +#include "jemalloc/internal/prof_data.h" +#include "jemalloc/internal/prof_log.h" +#include "jemalloc/internal/prof_recent.h" +#include "jemalloc/internal/prof_stats.h" +#include "jemalloc/internal/prof_sys.h" +#include "jemalloc/internal/safety_check.h" #include "jemalloc/internal/sc.h" #include "jemalloc/internal/util.h" @@ -60,6 +67,8 @@ CTL_PROTO(background_thread) CTL_PROTO(max_background_threads) CTL_PROTO(thread_tcache_enabled) CTL_PROTO(thread_tcache_flush) +CTL_PROTO(thread_peak_read) +CTL_PROTO(thread_peak_reset) CTL_PROTO(thread_prof_name) CTL_PROTO(thread_prof_active) CTL_PROTO(thread_arena) @@ -67,6 +76,7 @@ CTL_PROTO(thread_allocated) CTL_PROTO(thread_allocatedp) CTL_PROTO(thread_deallocated) CTL_PROTO(thread_deallocatedp) +CTL_PROTO(thread_idle) CTL_PROTO(config_cache_oblivious) CTL_PROTO(config_debug) CTL_PROTO(config_fill) @@ -81,7 +91,20 @@ CTL_PROTO(config_utrace) CTL_PROTO(config_xmalloc) CTL_PROTO(opt_abort) 
CTL_PROTO(opt_abort_conf) +CTL_PROTO(opt_cache_oblivious) +CTL_PROTO(opt_trust_madvise) CTL_PROTO(opt_confirm_conf) +CTL_PROTO(opt_hpa) +CTL_PROTO(opt_hpa_slab_max_alloc) +CTL_PROTO(opt_hpa_hugification_threshold) +CTL_PROTO(opt_hpa_hugify_delay_ms) +CTL_PROTO(opt_hpa_min_purge_interval_ms) +CTL_PROTO(opt_hpa_dirty_mult) +CTL_PROTO(opt_hpa_sec_nshards) +CTL_PROTO(opt_hpa_sec_max_alloc) +CTL_PROTO(opt_hpa_sec_max_bytes) +CTL_PROTO(opt_hpa_sec_bytes_after_flush) +CTL_PROTO(opt_hpa_sec_batch_fill_extra) CTL_PROTO(opt_metadata_thp) CTL_PROTO(opt_retain) CTL_PROTO(opt_dss) @@ -89,19 +112,31 @@ CTL_PROTO(opt_narenas) CTL_PROTO(opt_percpu_arena) CTL_PROTO(opt_oversize_threshold) CTL_PROTO(opt_background_thread) +CTL_PROTO(opt_mutex_max_spin) CTL_PROTO(opt_max_background_threads) CTL_PROTO(opt_dirty_decay_ms) CTL_PROTO(opt_muzzy_decay_ms) CTL_PROTO(opt_stats_print) CTL_PROTO(opt_stats_print_opts) +CTL_PROTO(opt_stats_interval) +CTL_PROTO(opt_stats_interval_opts) CTL_PROTO(opt_junk) CTL_PROTO(opt_zero) CTL_PROTO(opt_utrace) CTL_PROTO(opt_xmalloc) +CTL_PROTO(opt_experimental_infallible_new) CTL_PROTO(opt_tcache) +CTL_PROTO(opt_tcache_max) +CTL_PROTO(opt_tcache_nslots_small_min) +CTL_PROTO(opt_tcache_nslots_small_max) +CTL_PROTO(opt_tcache_nslots_large) +CTL_PROTO(opt_lg_tcache_nslots_mul) +CTL_PROTO(opt_tcache_gc_incr_bytes) +CTL_PROTO(opt_tcache_gc_delay_bytes) +CTL_PROTO(opt_lg_tcache_flush_small_div) +CTL_PROTO(opt_lg_tcache_flush_large_div) CTL_PROTO(opt_thp) CTL_PROTO(opt_lg_extent_max_active_fit) -CTL_PROTO(opt_lg_tcache_max) CTL_PROTO(opt_prof) CTL_PROTO(opt_prof_prefix) CTL_PROTO(opt_prof_active) @@ -111,7 +146,14 @@ CTL_PROTO(opt_lg_prof_interval) CTL_PROTO(opt_prof_gdump) CTL_PROTO(opt_prof_final) CTL_PROTO(opt_prof_leak) +CTL_PROTO(opt_prof_leak_error) CTL_PROTO(opt_prof_accum) +CTL_PROTO(opt_prof_recent_alloc_max) +CTL_PROTO(opt_prof_stats) +CTL_PROTO(opt_prof_sys_thread_name) +CTL_PROTO(opt_prof_time_res) +CTL_PROTO(opt_lg_san_uaf_align) +CTL_PROTO(opt_zero_realloc) CTL_PROTO(tcache_create) CTL_PROTO(tcache_flush) CTL_PROTO(tcache_destroy) @@ -121,6 +163,7 @@ CTL_PROTO(arena_i_purge) CTL_PROTO(arena_i_reset) CTL_PROTO(arena_i_destroy) CTL_PROTO(arena_i_dss) +CTL_PROTO(arena_i_oversize_threshold) CTL_PROTO(arena_i_dirty_decay_ms) CTL_PROTO(arena_i_muzzy_decay_ms) CTL_PROTO(arena_i_extent_hooks) @@ -148,11 +191,18 @@ CTL_PROTO(prof_thread_active_init) CTL_PROTO(prof_active) CTL_PROTO(prof_dump) CTL_PROTO(prof_gdump) +CTL_PROTO(prof_prefix) CTL_PROTO(prof_reset) CTL_PROTO(prof_interval) CTL_PROTO(lg_prof_sample) CTL_PROTO(prof_log_start) CTL_PROTO(prof_log_stop) +CTL_PROTO(prof_stats_bins_i_live) +CTL_PROTO(prof_stats_bins_i_accum) +INDEX_PROTO(prof_stats_bins_i) +CTL_PROTO(prof_stats_lextents_i_live) +CTL_PROTO(prof_stats_lextents_i_accum) +INDEX_PROTO(prof_stats_lextents_i) CTL_PROTO(stats_arenas_i_small_allocated) CTL_PROTO(stats_arenas_i_small_nmalloc) CTL_PROTO(stats_arenas_i_small_ndalloc) @@ -188,6 +238,39 @@ CTL_PROTO(stats_arenas_i_extents_j_dirty_bytes) CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes) CTL_PROTO(stats_arenas_i_extents_j_retained_bytes) INDEX_PROTO(stats_arenas_i_extents_j) +CTL_PROTO(stats_arenas_i_hpa_shard_npurge_passes) +CTL_PROTO(stats_arenas_i_hpa_shard_npurges) +CTL_PROTO(stats_arenas_i_hpa_shard_nhugifies) +CTL_PROTO(stats_arenas_i_hpa_shard_ndehugifies) + +/* We have a set of stats for full slabs. 
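+ *
+ * [Editor's illustrative sketch, not part of the change: once wired into
+ * the stats tree below, these leaves are reachable through the usual
+ * mallctl interface, e.g. for arena 0 with the HPA enabled:
+ *
+ *     size_t v, sz = sizeof(v);
+ *     mallctl("stats.arenas.0.hpa_shard.full_slabs.npageslabs_huge",
+ *         &v, &sz, NULL, 0);
+ *
+ * assuming the leaf reads back as a size_t; an "epoch" write is still
+ * needed first to refresh the stats snapshot.]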
*/ +CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge) +CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge) +CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge) +CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_huge) +CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge) +CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_ndirty_huge) + +/* A parallel set for the empty slabs. */ +CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge) +CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge) +CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge) +CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_nactive_huge) +CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge) +CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge) + +/* + * And one for the slabs that are neither empty nor full, but indexed by how + * full they are. + */ +CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge) +CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge) +CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge) +CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge) +CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge) +CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge) + +INDEX_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j) CTL_PROTO(stats_arenas_i_nthreads) CTL_PROTO(stats_arenas_i_uptime) CTL_PROTO(stats_arenas_i_dss) @@ -209,8 +292,10 @@ CTL_PROTO(stats_arenas_i_base) CTL_PROTO(stats_arenas_i_internal) CTL_PROTO(stats_arenas_i_metadata_thp) CTL_PROTO(stats_arenas_i_tcache_bytes) +CTL_PROTO(stats_arenas_i_tcache_stashed_bytes) CTL_PROTO(stats_arenas_i_resident) CTL_PROTO(stats_arenas_i_abandoned_vm) +CTL_PROTO(stats_arenas_i_hpa_sec_bytes) INDEX_PROTO(stats_arenas_i) CTL_PROTO(stats_allocated) CTL_PROTO(stats_active) @@ -222,12 +307,21 @@ CTL_PROTO(stats_metadata_thp) CTL_PROTO(stats_resident) CTL_PROTO(stats_mapped) CTL_PROTO(stats_retained) +CTL_PROTO(stats_zero_reallocs) CTL_PROTO(experimental_hooks_install) CTL_PROTO(experimental_hooks_remove) +CTL_PROTO(experimental_hooks_prof_backtrace) +CTL_PROTO(experimental_hooks_prof_dump) +CTL_PROTO(experimental_hooks_safety_check_abort) +CTL_PROTO(experimental_thread_activity_callback) CTL_PROTO(experimental_utilization_query) CTL_PROTO(experimental_utilization_batch_query) CTL_PROTO(experimental_arenas_i_pactivep) INDEX_PROTO(experimental_arenas_i) +CTL_PROTO(experimental_prof_recent_alloc_max) +CTL_PROTO(experimental_prof_recent_alloc_dump) +CTL_PROTO(experimental_batch_alloc) +CTL_PROTO(experimental_arenas_create_ext) #define MUTEX_STATS_CTL_PROTO_GEN(n) \ CTL_PROTO(stats_##n##_num_ops) \ @@ -275,6 +369,11 @@ static const ctl_named_node_t thread_tcache_node[] = { {NAME("flush"), CTL(thread_tcache_flush)} }; +static const ctl_named_node_t thread_peak_node[] = { + {NAME("read"), CTL(thread_peak_read)}, + {NAME("reset"), CTL(thread_peak_reset)}, +}; + static const ctl_named_node_t thread_prof_node[] = { {NAME("name"), CTL(thread_prof_name)}, {NAME("active"), CTL(thread_prof_active)} @@ -287,7 +386,9 @@ static const ctl_named_node_t thread_node[] = { {NAME("deallocated"), CTL(thread_deallocated)}, {NAME("deallocatedp"), CTL(thread_deallocatedp)}, {NAME("tcache"), CHILD(named, thread_tcache)}, - {NAME("prof"), CHILD(named, thread_prof)} + {NAME("peak"), CHILD(named, thread_peak)}, + {NAME("prof"), CHILD(named, thread_prof)}, + {NAME("idle"), CTL(thread_idle)} }; static const ctl_named_node_t config_node[] 
= { @@ -308,27 +409,60 @@ static const ctl_named_node_t config_node[] = { static const ctl_named_node_t opt_node[] = { {NAME("abort"), CTL(opt_abort)}, {NAME("abort_conf"), CTL(opt_abort_conf)}, + {NAME("cache_oblivious"), CTL(opt_cache_oblivious)}, + {NAME("trust_madvise"), CTL(opt_trust_madvise)}, {NAME("confirm_conf"), CTL(opt_confirm_conf)}, + {NAME("hpa"), CTL(opt_hpa)}, + {NAME("hpa_slab_max_alloc"), CTL(opt_hpa_slab_max_alloc)}, + {NAME("hpa_hugification_threshold"), + CTL(opt_hpa_hugification_threshold)}, + {NAME("hpa_hugify_delay_ms"), CTL(opt_hpa_hugify_delay_ms)}, + {NAME("hpa_min_purge_interval_ms"), CTL(opt_hpa_min_purge_interval_ms)}, + {NAME("hpa_dirty_mult"), CTL(opt_hpa_dirty_mult)}, + {NAME("hpa_sec_nshards"), CTL(opt_hpa_sec_nshards)}, + {NAME("hpa_sec_max_alloc"), CTL(opt_hpa_sec_max_alloc)}, + {NAME("hpa_sec_max_bytes"), CTL(opt_hpa_sec_max_bytes)}, + {NAME("hpa_sec_bytes_after_flush"), + CTL(opt_hpa_sec_bytes_after_flush)}, + {NAME("hpa_sec_batch_fill_extra"), + CTL(opt_hpa_sec_batch_fill_extra)}, {NAME("metadata_thp"), CTL(opt_metadata_thp)}, {NAME("retain"), CTL(opt_retain)}, {NAME("dss"), CTL(opt_dss)}, {NAME("narenas"), CTL(opt_narenas)}, {NAME("percpu_arena"), CTL(opt_percpu_arena)}, {NAME("oversize_threshold"), CTL(opt_oversize_threshold)}, + {NAME("mutex_max_spin"), CTL(opt_mutex_max_spin)}, {NAME("background_thread"), CTL(opt_background_thread)}, {NAME("max_background_threads"), CTL(opt_max_background_threads)}, {NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)}, {NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)}, {NAME("stats_print"), CTL(opt_stats_print)}, {NAME("stats_print_opts"), CTL(opt_stats_print_opts)}, + {NAME("stats_interval"), CTL(opt_stats_interval)}, + {NAME("stats_interval_opts"), CTL(opt_stats_interval_opts)}, {NAME("junk"), CTL(opt_junk)}, {NAME("zero"), CTL(opt_zero)}, {NAME("utrace"), CTL(opt_utrace)}, {NAME("xmalloc"), CTL(opt_xmalloc)}, + {NAME("experimental_infallible_new"), + CTL(opt_experimental_infallible_new)}, {NAME("tcache"), CTL(opt_tcache)}, + {NAME("tcache_max"), CTL(opt_tcache_max)}, + {NAME("tcache_nslots_small_min"), + CTL(opt_tcache_nslots_small_min)}, + {NAME("tcache_nslots_small_max"), + CTL(opt_tcache_nslots_small_max)}, + {NAME("tcache_nslots_large"), CTL(opt_tcache_nslots_large)}, + {NAME("lg_tcache_nslots_mul"), CTL(opt_lg_tcache_nslots_mul)}, + {NAME("tcache_gc_incr_bytes"), CTL(opt_tcache_gc_incr_bytes)}, + {NAME("tcache_gc_delay_bytes"), CTL(opt_tcache_gc_delay_bytes)}, + {NAME("lg_tcache_flush_small_div"), + CTL(opt_lg_tcache_flush_small_div)}, + {NAME("lg_tcache_flush_large_div"), + CTL(opt_lg_tcache_flush_large_div)}, {NAME("thp"), CTL(opt_thp)}, {NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)}, - {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)}, {NAME("prof"), CTL(opt_prof)}, {NAME("prof_prefix"), CTL(opt_prof_prefix)}, {NAME("prof_active"), CTL(opt_prof_active)}, @@ -338,7 +472,14 @@ static const ctl_named_node_t opt_node[] = { {NAME("prof_gdump"), CTL(opt_prof_gdump)}, {NAME("prof_final"), CTL(opt_prof_final)}, {NAME("prof_leak"), CTL(opt_prof_leak)}, - {NAME("prof_accum"), CTL(opt_prof_accum)} + {NAME("prof_leak_error"), CTL(opt_prof_leak_error)}, + {NAME("prof_accum"), CTL(opt_prof_accum)}, + {NAME("prof_recent_alloc_max"), CTL(opt_prof_recent_alloc_max)}, + {NAME("prof_stats"), CTL(opt_prof_stats)}, + {NAME("prof_sys_thread_name"), CTL(opt_prof_sys_thread_name)}, + {NAME("prof_time_resolution"), CTL(opt_prof_time_res)}, + {NAME("lg_san_uaf_align"), CTL(opt_lg_san_uaf_align)}, + 
{NAME("zero_realloc"), CTL(opt_zero_realloc)} }; static const ctl_named_node_t tcache_node[] = { @@ -354,6 +495,11 @@ static const ctl_named_node_t arena_i_node[] = { {NAME("reset"), CTL(arena_i_reset)}, {NAME("destroy"), CTL(arena_i_destroy)}, {NAME("dss"), CTL(arena_i_dss)}, + /* + * Undocumented for now, since we anticipate an arena API in flux after + * we cut the last 5-series release. + */ + {NAME("oversize_threshold"), CTL(arena_i_oversize_threshold)}, {NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)}, {NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)}, {NAME("extent_hooks"), CTL(arena_i_extent_hooks)}, @@ -408,17 +554,51 @@ static const ctl_named_node_t arenas_node[] = { {NAME("lookup"), CTL(arenas_lookup)} }; +static const ctl_named_node_t prof_stats_bins_i_node[] = { + {NAME("live"), CTL(prof_stats_bins_i_live)}, + {NAME("accum"), CTL(prof_stats_bins_i_accum)} +}; + +static const ctl_named_node_t super_prof_stats_bins_i_node[] = { + {NAME(""), CHILD(named, prof_stats_bins_i)} +}; + +static const ctl_indexed_node_t prof_stats_bins_node[] = { + {INDEX(prof_stats_bins_i)} +}; + +static const ctl_named_node_t prof_stats_lextents_i_node[] = { + {NAME("live"), CTL(prof_stats_lextents_i_live)}, + {NAME("accum"), CTL(prof_stats_lextents_i_accum)} +}; + +static const ctl_named_node_t super_prof_stats_lextents_i_node[] = { + {NAME(""), CHILD(named, prof_stats_lextents_i)} +}; + +static const ctl_indexed_node_t prof_stats_lextents_node[] = { + {INDEX(prof_stats_lextents_i)} +}; + +static const ctl_named_node_t prof_stats_node[] = { + {NAME("bins"), CHILD(indexed, prof_stats_bins)}, + {NAME("lextents"), CHILD(indexed, prof_stats_lextents)}, +}; + static const ctl_named_node_t prof_node[] = { {NAME("thread_active_init"), CTL(prof_thread_active_init)}, {NAME("active"), CTL(prof_active)}, {NAME("dump"), CTL(prof_dump)}, {NAME("gdump"), CTL(prof_gdump)}, + {NAME("prefix"), CTL(prof_prefix)}, {NAME("reset"), CTL(prof_reset)}, {NAME("interval"), CTL(prof_interval)}, {NAME("lg_sample"), CTL(lg_prof_sample)}, {NAME("log_start"), CTL(prof_log_start)}, - {NAME("log_stop"), CTL(prof_log_stop)} + {NAME("log_stop"), CTL(prof_log_stop)}, + {NAME("stats"), CHILD(named, prof_stats)} }; + static const ctl_named_node_t stats_arenas_i_small_node[] = { {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, @@ -521,6 +701,75 @@ MUTEX_PROF_ARENA_MUTEXES #undef OP }; +static const ctl_named_node_t stats_arenas_i_hpa_shard_full_slabs_node[] = { + {NAME("npageslabs_nonhuge"), + CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)}, + {NAME("npageslabs_huge"), + CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)}, + {NAME("nactive_nonhuge"), + CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge)}, + {NAME("nactive_huge"), + CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)}, + {NAME("ndirty_nonhuge"), + CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge)}, + {NAME("ndirty_huge"), + CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_huge)} +}; + +static const ctl_named_node_t stats_arenas_i_hpa_shard_empty_slabs_node[] = { + {NAME("npageslabs_nonhuge"), + CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge)}, + {NAME("npageslabs_huge"), + CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge)}, + {NAME("nactive_nonhuge"), + CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge)}, + {NAME("nactive_huge"), + CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_huge)}, + {NAME("ndirty_nonhuge"), + 
CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge)}, + {NAME("ndirty_huge"), + CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge)} +}; + +static const ctl_named_node_t stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = { + {NAME("npageslabs_nonhuge"), + CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge)}, + {NAME("npageslabs_huge"), + CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge)}, + {NAME("nactive_nonhuge"), + CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge)}, + {NAME("nactive_huge"), + CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge)}, + {NAME("ndirty_nonhuge"), + CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)}, + {NAME("ndirty_huge"), + CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)} +}; + +static const ctl_named_node_t super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = { + {NAME(""), + CHILD(named, stats_arenas_i_hpa_shard_nonfull_slabs_j)} +}; + +static const ctl_indexed_node_t stats_arenas_i_hpa_shard_nonfull_slabs_node[] = +{ + {INDEX(stats_arenas_i_hpa_shard_nonfull_slabs_j)} +}; + +static const ctl_named_node_t stats_arenas_i_hpa_shard_node[] = { + {NAME("full_slabs"), CHILD(named, + stats_arenas_i_hpa_shard_full_slabs)}, + {NAME("empty_slabs"), CHILD(named, + stats_arenas_i_hpa_shard_empty_slabs)}, + {NAME("nonfull_slabs"), CHILD(indexed, + stats_arenas_i_hpa_shard_nonfull_slabs)}, + + {NAME("npurge_passes"), CTL(stats_arenas_i_hpa_shard_npurge_passes)}, + {NAME("npurges"), CTL(stats_arenas_i_hpa_shard_npurges)}, + {NAME("nhugifies"), CTL(stats_arenas_i_hpa_shard_nhugifies)}, + {NAME("ndehugifies"), CTL(stats_arenas_i_hpa_shard_ndehugifies)} +}; + static const ctl_named_node_t stats_arenas_i_node[] = { {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, {NAME("uptime"), CTL(stats_arenas_i_uptime)}, @@ -543,14 +792,18 @@ static const ctl_named_node_t stats_arenas_i_node[] = { {NAME("internal"), CTL(stats_arenas_i_internal)}, {NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)}, {NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)}, + {NAME("tcache_stashed_bytes"), + CTL(stats_arenas_i_tcache_stashed_bytes)}, {NAME("resident"), CTL(stats_arenas_i_resident)}, {NAME("abandoned_vm"), CTL(stats_arenas_i_abandoned_vm)}, + {NAME("hpa_sec_bytes"), CTL(stats_arenas_i_hpa_sec_bytes)}, {NAME("small"), CHILD(named, stats_arenas_i_small)}, {NAME("large"), CHILD(named, stats_arenas_i_large)}, {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)}, {NAME("extents"), CHILD(indexed, stats_arenas_i_extents)}, - {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)} + {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)}, + {NAME("hpa_shard"), CHILD(named, stats_arenas_i_hpa_shard)} }; static const ctl_named_node_t super_stats_arenas_i_node[] = { {NAME(""), CHILD(named, stats_arenas_i)} @@ -589,12 +842,21 @@ static const ctl_named_node_t stats_node[] = { {NAME("background_thread"), CHILD(named, stats_background_thread)}, {NAME("mutexes"), CHILD(named, stats_mutexes)}, - {NAME("arenas"), CHILD(indexed, stats_arenas)} + {NAME("arenas"), CHILD(indexed, stats_arenas)}, + {NAME("zero_reallocs"), CTL(stats_zero_reallocs)}, }; static const ctl_named_node_t experimental_hooks_node[] = { {NAME("install"), CTL(experimental_hooks_install)}, - {NAME("remove"), CTL(experimental_hooks_remove)} + {NAME("remove"), CTL(experimental_hooks_remove)}, + {NAME("prof_backtrace"), CTL(experimental_hooks_prof_backtrace)}, + {NAME("prof_dump"), 
CTL(experimental_hooks_prof_dump)}, + {NAME("safety_check_abort"), CTL(experimental_hooks_safety_check_abort)}, +}; + +static const ctl_named_node_t experimental_thread_node[] = { + {NAME("activity_callback"), + CTL(experimental_thread_activity_callback)} }; static const ctl_named_node_t experimental_utilization_node[] = { @@ -613,10 +875,19 @@ static const ctl_indexed_node_t experimental_arenas_node[] = { {INDEX(experimental_arenas_i)} }; +static const ctl_named_node_t experimental_prof_recent_node[] = { + {NAME("alloc_max"), CTL(experimental_prof_recent_alloc_max)}, + {NAME("alloc_dump"), CTL(experimental_prof_recent_alloc_dump)}, +}; + static const ctl_named_node_t experimental_node[] = { {NAME("hooks"), CHILD(named, experimental_hooks)}, {NAME("utilization"), CHILD(named, experimental_utilization)}, - {NAME("arenas"), CHILD(indexed, experimental_arenas)} + {NAME("arenas"), CHILD(indexed, experimental_arenas)}, + {NAME("arenas_create_ext"), CTL(experimental_arenas_create_ext)}, + {NAME("prof_recent"), CHILD(named, experimental_prof_recent)}, + {NAME("batch_alloc"), CTL(experimental_batch_alloc)}, + {NAME("thread"), CHILD(named, experimental_thread)} }; static const ctl_named_node_t root_node[] = { @@ -650,28 +921,13 @@ static const ctl_named_node_t super_root_node[] = { * synchronized by the ctl mutex. */ static void -ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) { -#ifdef JEMALLOC_ATOMIC_U64 - uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED); - uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED); - atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED); -#else - *dst += *src; -#endif -} - -/* Likewise: with ctl mutex synchronization, reading is simple. */ -static uint64_t -ctl_arena_stats_read_u64(arena_stats_u64_t *p) { -#ifdef JEMALLOC_ATOMIC_U64 - return atomic_load_u64(p, ATOMIC_RELAXED); -#else - return *p; -#endif +ctl_accum_locked_u64(locked_u64_t *dst, locked_u64_t *src) { + locked_inc_u64_unsynchronized(dst, + locked_read_u64_unsynchronized(src)); } static void -accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) { +ctl_accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) { size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED); size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED); atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED); @@ -783,11 +1039,15 @@ ctl_arena_clear(ctl_arena_t *ctl_arena) { ctl_arena->astats->nfills_small = 0; ctl_arena->astats->nflushes_small = 0; memset(ctl_arena->astats->bstats, 0, SC_NBINS * - sizeof(bin_stats_t)); + sizeof(bin_stats_data_t)); memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) * sizeof(arena_stats_large_t)); memset(ctl_arena->astats->estats, 0, SC_NPSIZES * - sizeof(arena_stats_extents_t)); + sizeof(pac_estats_t)); + memset(&ctl_arena->astats->hpastats, 0, + sizeof(hpa_shard_stats_t)); + memset(&ctl_arena->astats->secstats, 0, + sizeof(sec_stats_t)); } } @@ -801,22 +1061,19 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) { &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive, &ctl_arena->pdirty, &ctl_arena->pmuzzy, &ctl_arena->astats->astats, ctl_arena->astats->bstats, - ctl_arena->astats->lstats, ctl_arena->astats->estats); + ctl_arena->astats->lstats, ctl_arena->astats->estats, + &ctl_arena->astats->hpastats, &ctl_arena->astats->secstats); for (i = 0; i < SC_NBINS; i++) { - ctl_arena->astats->allocated_small += - ctl_arena->astats->bstats[i].curregs * + bin_stats_t *bstats = + &ctl_arena->astats->bstats[i].stats_data; + ctl_arena->astats->allocated_small 
+= bstats->curregs * sz_index2size(i); - ctl_arena->astats->nmalloc_small += - ctl_arena->astats->bstats[i].nmalloc; - ctl_arena->astats->ndalloc_small += - ctl_arena->astats->bstats[i].ndalloc; - ctl_arena->astats->nrequests_small += - ctl_arena->astats->bstats[i].nrequests; - ctl_arena->astats->nfills_small += - ctl_arena->astats->bstats[i].nfills; - ctl_arena->astats->nflushes_small += - ctl_arena->astats->bstats[i].nflushes; + ctl_arena->astats->nmalloc_small += bstats->nmalloc; + ctl_arena->astats->ndalloc_small += bstats->ndalloc; + ctl_arena->astats->nrequests_small += bstats->nrequests; + ctl_arena->astats->nfills_small += bstats->nfills; + ctl_arena->astats->nflushes_small += bstats->nflushes; } } else { arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads, @@ -848,27 +1105,32 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, ctl_arena_stats_t *astats = ctl_arena->astats; if (!destroyed) { - accum_atomic_zu(&sdstats->astats.mapped, - &astats->astats.mapped); - accum_atomic_zu(&sdstats->astats.retained, - &astats->astats.retained); - accum_atomic_zu(&sdstats->astats.extent_avail, - &astats->astats.extent_avail); - } - - ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge, - &astats->astats.decay_dirty.npurge); - ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise, - &astats->astats.decay_dirty.nmadvise); - ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged, - &astats->astats.decay_dirty.purged); - - ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge, - &astats->astats.decay_muzzy.npurge); - ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise, - &astats->astats.decay_muzzy.nmadvise); - ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged, - &astats->astats.decay_muzzy.purged); + sdstats->astats.mapped += astats->astats.mapped; + sdstats->astats.pa_shard_stats.pac_stats.retained + += astats->astats.pa_shard_stats.pac_stats.retained; + sdstats->astats.pa_shard_stats.edata_avail + += astats->astats.pa_shard_stats.edata_avail; + } + + ctl_accum_locked_u64( + &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge, + &astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge); + ctl_accum_locked_u64( + &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise, + &astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise); + ctl_accum_locked_u64( + &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.purged, + &astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged); + + ctl_accum_locked_u64( + &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge, + &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge); + ctl_accum_locked_u64( + &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise, + &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise); + ctl_accum_locked_u64( + &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged, + &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged); #define OP(mtx) malloc_mutex_prof_merge( \ &(sdstats->astats.mutex_prof_data[ \ @@ -878,14 +1140,11 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, MUTEX_PROF_ARENA_MUTEXES #undef OP if (!destroyed) { - accum_atomic_zu(&sdstats->astats.base, - &astats->astats.base); - accum_atomic_zu(&sdstats->astats.internal, + sdstats->astats.base += astats->astats.base; + sdstats->astats.resident += astats->astats.resident; + sdstats->astats.metadata_thp += astats->astats.metadata_thp; + ctl_accum_atomic_zu(&sdstats->astats.internal, 
&astats->astats.internal); - accum_atomic_zu(&sdstats->astats.resident, - &astats->astats.resident); - accum_atomic_zu(&sdstats->astats.metadata_thp, - &astats->astats.metadata_thp); } else { assert(atomic_load_zu( &astats->astats.internal, ATOMIC_RELAXED) == 0); @@ -903,23 +1162,23 @@ MUTEX_PROF_ARENA_MUTEXES sdstats->nflushes_small += astats->nflushes_small; if (!destroyed) { - accum_atomic_zu(&sdstats->astats.allocated_large, - &astats->astats.allocated_large); + sdstats->astats.allocated_large += + astats->astats.allocated_large; } else { - assert(atomic_load_zu(&astats->astats.allocated_large, - ATOMIC_RELAXED) == 0); + assert(astats->astats.allocated_large == 0); } - ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large, - &astats->astats.nmalloc_large); - ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large, - &astats->astats.ndalloc_large); - ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large, - &astats->astats.nrequests_large); - accum_atomic_zu(&sdstats->astats.abandoned_vm, - &astats->astats.abandoned_vm); - - accum_atomic_zu(&sdstats->astats.tcache_bytes, - &astats->astats.tcache_bytes); + sdstats->astats.nmalloc_large += astats->astats.nmalloc_large; + sdstats->astats.ndalloc_large += astats->astats.ndalloc_large; + sdstats->astats.nrequests_large + += astats->astats.nrequests_large; + sdstats->astats.nflushes_large += astats->astats.nflushes_large; + ctl_accum_atomic_zu( + &sdstats->astats.pa_shard_stats.pac_stats.abandoned_vm, + &astats->astats.pa_shard_stats.pac_stats.abandoned_vm); + + sdstats->astats.tcache_bytes += astats->astats.tcache_bytes; + sdstats->astats.tcache_stashed_bytes += + astats->astats.tcache_stashed_bytes; if (ctl_arena->arena_ind == 0) { sdstats->astats.uptime = astats->astats.uptime; @@ -927,29 +1186,26 @@ MUTEX_PROF_ARENA_MUTEXES /* Merge bin stats. */ for (i = 0; i < SC_NBINS; i++) { - sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; - sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; - sdstats->bstats[i].nrequests += - astats->bstats[i].nrequests; + bin_stats_t *bstats = &astats->bstats[i].stats_data; + bin_stats_t *merged = &sdstats->bstats[i].stats_data; + merged->nmalloc += bstats->nmalloc; + merged->ndalloc += bstats->ndalloc; + merged->nrequests += bstats->nrequests; if (!destroyed) { - sdstats->bstats[i].curregs += - astats->bstats[i].curregs; + merged->curregs += bstats->curregs; } else { - assert(astats->bstats[i].curregs == 0); + assert(bstats->curregs == 0); } - sdstats->bstats[i].nfills += astats->bstats[i].nfills; - sdstats->bstats[i].nflushes += - astats->bstats[i].nflushes; - sdstats->bstats[i].nslabs += astats->bstats[i].nslabs; - sdstats->bstats[i].reslabs += astats->bstats[i].reslabs; + merged->nfills += bstats->nfills; + merged->nflushes += bstats->nflushes; + merged->nslabs += bstats->nslabs; + merged->reslabs += bstats->reslabs; if (!destroyed) { - sdstats->bstats[i].curslabs += - astats->bstats[i].curslabs; - sdstats->bstats[i].nonfull_slabs += - astats->bstats[i].nonfull_slabs; + merged->curslabs += bstats->curslabs; + merged->nonfull_slabs += bstats->nonfull_slabs; } else { - assert(astats->bstats[i].curslabs == 0); - assert(astats->bstats[i].nonfull_slabs == 0); + assert(bstats->curslabs == 0); + assert(bstats->nonfull_slabs == 0); } malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data, &astats->bstats[i].mutex_data); @@ -957,11 +1213,11 @@ MUTEX_PROF_ARENA_MUTEXES /* Merge stats for large allocations. 
*/ for (i = 0; i < SC_NSIZES - SC_NBINS; i++) { - ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc, + ctl_accum_locked_u64(&sdstats->lstats[i].nmalloc, &astats->lstats[i].nmalloc); - ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc, + ctl_accum_locked_u64(&sdstats->lstats[i].ndalloc, &astats->lstats[i].ndalloc); - ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests, + ctl_accum_locked_u64(&sdstats->lstats[i].nrequests, &astats->lstats[i].nrequests); if (!destroyed) { sdstats->lstats[i].curlextents += @@ -973,19 +1229,21 @@ MUTEX_PROF_ARENA_MUTEXES /* Merge extents stats. */ for (i = 0; i < SC_NPSIZES; i++) { - accum_atomic_zu(&sdstats->estats[i].ndirty, - &astats->estats[i].ndirty); - accum_atomic_zu(&sdstats->estats[i].nmuzzy, - &astats->estats[i].nmuzzy); - accum_atomic_zu(&sdstats->estats[i].nretained, - &astats->estats[i].nretained); - accum_atomic_zu(&sdstats->estats[i].dirty_bytes, - &astats->estats[i].dirty_bytes); - accum_atomic_zu(&sdstats->estats[i].muzzy_bytes, - &astats->estats[i].muzzy_bytes); - accum_atomic_zu(&sdstats->estats[i].retained_bytes, - &astats->estats[i].retained_bytes); + sdstats->estats[i].ndirty += astats->estats[i].ndirty; + sdstats->estats[i].nmuzzy += astats->estats[i].nmuzzy; + sdstats->estats[i].nretained + += astats->estats[i].nretained; + sdstats->estats[i].dirty_bytes + += astats->estats[i].dirty_bytes; + sdstats->estats[i].muzzy_bytes + += astats->estats[i].muzzy_bytes; + sdstats->estats[i].retained_bytes + += astats->estats[i].retained_bytes; } + + /* Merge HPA stats. */ + hpa_shard_stats_accum(&sdstats->hpastats, &astats->hpastats); + sec_stats_accum(&sdstats->secstats, &astats->secstats); } } @@ -1001,7 +1259,7 @@ ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena, } static unsigned -ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) { +ctl_arena_init(tsd_t *tsd, const arena_config_t *config) { unsigned arena_ind; ctl_arena_t *ctl_arena; @@ -1019,7 +1277,7 @@ ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) { } /* Initialize new arena. 
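	 *
	 * [Editor's illustrative sketch, not part of the change: with the
	 * switch from a bare extent_hooks_t pointer to arena_config_t, the
	 * classic creation path still only carries the hook pointer, e.g.
	 *
	 *     unsigned ind; size_t sz = sizeof(ind);
	 *     mallctl("arenas.create", &ind, &sz, NULL, 0);
	 *
	 * optionally passing a pointer to an extent_hooks_t table as newp,
	 * while the new experimental.arenas_create_ext leaf below writes a
	 * whole arena_config_t instead.]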
*/ - if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) { + if (arena_init(tsd_tsdn(tsd), arena_ind, config) == NULL) { return UINT_MAX; } @@ -1036,8 +1294,11 @@ ctl_background_thread_stats_read(tsdn_t *tsdn) { if (!have_background_thread || background_thread_stats_read(tsdn, stats)) { memset(stats, 0, sizeof(background_thread_stats_t)); - nstime_init(&stats->run_interval, 0); + nstime_init_zero(&stats->run_interval); } + malloc_mutex_prof_copy( + &ctl_stats->mutex_prof_data[global_prof_mutex_max_per_bg_thd], + &stats->max_counter_per_bg_thd); } static void @@ -1069,21 +1330,17 @@ ctl_refresh(tsdn_t *tsdn) { if (config_stats) { ctl_stats->allocated = ctl_sarena->astats->allocated_small + - atomic_load_zu(&ctl_sarena->astats->astats.allocated_large, - ATOMIC_RELAXED); + ctl_sarena->astats->astats.allocated_large; ctl_stats->active = (ctl_sarena->pactive << LG_PAGE); - ctl_stats->metadata = atomic_load_zu( - &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) + + ctl_stats->metadata = ctl_sarena->astats->astats.base + atomic_load_zu(&ctl_sarena->astats->astats.internal, ATOMIC_RELAXED); - ctl_stats->metadata_thp = atomic_load_zu( - &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED); - ctl_stats->resident = atomic_load_zu( - &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED); - ctl_stats->mapped = atomic_load_zu( - &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED); - ctl_stats->retained = atomic_load_zu( - &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED); + ctl_stats->resident = ctl_sarena->astats->astats.resident; + ctl_stats->metadata_thp = + ctl_sarena->astats->astats.metadata_thp; + ctl_stats->mapped = ctl_sarena->astats->astats.mapped; + ctl_stats->retained = ctl_sarena->astats->astats + .pa_shard_stats.pac_stats.retained; ctl_background_thread_stats_read(tsdn); @@ -1093,8 +1350,20 @@ ctl_refresh(tsdn_t *tsdn) { malloc_mutex_unlock(tsdn, &mtx); if (config_prof && opt_prof) { - READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof, - bt2gctx_mtx); + READ_GLOBAL_MUTEX_PROF_DATA( + global_prof_mutex_prof, bt2gctx_mtx); + READ_GLOBAL_MUTEX_PROF_DATA( + global_prof_mutex_prof_thds_data, tdatas_mtx); + READ_GLOBAL_MUTEX_PROF_DATA( + global_prof_mutex_prof_dump, prof_dump_mtx); + READ_GLOBAL_MUTEX_PROF_DATA( + global_prof_mutex_prof_recent_alloc, + prof_recent_alloc_mtx); + READ_GLOBAL_MUTEX_PROF_DATA( + global_prof_mutex_prof_recent_dump, + prof_recent_dump_mtx); + READ_GLOBAL_MUTEX_PROF_DATA( + global_prof_mutex_prof_stats, prof_stats_mtx); } if (have_background_thread) { READ_GLOBAL_MUTEX_PROF_DATA( @@ -1191,8 +1460,9 @@ ctl_init(tsd_t *tsd) { } static int -ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, - size_t *mibp, size_t *depthp) { +ctl_lookup(tsdn_t *tsdn, const ctl_named_node_t *starting_node, + const char *name, const ctl_named_node_t **ending_nodep, size_t *mibp, + size_t *depthp) { int ret; const char *elm, *tdot, *dot; size_t elen, i, j; @@ -1206,7 +1476,7 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, ret = ENOENT; goto label_return; } - node = super_root_node; + node = starting_node; for (i = 0; i < *depthp; i++) { assert(node); assert(node->nchildren > 0); @@ -1220,10 +1490,6 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, if (strlen(child->name) == elen && strncmp(elm, child->name, elen) == 0) { node = child; - if (nodesp != NULL) { - nodesp[i] = - (const ctl_node_t *)node; - } mibp[i] = j; break; } @@ -1250,13 +1516,11 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t 
const **nodesp, goto label_return; } - if (nodesp != NULL) { - nodesp[i] = (const ctl_node_t *)node; - } mibp[i] = (size_t)index; } - if (node->ctl != NULL) { + /* Reached the end? */ + if (node->ctl != NULL || *dot == '\0') { /* Terminal node. */ if (*dot != '\0') { /* @@ -1272,16 +1536,14 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, } /* Update elm. */ - if (*dot == '\0') { - /* No more elements. */ - ret = ENOENT; - goto label_return; - } elm = &dot[1]; dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0'); elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); } + if (ending_nodep != NULL) { + *ending_nodep = node; + } ret = 0; label_return: @@ -1293,7 +1555,6 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; size_t depth; - ctl_node_t const *nodes[CTL_MAX_DEPTH]; size_t mib[CTL_MAX_DEPTH]; const ctl_named_node_t *node; @@ -1303,12 +1564,12 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, } depth = CTL_MAX_DEPTH; - ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth); + ret = ctl_lookup(tsd_tsdn(tsd), super_root_node, name, &node, mib, + &depth); if (ret != 0) { goto label_return; } - node = ctl_named_node(nodes[depth-1]); if (node != NULL && node->ctl) { ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen); } else { @@ -1329,26 +1590,19 @@ ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) { goto label_return; } - ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp); + ret = ctl_lookup(tsd_tsdn(tsd), super_root_node, name, NULL, mibp, + miblenp); label_return: return(ret); } -int -ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) { +static int +ctl_lookupbymib(tsdn_t *tsdn, const ctl_named_node_t **ending_nodep, + const size_t *mib, size_t miblen) { int ret; - const ctl_named_node_t *node; - size_t i; - - if (!ctl_initialized && ctl_init(tsd)) { - ret = EAGAIN; - goto label_return; - } - /* Iterate down the tree. */ - node = super_root_node; - for (i = 0; i < miblen; i++) { + const ctl_named_node_t *node = super_root_node; + for (size_t i = 0; i < miblen; i++) { assert(node); assert(node->nchildren > 0); if (ctl_named_node(node->children) != NULL) { @@ -1363,13 +1617,36 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, /* Indexed element. */ inode = ctl_indexed_node(node->children); - node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]); + node = inode->index(tsdn, mib, miblen, mib[i]); if (node == NULL) { ret = ENOENT; goto label_return; } } } + assert(ending_nodep != NULL); + *ending_nodep = node; + ret = 0; + +label_return: + return(ret); +} + +int +ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + const ctl_named_node_t *node; + + if (!ctl_initialized && ctl_init(tsd)) { + ret = EAGAIN; + goto label_return; + } + + ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen); + if (ret != 0) { + goto label_return; + } /* Call the ctl function. 
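	 *
	 * [Editor's illustrative sketch, not part of the change: this is the
	 * path behind the public mallctlbymib() entry point, e.g.
	 *
	 *     size_t mib[2], miblen = 2;
	 *     mallctlnametomib("stats.allocated", mib, &miblen);
	 *     size_t allocated, sz = sizeof(allocated);
	 *     mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0);
	 *
	 * The name lookup is done once, after which the cached MIB can be
	 * reused for repeated reads.]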
*/ if (node && node->ctl) { @@ -1383,6 +1660,81 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, return(ret); } +int +ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name, + size_t *miblenp) { + int ret; + const ctl_named_node_t *node; + + if (!ctl_initialized && ctl_init(tsd)) { + ret = EAGAIN; + goto label_return; + } + + ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen); + if (ret != 0) { + goto label_return; + } + if (node == NULL || node->ctl != NULL) { + ret = ENOENT; + goto label_return; + } + + assert(miblenp != NULL); + assert(*miblenp >= miblen); + *miblenp -= miblen; + ret = ctl_lookup(tsd_tsdn(tsd), node, name, NULL, mib + miblen, + miblenp); + *miblenp += miblen; +label_return: + return(ret); +} + +int +ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name, + size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + const ctl_named_node_t *node; + + if (!ctl_initialized && ctl_init(tsd)) { + ret = EAGAIN; + goto label_return; + } + + ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen); + if (ret != 0) { + goto label_return; + } + if (node == NULL || node->ctl != NULL) { + ret = ENOENT; + goto label_return; + } + + assert(miblenp != NULL); + assert(*miblenp >= miblen); + *miblenp -= miblen; + /* + * The same node supplies the starting node and stores the ending node. + */ + ret = ctl_lookup(tsd_tsdn(tsd), node, name, &node, mib + miblen, + miblenp); + *miblenp += miblen; + if (ret != 0) { + goto label_return; + } + + if (node != NULL && node->ctl) { + ret = node->ctl(tsd, mib, *miblenp, oldp, oldlenp, newp, + newlen); + } else { + /* The name refers to a partial path through the ctl tree. */ + ret = ENOENT; + } + +label_return: + return(ret); +} + bool ctl_boot(void) { if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL, @@ -1410,6 +1762,11 @@ ctl_postfork_child(tsdn_t *tsdn) { malloc_mutex_postfork_child(tsdn, &ctl_mtx); } +void +ctl_mtx_assert_held(tsdn_t *tsdn) { + malloc_mutex_assert_owner(tsdn, &ctl_mtx); +} + /******************************************************************************/ /* *_ctl() functions. */ @@ -1427,6 +1784,7 @@ ctl_postfork_child(tsdn_t *tsdn) { } \ } while (0) +/* Can read or write, but not both. */ #define READ_XOR_WRITE() do { \ if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \ newlen != 0)) { \ @@ -1435,12 +1793,31 @@ ctl_postfork_child(tsdn_t *tsdn) { } \ } while (0) +/* Can neither read nor write. */ +#define NEITHER_READ_NOR_WRITE() do { \ + if (oldp != NULL || oldlenp != NULL || newp != NULL || \ + newlen != 0) { \ + ret = EPERM; \ + goto label_return; \ + } \ +} while (0) + +/* Verify that the space provided is enough. */ +#define VERIFY_READ(t) do { \ + if (oldp == NULL || oldlenp == NULL || *oldlenp != sizeof(t)) { \ + *oldlenp = 0; \ + ret = EINVAL; \ + goto label_return; \ + } \ +} while (0) + #define READ(v, t) do { \ if (oldp != NULL && oldlenp != NULL) { \ if (*oldlenp != sizeof(t)) { \ size_t copylen = (sizeof(t) <= *oldlenp) \ ? 
sizeof(t) : *oldlenp; \ memcpy(oldp, (void *)&(v), copylen); \ + *oldlenp = copylen; \ ret = EINVAL; \ goto label_return; \ } \ @@ -1458,6 +1835,14 @@ ctl_postfork_child(tsdn_t *tsdn) { } \ } while (0) +#define ASSURED_WRITE(v, t) do { \ + if (newp == NULL || newlen != sizeof(t)) { \ + ret = EINVAL; \ + goto label_return; \ + } \ + (v) = *(t *)newp; \ +} while (0) + #define MIB_UNSIGNED(v, i) do { \ if (mib[i] > UINT_MAX) { \ ret = EFAULT; \ @@ -1497,8 +1882,8 @@ label_return: \ #define CTL_RO_CGEN(c, n, v, t) \ static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ @@ -1540,8 +1925,8 @@ label_return: \ */ #define CTL_RO_NL_CGEN(c, n, v, t) \ static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ @@ -1559,8 +1944,8 @@ label_return: \ #define CTL_RO_NL_GEN(n, v, t) \ static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ @@ -1573,29 +1958,10 @@ label_return: \ return ret; \ } -#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ -static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen) { \ - int ret; \ - t oldval; \ - \ - if (!(c)) { \ - return ENOENT; \ - } \ - READONLY(); \ - oldval = (m(tsd)); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - return ret; \ -} - #define CTL_RO_CONFIG_GEN(n, t) \ static int \ -n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ - void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ @@ -1761,7 +2127,34 @@ CTL_RO_CONFIG_GEN(config_xmalloc, bool) CTL_RO_NL_GEN(opt_abort, opt_abort, bool) CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool) +CTL_RO_NL_GEN(opt_cache_oblivious, opt_cache_oblivious, bool) +CTL_RO_NL_GEN(opt_trust_madvise, opt_trust_madvise, bool) CTL_RO_NL_GEN(opt_confirm_conf, opt_confirm_conf, bool) + +/* HPA options. */ +CTL_RO_NL_GEN(opt_hpa, opt_hpa, bool) +CTL_RO_NL_GEN(opt_hpa_hugification_threshold, + opt_hpa_opts.hugification_threshold, size_t) +CTL_RO_NL_GEN(opt_hpa_hugify_delay_ms, opt_hpa_opts.hugify_delay_ms, uint64_t) +CTL_RO_NL_GEN(opt_hpa_min_purge_interval_ms, opt_hpa_opts.min_purge_interval_ms, + uint64_t) + +/* + * This will have to change before we publicly document this option; fxp_t and + * its representation are internal implementation details. 
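+ *
+ * [Editor's note, hedged: fxp_t is presumably the allocator's internal
+ * fixed-point fraction encoding (see the new fxp.h header accompanying
+ * this import); the generated reader below copies the value out
+ * verbatim, so a consumer would see the raw encoding rather than a
+ * floating-point ratio, which is the concern voiced above.]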
+ */ +CTL_RO_NL_GEN(opt_hpa_dirty_mult, opt_hpa_opts.dirty_mult, fxp_t) +CTL_RO_NL_GEN(opt_hpa_slab_max_alloc, opt_hpa_opts.slab_max_alloc, size_t) + +/* HPA SEC options */ +CTL_RO_NL_GEN(opt_hpa_sec_nshards, opt_hpa_sec_opts.nshards, size_t) +CTL_RO_NL_GEN(opt_hpa_sec_max_alloc, opt_hpa_sec_opts.max_alloc, size_t) +CTL_RO_NL_GEN(opt_hpa_sec_max_bytes, opt_hpa_sec_opts.max_bytes, size_t) +CTL_RO_NL_GEN(opt_hpa_sec_bytes_after_flush, opt_hpa_sec_opts.bytes_after_flush, + size_t) +CTL_RO_NL_GEN(opt_hpa_sec_batch_fill_extra, opt_hpa_sec_opts.batch_fill_extra, + size_t) + CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp], const char *) CTL_RO_NL_GEN(opt_retain, opt_retain, bool) @@ -1769,6 +2162,7 @@ CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned) CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena], const char *) +CTL_RO_NL_GEN(opt_mutex_max_spin, opt_mutex_max_spin, int64_t) CTL_RO_NL_GEN(opt_oversize_threshold, opt_oversize_threshold, size_t) CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool) CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t) @@ -1776,15 +2170,31 @@ CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t) CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t) CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *) +CTL_RO_NL_GEN(opt_stats_interval, opt_stats_interval, int64_t) +CTL_RO_NL_GEN(opt_stats_interval_opts, opt_stats_interval_opts, const char *) CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *) CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool) CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) +CTL_RO_NL_CGEN(config_enable_cxx, opt_experimental_infallible_new, + opt_experimental_infallible_new, bool) CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool) +CTL_RO_NL_GEN(opt_tcache_max, opt_tcache_max, size_t) +CTL_RO_NL_GEN(opt_tcache_nslots_small_min, opt_tcache_nslots_small_min, + unsigned) +CTL_RO_NL_GEN(opt_tcache_nslots_small_max, opt_tcache_nslots_small_max, + unsigned) +CTL_RO_NL_GEN(opt_tcache_nslots_large, opt_tcache_nslots_large, unsigned) +CTL_RO_NL_GEN(opt_lg_tcache_nslots_mul, opt_lg_tcache_nslots_mul, ssize_t) +CTL_RO_NL_GEN(opt_tcache_gc_incr_bytes, opt_tcache_gc_incr_bytes, size_t) +CTL_RO_NL_GEN(opt_tcache_gc_delay_bytes, opt_tcache_gc_delay_bytes, size_t) +CTL_RO_NL_GEN(opt_lg_tcache_flush_small_div, opt_lg_tcache_flush_small_div, + unsigned) +CTL_RO_NL_GEN(opt_lg_tcache_flush_large_div, opt_lg_tcache_flush_large_div, + unsigned) CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *) CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit, size_t) -CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) @@ -1796,6 +2206,18 @@ CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_leak_error, opt_prof_leak_error, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_recent_alloc_max, + 
opt_prof_recent_alloc_max, ssize_t) +CTL_RO_NL_CGEN(config_prof, opt_prof_stats, opt_prof_stats, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_sys_thread_name, opt_prof_sys_thread_name, + bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_time_res, + prof_time_res_mode_names[opt_prof_time_res], const char *) +CTL_RO_NL_CGEN(config_uaf_detection, opt_lg_san_uaf_align, + opt_lg_san_uaf_align, ssize_t) +CTL_RO_NL_GEN(opt_zero_realloc, + zero_realloc_mode_names[opt_zero_realloc_action], const char *) /******************************************************************************/ @@ -1843,10 +2265,11 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, goto label_return; } /* Set new arena/tcache associations. */ - arena_migrate(tsd, oldind, newind); + arena_migrate(tsd, oldarena, newarena); if (tcache_available(tsd)) { tcache_arena_reassociate(tsd_tsdn(tsd), - tsd_tcachep_get(tsd), newarena); + tsd_tcache_slowp_get(tsd), tsd_tcachep_get(tsd), + newarena); } } @@ -1855,14 +2278,10 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, return ret; } -CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get, - uint64_t) -CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get, - uint64_t *) -CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get, - uint64_t) -CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, - tsd_thread_deallocatedp_get, uint64_t *) +CTL_RO_NL_GEN(thread_allocated, tsd_thread_allocated_get(tsd), uint64_t) +CTL_RO_NL_GEN(thread_allocatedp, tsd_thread_allocatedp_get(tsd), uint64_t *) +CTL_RO_NL_GEN(thread_deallocated, tsd_thread_deallocated_get(tsd), uint64_t) +CTL_RO_NL_GEN(thread_deallocatedp, tsd_thread_deallocatedp_get(tsd), uint64_t *) static int thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, @@ -1897,8 +2316,7 @@ thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, goto label_return; } - READONLY(); - WRITEONLY(); + NEITHER_READ_NOR_WRITE(); tcache_flush(tsd); @@ -1907,13 +2325,45 @@ thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, return ret; } +static int +thread_peak_read_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { + int ret; + if (!config_stats) { + return ENOENT; + } + READONLY(); + peak_event_update(tsd); + uint64_t result = peak_event_max(tsd); + READ(result, uint64_t); + ret = 0; +label_return: + return ret; +} + +static int +thread_peak_reset_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { + int ret; + if (!config_stats) { + return ENOENT; + } + NEITHER_READ_NOR_WRITE(); + peak_event_zero(tsd); + ret = 0; +label_return: + return ret; +} + static int thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; - if (!config_prof) { + if (!config_prof || !opt_prof) { return ENOENT; } @@ -1950,8 +2400,12 @@ thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, return ENOENT; } - oldval = prof_thread_active_get(tsd); + oldval = opt_prof ? 
prof_thread_active_get(tsd) : false; if (newp != NULL) { + if (!opt_prof) { + ret = ENOENT; + goto label_return; + } if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; @@ -1968,6 +2422,39 @@ thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, return ret; } +static int +thread_idle_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { + int ret; + + NEITHER_READ_NOR_WRITE(); + + if (tcache_available(tsd)) { + tcache_flush(tsd); + } + /* + * This heuristic is perhaps not the most well-considered. But it + * matches the only idling policy we have experience with in the status + * quo. Over time we should investigate more principled approaches. + */ + if (opt_narenas > ncpus * 2) { + arena_t *arena = arena_choose(tsd, NULL); + if (arena != NULL) { + arena_decay(tsd_tsdn(tsd), arena, false, true); + } + /* + * The missing arena case is not actually an error; a thread + * might be idle before it associates itself to one. This is + * unusual, but not wrong. + */ + } + + ret = 0; +label_return: + return ret; +} + /******************************************************************************/ static int @@ -1977,7 +2464,8 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, unsigned tcache_ind; READONLY(); - if (tcaches_create(tsd, &tcache_ind)) { + VERIFY_READ(unsigned); + if (tcaches_create(tsd, b0get(), &tcache_ind)) { ret = EFAULT; goto label_return; } @@ -1995,12 +2483,7 @@ tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, unsigned tcache_ind; WRITEONLY(); - tcache_ind = UINT_MAX; - WRITE(tcache_ind, unsigned); - if (tcache_ind == UINT_MAX) { - ret = EFAULT; - goto label_return; - } + ASSURED_WRITE(tcache_ind, unsigned); tcaches_flush(tsd, tcache_ind); ret = 0; @@ -2015,12 +2498,7 @@ tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, unsigned tcache_ind; WRITEONLY(); - tcache_ind = UINT_MAX; - WRITE(tcache_ind, unsigned); - if (tcache_ind == UINT_MAX) { - ret = EFAULT; - goto label_return; - } + ASSURED_WRITE(tcache_ind, unsigned); tcaches_destroy(tsd, tcache_ind); ret = 0; @@ -2105,8 +2583,7 @@ arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, int ret; unsigned arena_ind; - READONLY(); - WRITEONLY(); + NEITHER_READ_NOR_WRITE(); MIB_UNSIGNED(arena_ind, 1); arena_i_decay(tsd_tsdn(tsd), arena_ind, false); @@ -2121,8 +2598,7 @@ arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, int ret; unsigned arena_ind; - READONLY(); - WRITEONLY(); + NEITHER_READ_NOR_WRITE(); MIB_UNSIGNED(arena_ind, 1); arena_i_decay(tsd_tsdn(tsd), arena_ind, true); @@ -2137,8 +2613,7 @@ arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen, arena_t **arena) { int ret; - READONLY(); - WRITEONLY(); + NEITHER_READ_NOR_WRITE(); MIB_UNSIGNED(*arena_ind, 1); *arena = arena_get(tsd_tsdn(tsd), *arena_ind, false); @@ -2211,6 +2686,8 @@ arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, arena_t *arena; ctl_arena_t *ctl_darena, *ctl_arena; + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp, newp, newlen, &arena_ind, &arena); if (ret != 0) { @@ -2241,6 +2718,8 @@ arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, assert(ret == 0); label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; } @@ -2306,8 +2785,40 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, } static int 
-arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { +arena_i_oversize_threshold_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + + unsigned arena_ind; + MIB_UNSIGNED(arena_ind, 1); + + arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false); + if (arena == NULL) { + ret = EFAULT; + goto label_return; + } + + if (oldp != NULL && oldlenp != NULL) { + size_t oldval = atomic_load_zu( + &arena->pa_shard.pac.oversize_threshold, ATOMIC_RELAXED); + READ(oldval, size_t); + } + if (newp != NULL) { + if (newlen != sizeof(size_t)) { + ret = EINVAL; + goto label_return; + } + atomic_store_zu(&arena->pa_shard.pac.oversize_threshold, + *(size_t *)newp, ATOMIC_RELAXED); + } + ret = 0; +label_return: + return ret; +} + +static int +arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { int ret; unsigned arena_ind; arena_t *arena; @@ -2318,10 +2829,10 @@ arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, ret = EFAULT; goto label_return; } + extent_state_t state = dirty ? extent_state_dirty : extent_state_muzzy; if (oldp != NULL && oldlenp != NULL) { - size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) : - arena_muzzy_decay_ms_get(arena); + size_t oldval = arena_decay_ms_get(arena, state); READ(oldval, ssize_t); } if (newp != NULL) { @@ -2340,9 +2851,9 @@ arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, goto label_return; } } - if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena, - *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd), - arena, *(ssize_t *)newp)) { + + if (arena_decay_ms_set(tsd_tsdn(tsd), arena, state, + *(ssize_t *)newp)) { ret = EFAULT; goto label_return; } @@ -2385,15 +2896,18 @@ arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, goto label_return; } old_extent_hooks = - (extent_hooks_t *)&extent_hooks_default; + (extent_hooks_t *)&ehooks_default_extent_hooks; READ(old_extent_hooks, extent_hooks_t *); if (newp != NULL) { /* Initialize a new arena as a side effect. 
*/ extent_hooks_t *new_extent_hooks JEMALLOC_CC_SILENCE_INIT(NULL); WRITE(new_extent_hooks, extent_hooks_t *); + arena_config_t config = arena_config_default; + config.extent_hooks = new_extent_hooks; + arena = arena_init(tsd_tsdn(tsd), arena_ind, - new_extent_hooks); + &config); if (arena == NULL) { ret = EFAULT; goto label_return; @@ -2404,11 +2918,13 @@ arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, extent_hooks_t *new_extent_hooks JEMALLOC_CC_SILENCE_INIT(NULL); WRITE(new_extent_hooks, extent_hooks_t *); - old_extent_hooks = extent_hooks_set(tsd, arena, - new_extent_hooks); + old_extent_hooks = arena_set_extent_hooks(tsd, + arena, new_extent_hooks); READ(old_extent_hooks, extent_hooks_t *); } else { - old_extent_hooks = extent_hooks_get(arena); + old_extent_hooks = + ehooks_get_extent_hooks_ptr( + arena_get_ehooks(arena)); READ(old_extent_hooks, extent_hooks_t *); } } @@ -2493,10 +3009,6 @@ arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); READONLY(); - if (*oldlenp != sizeof(unsigned)) { - ret = EINVAL; - goto label_return; - } narenas = ctl_arenas->narenas; READ(narenas, unsigned); @@ -2582,14 +3094,14 @@ static int arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; - extent_hooks_t *extent_hooks; unsigned arena_ind; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); - extent_hooks = (extent_hooks_t *)&extent_hooks_default; - WRITE(extent_hooks, extent_hooks_t *); - if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) { + VERIFY_READ(unsigned); + arena_config_t config = arena_config_default; + WRITE(config.extent_hooks, extent_hooks_t *); + if ((arena_ind = ctl_arena_init(tsd, &config)) == UINT_MAX) { ret = EAGAIN; goto label_return; } @@ -2601,6 +3113,30 @@ arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, return ret; } +static int +experimental_arenas_create_ext_ctl(tsd_t *tsd, + const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + + arena_config_t config = arena_config_default; + VERIFY_READ(unsigned); + WRITE(config, arena_config_t); + + if ((arena_ind = ctl_arena_init(tsd, &config)) == UINT_MAX) { + ret = EAGAIN; + goto label_return; + } + READ(arena_ind, unsigned); + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + static int arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, @@ -2608,20 +3144,22 @@ arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, int ret; unsigned arena_ind; void *ptr; - extent_t *extent; + edata_t *edata; arena_t *arena; ptr = NULL; ret = EINVAL; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(ptr, void *); - extent = iealloc(tsd_tsdn(tsd), ptr); - if (extent == NULL) + edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr); + if (edata == NULL) { goto label_return; + } - arena = extent_arena_get(extent); - if (arena == NULL) + arena = arena_get_from_edata(edata); + if (arena == NULL) { goto label_return; + } arena_ind = arena_ind_get(arena); READ(arena_ind, unsigned); @@ -2646,6 +3184,10 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, } if (newp != NULL) { + if (!opt_prof) { + ret = ENOENT; + goto label_return; + } if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; @@ -2653,7 +3195,8 @@ prof_thread_active_init_ctl(tsd_t *tsd, 
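For reference, the public way to exercise `arenas.create` and `arenas.lookup` is unchanged by the `arena_config_t` and `edata_t` refactoring above. A minimal sketch, again assuming `<malloc_np.h>`:

```
#include <stdio.h>
#include <malloc_np.h>

int
main(void) {
	unsigned ind;
	size_t sz = sizeof(ind);

	/* Create an arena with default extent hooks (newp == NULL). */
	if (mallctl("arenas.create", &ind, &sz, NULL, 0) != 0)
		return 1;

	/* Allocate from it, then map the pointer back to its arena. */
	void *p = mallocx(64, MALLOCX_ARENA(ind));
	unsigned owner;
	sz = sizeof(owner);
	if (mallctl("arenas.lookup", &owner, &sz, &p, sizeof(p)) == 0)
		printf("pointer %p belongs to arena %u\n", p, owner);
	dallocx(p, 0);
	return 0;
}
```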
const size_t *mib, oldval = prof_thread_active_init_set(tsd_tsdn(tsd), *(bool *)newp); } else { - oldval = prof_thread_active_init_get(tsd_tsdn(tsd)); + oldval = opt_prof ? prof_thread_active_init_get(tsd_tsdn(tsd)) : + false; } READ(oldval, bool); @@ -2669,7 +3212,8 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, bool oldval; if (!config_prof) { - return ENOENT; + ret = ENOENT; + goto label_return; } if (newp != NULL) { @@ -2677,9 +3221,20 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, ret = EINVAL; goto label_return; } - oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp); + bool val = *(bool *)newp; + if (!opt_prof) { + if (val) { + ret = ENOENT; + goto label_return; + } else { + /* No change needed (already off). */ + oldval = false; + } + } else { + oldval = prof_active_set(tsd_tsdn(tsd), val); + } } else { - oldval = prof_active_get(tsd_tsdn(tsd)); + oldval = opt_prof ? prof_active_get(tsd_tsdn(tsd)) : false; } READ(oldval, bool); @@ -2694,7 +3249,7 @@ prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, int ret; const char *filename = NULL; - if (!config_prof) { + if (!config_prof || !opt_prof) { return ENOENT; } @@ -2722,13 +3277,17 @@ prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, } if (newp != NULL) { + if (!opt_prof) { + ret = ENOENT; + goto label_return; + } if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp); } else { - oldval = prof_gdump_get(tsd_tsdn(tsd)); + oldval = opt_prof ? prof_gdump_get(tsd_tsdn(tsd)) : false; } READ(oldval, bool); @@ -2737,13 +3296,33 @@ prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, return ret; } +static int +prof_prefix_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + const char *prefix = NULL; + + if (!config_prof || !opt_prof) { + return ENOENT; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + WRITEONLY(); + WRITE(prefix, const char *); + + ret = prof_prefix_set(tsd_tsdn(tsd), prefix) ? 
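The changes above make the `prof.*` controls degrade gracefully when profiling was compiled in but not enabled at run time: reads report `false` and writes fail with `ENOENT`. A hedged probe built on that behavior:

```
#include <stdbool.h>
#include <malloc_np.h>

/* True only if jemalloc has profiling built in and opt.prof enabled. */
static bool
heap_profiling_active(void) {
	bool active = false;
	size_t sz = sizeof(active);

	if (mallctl("prof.active", &active, &sz, NULL, 0) != 0)
		return false;	/* built without --enable-prof */
	return active;		/* false when opt.prof is off */
}
```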
EFAULT : 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + static int prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; size_t lg_sample = lg_prof_sample; - if (!config_prof) { + if (!config_prof || !opt_prof) { return ENOENT; } @@ -2770,7 +3349,7 @@ prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, const char *filename = NULL; - if (!config_prof) { + if (!config_prof || !opt_prof) { return ENOENT; } @@ -2790,7 +3369,7 @@ prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, static int prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { - if (!config_prof) { + if (!config_prof || !opt_prof) { return ENOENT; } @@ -2801,6 +3380,87 @@ prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, return 0; } +static int +experimental_hooks_prof_backtrace_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + + if (oldp == NULL && newp == NULL) { + ret = EINVAL; + goto label_return; + } + if (oldp != NULL) { + prof_backtrace_hook_t old_hook = + prof_backtrace_hook_get(); + READ(old_hook, prof_backtrace_hook_t); + } + if (newp != NULL) { + if (!opt_prof) { + ret = ENOENT; + goto label_return; + } + prof_backtrace_hook_t new_hook JEMALLOC_CC_SILENCE_INIT(NULL); + WRITE(new_hook, prof_backtrace_hook_t); + if (new_hook == NULL) { + ret = EINVAL; + goto label_return; + } + prof_backtrace_hook_set(new_hook); + } + ret = 0; +label_return: + return ret; +} + +static int +experimental_hooks_prof_dump_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + + if (oldp == NULL && newp == NULL) { + ret = EINVAL; + goto label_return; + } + if (oldp != NULL) { + prof_dump_hook_t old_hook = + prof_dump_hook_get(); + READ(old_hook, prof_dump_hook_t); + } + if (newp != NULL) { + if (!opt_prof) { + ret = ENOENT; + goto label_return; + } + prof_dump_hook_t new_hook JEMALLOC_CC_SILENCE_INIT(NULL); + WRITE(new_hook, prof_dump_hook_t); + prof_dump_hook_set(new_hook); + } + ret = 0; +label_return: + return ret; +} + +/* For integration test purpose only. No plan to move out of experimental. 
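`prof_prefix_ctl` above adds a `prof.prefix` control that lets a running process redirect subsequent dump files without restarting. A sketch, assuming profiling is enabled and the path is a writable placeholder:

```
#include <malloc_np.h>

static int
redirect_and_dump(void) {
	const char *prefix = "/tmp/jeprof.out";	/* placeholder prefix */

	if (mallctl("prof.prefix", NULL, NULL, &prefix,
	    sizeof(prefix)) != 0)
		return -1;	/* ENOENT: profiling disabled */
	/* Dump to an automatically named file under the new prefix. */
	return mallctl("prof.dump", NULL, NULL, NULL, 0);
}
```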
*/ +static int +experimental_hooks_safety_check_abort_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + + WRITEONLY(); + if (newp != NULL) { + if (newlen != sizeof(safety_check_abort_hook_t)) { + ret = EINVAL; + goto label_return; + } + safety_check_abort_hook_t hook JEMALLOC_CC_SILENCE_INIT(NULL); + WRITE(hook, safety_check_abort_hook_t); + safety_check_set_abort(hook); + } + ret = 0; +label_return: + return ret; +} + /******************************************************************************/ CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t) @@ -2818,6 +3478,9 @@ CTL_RO_CGEN(config_stats, stats_background_thread_num_runs, CTL_RO_CGEN(config_stats, stats_background_thread_run_interval, nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t) +CTL_RO_CGEN(config_stats, stats_zero_reallocs, + atomic_load_zu(&zero_realloc_count, ATOMIC_RELAXED), size_t) + CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *) CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms, ssize_t) @@ -2830,55 +3493,61 @@ CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t) CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t) CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, - atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED), - size_t) + arenas_i(mib[2])->astats->astats.mapped, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_retained, - atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED), - size_t) + arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.retained, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail, - atomic_load_zu(&arenas_i(mib[2])->astats->astats.extent_avail, - ATOMIC_RELAXED), - size_t) + arenas_i(mib[2])->astats->astats.pa_shard_stats.edata_avail, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge, - ctl_arena_stats_read_u64( - &arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t) + locked_read_u64_unsynchronized( + &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge), + uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise, - ctl_arena_stats_read_u64( - &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t) + locked_read_u64_unsynchronized( + &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise), + uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged, - ctl_arena_stats_read_u64( - &arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t) + locked_read_u64_unsynchronized( + &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged), + uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge, - ctl_arena_stats_read_u64( - &arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t) + locked_read_u64_unsynchronized( + &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge), + uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise, - ctl_arena_stats_read_u64( - &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t) + locked_read_u64_unsynchronized( + &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise), + uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged, - ctl_arena_stats_read_u64( - &arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t) + 
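The `stats_zero_reallocs` node added above exposes a global counter of `realloc(ptr, 0)` calls; its leaf name follows from the node identifier as `stats.zero_reallocs`. Reading it uses the usual refresh-the-epoch-then-read pattern (stats support assumed enabled):

```
#include <stdint.h>
#include <stdio.h>
#include <malloc_np.h>

static void
print_zero_reallocs(void) {
	uint64_t epoch = 1;
	size_t n, sz = sizeof(n);

	/* Refresh the stats snapshot, then read the counter. */
	mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
	if (mallctl("stats.zero_reallocs", &n, &sz, NULL, 0) == 0)
		printf("zero-size reallocs: %zu\n", n);
}
```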
locked_read_u64_unsynchronized( + &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged), + uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_base, - atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED), + arenas_i(mib[2])->astats->astats.base, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_internal, atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp, - atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp, - ATOMIC_RELAXED), size_t) + arenas_i(mib[2])->astats->astats.metadata_thp, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes, - atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes, - ATOMIC_RELAXED), size_t) + arenas_i(mib[2])->astats->astats.tcache_bytes, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_stashed_bytes, + arenas_i(mib[2])->astats->astats.tcache_stashed_bytes, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_resident, - atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED), + arenas_i(mib[2])->astats->astats.resident, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm, - atomic_load_zu(&arenas_i(mib[2])->astats->astats.abandoned_vm, + atomic_load_zu( + &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.abandoned_vm, ATOMIC_RELAXED), size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_sec_bytes, + arenas_i(mib[2])->astats->secstats.bytes, size_t) + CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, arenas_i(mib[2])->astats->allocated_small, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, @@ -2892,27 +3561,21 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_small_nfills, CTL_RO_CGEN(config_stats, stats_arenas_i_small_nflushes, arenas_i(mib[2])->astats->nflushes_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, - atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large, - ATOMIC_RELAXED), size_t) + arenas_i(mib[2])->astats->astats.allocated_large, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, - ctl_arena_stats_read_u64( - &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) + arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, - ctl_arena_stats_read_u64( - &arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t) + arenas_i(mib[2])->astats->astats.ndalloc_large, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, - ctl_arena_stats_read_u64( - &arenas_i(mib[2])->astats->astats.nrequests_large), uint64_t) + arenas_i(mib[2])->astats->astats.nrequests_large, uint64_t) /* * Note: "nmalloc_large" here instead of "nfills" in the read. This is * intentional (large has no batch fill). */ CTL_RO_CGEN(config_stats, stats_arenas_i_large_nfills, - ctl_arena_stats_read_u64( - &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) + arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_nflushes, - ctl_arena_stats_read_u64( - &arenas_i(mib[2])->astats->astats.nflushes_large), uint64_t) + arenas_i(mib[2])->astats->astats.nflushes_large, uint64_t) /* Lock profiling related APIs below. 
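The per-arena leaves above are addressed through a MIB whose third component is the arena index, which is exactly what the `arenas_i(mib[2])` accessors express. A sketch of reading the new `tcache_stashed_bytes` statistic for one arena (leaf name inferred from the ctl node above):

```
#include <stdio.h>
#include <malloc_np.h>

static void
print_stashed_bytes(unsigned arena_ind) {
	size_t mib[8], miblen = sizeof(mib) / sizeof(mib[0]);
	size_t val, sz = sizeof(val);

	if (mallctlnametomib("stats.arenas.0.tcache_stashed_bytes", mib,
	    &miblen) != 0)
		return;
	mib[2] = arena_ind;	/* matches arenas_i(mib[2]) in ctl.c */
	if (mallctlbymib(mib, miblen, &val, &sz, NULL, 0) == 0)
		printf("arena %u: %zu stashed tcache bytes\n", arena_ind,
		    val);
}
```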
*/ #define RO_MUTEX_CTL_GEN(n, l) \ @@ -2972,9 +3635,13 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, } if (config_prof && opt_prof) { MUTEX_PROF_RESET(bt2gctx_mtx); + MUTEX_PROF_RESET(tdatas_mtx); + MUTEX_PROF_RESET(prof_dump_mtx); + MUTEX_PROF_RESET(prof_recent_alloc_mtx); + MUTEX_PROF_RESET(prof_recent_dump_mtx); + MUTEX_PROF_RESET(prof_stats_mtx); } - /* Per arena mutexes. */ unsigned n = narenas_total_get(); @@ -2984,18 +3651,18 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, continue; } MUTEX_PROF_RESET(arena->large_mtx); - MUTEX_PROF_RESET(arena->extent_avail_mtx); - MUTEX_PROF_RESET(arena->extents_dirty.mtx); - MUTEX_PROF_RESET(arena->extents_muzzy.mtx); - MUTEX_PROF_RESET(arena->extents_retained.mtx); - MUTEX_PROF_RESET(arena->decay_dirty.mtx); - MUTEX_PROF_RESET(arena->decay_muzzy.mtx); + MUTEX_PROF_RESET(arena->pa_shard.edata_cache.mtx); + MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_dirty.mtx); + MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_muzzy.mtx); + MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_retained.mtx); + MUTEX_PROF_RESET(arena->pa_shard.pac.decay_dirty.mtx); + MUTEX_PROF_RESET(arena->pa_shard.pac.decay_muzzy.mtx); MUTEX_PROF_RESET(arena->tcache_ql_mtx); MUTEX_PROF_RESET(arena->base->mtx); - for (szind_t i = 0; i < SC_NBINS; i++) { - for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { - bin_t *bin = &arena->bins[i].bin_shards[j]; + for (szind_t j = 0; j < SC_NBINS; j++) { + for (unsigned k = 0; k < bin_infos[j].n_shards; k++) { + bin_t *bin = arena_get_bin(arena, j, k); MUTEX_PROF_RESET(bin->lock); } } @@ -3005,25 +3672,25 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, } CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, - arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t) + arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nmalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc, - arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t) + arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.ndalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests, - arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t) + arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nrequests, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs, - arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t) + arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.curregs, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills, - arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t) + arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nfills, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes, - arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t) + arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nflushes, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs, - arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t) + arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nslabs, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs, - arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t) + arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.reslabs, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs, - arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t) + arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.curslabs, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nonfull_slabs, - arenas_i(mib[2])->astats->bstats[mib[4]].nonfull_slabs, size_t) + 
arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nonfull_slabs, size_t) static const ctl_named_node_t * stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, @@ -3035,13 +3702,13 @@ stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, } CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc, - ctl_arena_stats_read_u64( + locked_read_u64_unsynchronized( &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc, - ctl_arena_stats_read_u64( + locked_read_u64_unsynchronized( &arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests, - ctl_arena_stats_read_u64( + locked_read_u64_unsynchronized( &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents, arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t) @@ -3056,29 +3723,17 @@ stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, } CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_ndirty, - atomic_load_zu( - &arenas_i(mib[2])->astats->estats[mib[4]].ndirty, - ATOMIC_RELAXED), size_t); + arenas_i(mib[2])->astats->estats[mib[4]].ndirty, size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nmuzzy, - atomic_load_zu( - &arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy, - ATOMIC_RELAXED), size_t); + arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy, size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nretained, - atomic_load_zu( - &arenas_i(mib[2])->astats->estats[mib[4]].nretained, - ATOMIC_RELAXED), size_t); + arenas_i(mib[2])->astats->estats[mib[4]].nretained, size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_dirty_bytes, - atomic_load_zu( - &arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes, - ATOMIC_RELAXED), size_t); + arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes, size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_muzzy_bytes, - atomic_load_zu( - &arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes, - ATOMIC_RELAXED), size_t); + arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes, size_t); CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_retained_bytes, - atomic_load_zu( - &arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes, - ATOMIC_RELAXED), size_t); + arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes, size_t); static const ctl_named_node_t * stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib, @@ -3089,6 +3744,82 @@ stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib, return super_stats_arenas_i_extents_j_node; } +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurge_passes, + arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurge_passes, uint64_t); +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurges, + arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurges, uint64_t); +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nhugifies, + arenas_i(mib[2])->astats->hpastats.nonderived_stats.nhugifies, uint64_t); +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_ndehugifies, + arenas_i(mib[2])->astats->hpastats.nonderived_stats.ndehugifies, uint64_t); + +/* Full, nonhuge */ +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge, + arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].npageslabs, + size_t); +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge, + arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].nactive, size_t); 
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge, + arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].ndirty, size_t); + +/* Full, huge */ +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge, + arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].npageslabs, + size_t); +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive_huge, + arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].nactive, size_t); +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ndirty_huge, + arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].ndirty, size_t); + +/* Empty, nonhuge */ +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge, + arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].npageslabs, + size_t); +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge, + arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].nactive, size_t); +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge, + arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].ndirty, size_t); + +/* Empty, huge */ +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge, + arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].npageslabs, + size_t); +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_nactive_huge, + arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].nactive, size_t); +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge, + arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].ndirty, size_t); + +/* Nonfull, nonhuge */ +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge, + arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].npageslabs, + size_t); +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge, + arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].nactive, + size_t); +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge, + arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].ndirty, + size_t); + +/* Nonfull, huge */ +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge, + arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].npageslabs, + size_t); +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge, + arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].nactive, + size_t); +CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge, + arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].ndirty, + size_t); + +static const ctl_named_node_t * +stats_arenas_i_hpa_shard_nonfull_slabs_j_index(tsdn_t *tsdn, const size_t *mib, + size_t miblen, size_t j) { + if (j >= PSSET_NPSIZES) { + return NULL; + } + return super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node; +} + static bool ctl_arenas_i_verify(size_t i) { size_t a = arenas_i2a_impl(i, true, true); @@ -3161,6 +3892,32 @@ experimental_hooks_remove_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, return ret; } +static int +experimental_thread_activity_callback_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + + if (!config_stats) { + return ENOENT; + } + + activity_callback_thunk_t t_old = 
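The HPA shard statistics above should surface as `stats.arenas.<i>.hpa_shard.*` mallctl leaves; the names here are inferred from the ctl node identifiers, and the counters only carry data when the hugepage allocator is enabled (`opt.hpa`). A tentative read:

```
#include <stdint.h>
#include <stdio.h>
#include <malloc_np.h>

static void
print_hpa_purge_passes(unsigned arena_ind) {
	char name[96];
	uint64_t passes;
	size_t sz = sizeof(passes);

	snprintf(name, sizeof(name),
	    "stats.arenas.%u.hpa_shard.npurge_passes", arena_ind);
	if (mallctl(name, &passes, &sz, NULL, 0) == 0)
		printf("arena %u: %ju HPA purge passes\n", arena_ind,
		    (uintmax_t)passes);
}
```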
tsd_activity_callback_thunk_get(tsd); + READ(t_old, activity_callback_thunk_t); + + if (newp != NULL) { + /* + * This initialization is unnecessary. If it's omitted, though, + * clang gets confused and warns on the subsequent use of t_new. + */ + activity_callback_thunk_t t_new = {NULL, NULL}; + WRITE(t_new, activity_callback_thunk_t); + tsd_activity_callback_thunk_set(tsd, t_new); + } + ret = 0; +label_return: + return ret; +} + /* * Output six memory utilization entries for an input pointer, the first one of * type (void *) and the remaining five of type size_t, describing the following @@ -3178,7 +3935,8 @@ experimental_hooks_remove_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, * otherwise their values are undefined. * * This API is mainly intended for small class allocations, where extents are - * used as slab. + * used as slab. Note that if the bin the extent belongs to is completely + * full, "(a)" will be NULL. * * In case of large class allocations, "(a)" will be NULL, and "(e)" and "(f)" * will be zero (if stats are enabled; otherwise undefined). The other three @@ -3232,11 +3990,11 @@ experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; - assert(sizeof(extent_util_stats_verbose_t) + assert(sizeof(inspect_extent_util_stats_verbose_t) == sizeof(void *) + sizeof(size_t) * 5); if (oldp == NULL || oldlenp == NULL - || *oldlenp != sizeof(extent_util_stats_verbose_t) + || *oldlenp != sizeof(inspect_extent_util_stats_verbose_t) || newp == NULL) { ret = EINVAL; goto label_return; @@ -3244,9 +4002,9 @@ experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib, void *ptr = NULL; WRITE(ptr, void *); - extent_util_stats_verbose_t *util_stats - = (extent_util_stats_verbose_t *)oldp; - extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr, + inspect_extent_util_stats_verbose_t *util_stats + = (inspect_extent_util_stats_verbose_t *)oldp; + inspect_extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr, &util_stats->nfree, &util_stats->nregs, &util_stats->size, &util_stats->bin_nfree, &util_stats->bin_nregs, &util_stats->slabcur_addr); @@ -3357,21 +4115,22 @@ experimental_utilization_batch_query_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; - assert(sizeof(extent_util_stats_t) == sizeof(size_t) * 3); + assert(sizeof(inspect_extent_util_stats_t) == sizeof(size_t) * 3); const size_t len = newlen / sizeof(const void *); if (oldp == NULL || oldlenp == NULL || newp == NULL || newlen == 0 || newlen != len * sizeof(const void *) - || *oldlenp != len * sizeof(extent_util_stats_t)) { + || *oldlenp != len * sizeof(inspect_extent_util_stats_t)) { ret = EINVAL; goto label_return; } void **ptrs = (void **)newp; - extent_util_stats_t *util_stats = (extent_util_stats_t *)oldp; + inspect_extent_util_stats_t *util_stats = + (inspect_extent_util_stats_t *)oldp; size_t i; for (i = 0; i < len; ++i) { - extent_util_stats_get(tsd_tsdn(tsd), ptrs[i], + inspect_extent_util_stats_get(tsd_tsdn(tsd), ptrs[i], &util_stats[i].nfree, &util_stats[i].nregs, &util_stats[i].size); } @@ -3420,7 +4179,7 @@ experimental_arenas_i_pactivep_ctl(tsd_t *tsd, const size_t *mib, #if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) || \ defined(JEMALLOC_GCC_SYNC_ATOMICS) || defined(_MSC_VER) /* Expose the underlying counter for fast read. 
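The batch form of the utilization query keeps its three-`size_t`-per-pointer layout, in the order the call above passes them: `nfree`, `nregs`, `size`. A hedged sketch of consuming it from application code:

```
#include <stdio.h>
#include <stdlib.h>
#include <malloc_np.h>

static void
report_utilization(void) {
	void *ptrs[2] = { malloc(100), malloc(5000) };
	size_t out[2 * 3];	/* nfree, nregs, size per input pointer */
	size_t out_sz = sizeof(out);

	if (mallctl("experimental.utilization.batch_query", out, &out_sz,
	    ptrs, sizeof(ptrs)) == 0) {
		for (int i = 0; i < 2; i++) {
			printf("%p: %zu/%zu regions free, extent %zu bytes\n",
			    ptrs[i], out[3 * i], out[3 * i + 1],
			    out[3 * i + 2]);
		}
	}
	free(ptrs[0]);
	free(ptrs[1]);
}
```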
*/ - pactivep = (size_t *)&(arena->nactive.repr); + pactivep = (size_t *)&(arena->pa_shard.nactive.repr); READ(pactivep, size_t *); ret = 0; #else @@ -3433,3 +4192,223 @@ experimental_arenas_i_pactivep_ctl(tsd_t *tsd, const size_t *mib, malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return ret; } + +static int +experimental_prof_recent_alloc_max_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + + if (!(config_prof && opt_prof)) { + ret = ENOENT; + goto label_return; + } + + ssize_t old_max; + if (newp != NULL) { + if (newlen != sizeof(ssize_t)) { + ret = EINVAL; + goto label_return; + } + ssize_t max = *(ssize_t *)newp; + if (max < -1) { + ret = EINVAL; + goto label_return; + } + old_max = prof_recent_alloc_max_ctl_write(tsd, max); + } else { + old_max = prof_recent_alloc_max_ctl_read(); + } + READ(old_max, ssize_t); + + ret = 0; + +label_return: + return ret; +} + +typedef struct write_cb_packet_s write_cb_packet_t; +struct write_cb_packet_s { + write_cb_t *write_cb; + void *cbopaque; +}; + +static int +experimental_prof_recent_alloc_dump_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + + if (!(config_prof && opt_prof)) { + ret = ENOENT; + goto label_return; + } + + assert(sizeof(write_cb_packet_t) == sizeof(void *) * 2); + + WRITEONLY(); + write_cb_packet_t write_cb_packet; + ASSURED_WRITE(write_cb_packet, write_cb_packet_t); + + prof_recent_alloc_dump(tsd, write_cb_packet.write_cb, + write_cb_packet.cbopaque); + + ret = 0; + +label_return: + return ret; +} + +typedef struct batch_alloc_packet_s batch_alloc_packet_t; +struct batch_alloc_packet_s { + void **ptrs; + size_t num; + size_t size; + int flags; +}; + +static int +experimental_batch_alloc_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + + VERIFY_READ(size_t); + + batch_alloc_packet_t batch_alloc_packet; + ASSURED_WRITE(batch_alloc_packet, batch_alloc_packet_t); + size_t filled = batch_alloc(batch_alloc_packet.ptrs, + batch_alloc_packet.num, batch_alloc_packet.size, + batch_alloc_packet.flags); + READ(filled, size_t); + + ret = 0; + +label_return: + return ret; +} + +static int +prof_stats_bins_i_live_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned binind; + prof_stats_t stats; + + if (!(config_prof && opt_prof && opt_prof_stats)) { + ret = ENOENT; + goto label_return; + } + + READONLY(); + MIB_UNSIGNED(binind, 3); + if (binind >= SC_NBINS) { + ret = EINVAL; + goto label_return; + } + prof_stats_get_live(tsd, (szind_t)binind, &stats); + READ(stats, prof_stats_t); + + ret = 0; +label_return: + return ret; +} + +static int +prof_stats_bins_i_accum_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned binind; + prof_stats_t stats; + + if (!(config_prof && opt_prof && opt_prof_stats)) { + ret = ENOENT; + goto label_return; + } + + READONLY(); + MIB_UNSIGNED(binind, 3); + if (binind >= SC_NBINS) { + ret = EINVAL; + goto label_return; + } + prof_stats_get_accum(tsd, (szind_t)binind, &stats); + READ(stats, prof_stats_t); + + ret = 0; +label_return: + return ret; +} + +static const ctl_named_node_t * +prof_stats_bins_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t i) { + if (!(config_prof && opt_prof && opt_prof_stats)) { + return NULL; + } + if 
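`experimental.batch_alloc` above consumes a packet with exactly the layout of `batch_alloc_packet_t` and reports how many slots it filled. The mirrored struct below is an assumption that must stay in sync with the library's definition:

```
#include <malloc_np.h>

/* Must mirror batch_alloc_packet_t in ctl.c. */
struct batch_alloc_packet {
	void	**ptrs;
	size_t	num;
	size_t	size;
	int	flags;
};

static size_t
batch_alloc_16byte(void **slots, size_t n) {
	struct batch_alloc_packet pkt = { slots, n, 16, 0 };
	size_t filled = 0, sz = sizeof(filled);

	if (mallctl("experimental.batch_alloc", &filled, &sz, &pkt,
	    sizeof(pkt)) != 0)
		return 0;
	return filled;	/* may be < n if allocation fails partway */
}
```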
(i >= SC_NBINS) { + return NULL; + } + return super_prof_stats_bins_i_node; +} + +static int +prof_stats_lextents_i_live_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned lextent_ind; + prof_stats_t stats; + + if (!(config_prof && opt_prof && opt_prof_stats)) { + ret = ENOENT; + goto label_return; + } + + READONLY(); + MIB_UNSIGNED(lextent_ind, 3); + if (lextent_ind >= SC_NSIZES - SC_NBINS) { + ret = EINVAL; + goto label_return; + } + prof_stats_get_live(tsd, (szind_t)(lextent_ind + SC_NBINS), &stats); + READ(stats, prof_stats_t); + + ret = 0; +label_return: + return ret; +} + +static int +prof_stats_lextents_i_accum_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned lextent_ind; + prof_stats_t stats; + + if (!(config_prof && opt_prof && opt_prof_stats)) { + ret = ENOENT; + goto label_return; + } + + READONLY(); + MIB_UNSIGNED(lextent_ind, 3); + if (lextent_ind >= SC_NSIZES - SC_NBINS) { + ret = EINVAL; + goto label_return; + } + prof_stats_get_accum(tsd, (szind_t)(lextent_ind + SC_NBINS), &stats); + READ(stats, prof_stats_t); + + ret = 0; +label_return: + return ret; +} + +static const ctl_named_node_t * +prof_stats_lextents_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t i) { + if (!(config_prof && opt_prof && opt_prof_stats)) { + return NULL; + } + if (i >= SC_NSIZES - SC_NBINS) { + return NULL; + } + return super_prof_stats_lextents_i_node; +} diff --git a/contrib/jemalloc/src/decay.c b/contrib/jemalloc/src/decay.c new file mode 100644 index 00000000000000..d801b2bc08ea99 --- /dev/null +++ b/contrib/jemalloc/src/decay.c @@ -0,0 +1,295 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/decay.h" + +static const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = { +#define STEP(step, h, x, y) \ + h, + SMOOTHSTEP +#undef STEP +}; + +/* + * Generate a new deadline that is uniformly random within the next epoch after + * the current one. 
+ */ +void +decay_deadline_init(decay_t *decay) { + nstime_copy(&decay->deadline, &decay->epoch); + nstime_add(&decay->deadline, &decay->interval); + if (decay_ms_read(decay) > 0) { + nstime_t jitter; + + nstime_init(&jitter, prng_range_u64(&decay->jitter_state, + nstime_ns(&decay->interval))); + nstime_add(&decay->deadline, &jitter); + } +} + +void +decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms) { + atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED); + if (decay_ms > 0) { + nstime_init(&decay->interval, (uint64_t)decay_ms * + KQU(1000000)); + nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS); + } + + nstime_copy(&decay->epoch, cur_time); + decay->jitter_state = (uint64_t)(uintptr_t)decay; + decay_deadline_init(decay); + decay->nunpurged = 0; + memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t)); +} + +bool +decay_init(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms) { + if (config_debug) { + for (size_t i = 0; i < sizeof(decay_t); i++) { + assert(((char *)decay)[i] == 0); + } + decay->ceil_npages = 0; + } + if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY, + malloc_mutex_rank_exclusive)) { + return true; + } + decay->purging = false; + decay_reinit(decay, cur_time, decay_ms); + return false; +} + +bool +decay_ms_valid(ssize_t decay_ms) { + if (decay_ms < -1) { + return false; + } + if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX * + KQU(1000)) { + return true; + } + return false; +} + +static void +decay_maybe_update_time(decay_t *decay, nstime_t *new_time) { + if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, + new_time) > 0)) { + /* + * Time went backwards. Move the epoch back in time and + * generate a new deadline, with the expectation that time + * typically flows forward for long enough periods of time that + * epochs complete. Unfortunately, this strategy is susceptible + * to clock jitter triggering premature epoch advances, but + * clock jitter estimation and compensation isn't feasible here + * because calls into this code are event-driven. + */ + nstime_copy(&decay->epoch, new_time); + decay_deadline_init(decay); + } else { + /* Verify that time does not go backwards. */ + assert(nstime_compare(&decay->epoch, new_time) <= 0); + } +} + +static size_t +decay_backlog_npages_limit(const decay_t *decay) { + /* + * For each element of decay_backlog, multiply by the corresponding + * fixed-point smoothstep decay factor. Sum the products, then divide + * to round down to the nearest whole number of pages. + */ + uint64_t sum = 0; + for (unsigned i = 0; i < SMOOTHSTEP_NSTEPS; i++) { + sum += decay->backlog[i] * h_steps[i]; + } + size_t npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP); + + return npages_limit_backlog; +} + +/* + * Update backlog, assuming that 'nadvance_u64' time intervals have passed. + * Trailing 'nadvance_u64' records should be erased and 'current_npages' is + * placed as the newest record. 
+ */ +static void +decay_backlog_update(decay_t *decay, uint64_t nadvance_u64, + size_t current_npages) { + if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) { + memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) * + sizeof(size_t)); + } else { + size_t nadvance_z = (size_t)nadvance_u64; + + assert((uint64_t)nadvance_z == nadvance_u64); + + memmove(decay->backlog, &decay->backlog[nadvance_z], + (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t)); + if (nadvance_z > 1) { + memset(&decay->backlog[SMOOTHSTEP_NSTEPS - + nadvance_z], 0, (nadvance_z-1) * sizeof(size_t)); + } + } + + size_t npages_delta = (current_npages > decay->nunpurged) ? + current_npages - decay->nunpurged : 0; + decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta; + + if (config_debug) { + if (current_npages > decay->ceil_npages) { + decay->ceil_npages = current_npages; + } + size_t npages_limit = decay_backlog_npages_limit(decay); + assert(decay->ceil_npages >= npages_limit); + if (decay->ceil_npages > npages_limit) { + decay->ceil_npages = npages_limit; + } + } +} + +static inline bool +decay_deadline_reached(const decay_t *decay, const nstime_t *time) { + return (nstime_compare(&decay->deadline, time) <= 0); +} + +uint64_t +decay_npages_purge_in(decay_t *decay, nstime_t *time, size_t npages_new) { + uint64_t decay_interval_ns = decay_epoch_duration_ns(decay); + size_t n_epoch = (size_t)(nstime_ns(time) / decay_interval_ns); + + uint64_t npages_purge; + if (n_epoch >= SMOOTHSTEP_NSTEPS) { + npages_purge = npages_new; + } else { + uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1]; + assert(h_steps_max >= + h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]); + npages_purge = npages_new * (h_steps_max - + h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]); + npages_purge >>= SMOOTHSTEP_BFP; + } + return npages_purge; +} + +bool +decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time, + size_t npages_current) { + /* Handle possible non-monotonicity of time. */ + decay_maybe_update_time(decay, new_time); + + if (!decay_deadline_reached(decay, new_time)) { + return false; + } + nstime_t delta; + nstime_copy(&delta, new_time); + nstime_subtract(&delta, &decay->epoch); + + uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval); + assert(nadvance_u64 > 0); + + /* Add nadvance_u64 decay intervals to epoch. */ + nstime_copy(&delta, &decay->interval); + nstime_imultiply(&delta, nadvance_u64); + nstime_add(&decay->epoch, &delta); + + /* Set a new deadline. */ + decay_deadline_init(decay); + + /* Update the backlog. */ + decay_backlog_update(decay, nadvance_u64, npages_current); + + decay->npages_limit = decay_backlog_npages_limit(decay); + decay->nunpurged = (decay->npages_limit > npages_current) ? + decay->npages_limit : npages_current; + + return true; +} + +/* + * Calculate how many pages should be purged after 'interval'. + * + * First, calculate how many pages should remain at the moment, then subtract + * the number of pages that should remain after 'interval'. The difference is + * how many pages should be purged until then. + * + * The number of pages that should remain at a specific moment is calculated + * like this: pages(now) = sum(backlog[i] * h_steps[i]). After 'interval' + * passes, backlog would shift 'interval' positions to the left and sigmoid + * curve would be applied starting with backlog[interval]. + * + * The implementation doesn't directly map to the description, but it's + * essentially the same calculation, optimized to avoid iterating over + * [interval..SMOOTHSTEP_NSTEPS) twice. 
+ */ +static inline size_t +decay_npurge_after_interval(decay_t *decay, size_t interval) { + size_t i; + uint64_t sum = 0; + for (i = 0; i < interval; i++) { + sum += decay->backlog[i] * h_steps[i]; + } + for (; i < SMOOTHSTEP_NSTEPS; i++) { + sum += decay->backlog[i] * + (h_steps[i] - h_steps[i - interval]); + } + + return (size_t)(sum >> SMOOTHSTEP_BFP); +} + +uint64_t decay_ns_until_purge(decay_t *decay, size_t npages_current, + uint64_t npages_threshold) { + if (!decay_gradually(decay)) { + return DECAY_UNBOUNDED_TIME_TO_PURGE; + } + uint64_t decay_interval_ns = decay_epoch_duration_ns(decay); + assert(decay_interval_ns > 0); + if (npages_current == 0) { + unsigned i; + for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { + if (decay->backlog[i] > 0) { + break; + } + } + if (i == SMOOTHSTEP_NSTEPS) { + /* No dirty pages recorded. Sleep indefinitely. */ + return DECAY_UNBOUNDED_TIME_TO_PURGE; + } + } + if (npages_current <= npages_threshold) { + /* Use max interval. */ + return decay_interval_ns * SMOOTHSTEP_NSTEPS; + } + + /* Minimal 2 intervals to ensure reaching next epoch deadline. */ + size_t lb = 2; + size_t ub = SMOOTHSTEP_NSTEPS; + + size_t npurge_lb, npurge_ub; + npurge_lb = decay_npurge_after_interval(decay, lb); + if (npurge_lb > npages_threshold) { + return decay_interval_ns * lb; + } + npurge_ub = decay_npurge_after_interval(decay, ub); + if (npurge_ub < npages_threshold) { + return decay_interval_ns * ub; + } + + unsigned n_search = 0; + size_t target, npurge; + while ((npurge_lb + npages_threshold < npurge_ub) && (lb + 2 < ub)) { + target = (lb + ub) / 2; + npurge = decay_npurge_after_interval(decay, target); + if (npurge > npages_threshold) { + ub = target; + npurge_ub = npurge; + } else { + lb = target; + npurge_lb = npurge; + } + assert(n_search < lg_floor(SMOOTHSTEP_NSTEPS) + 1); + ++n_search; + } + return decay_interval_ns * (ub + lb) / 2; +} diff --git a/contrib/jemalloc/src/ecache.c b/contrib/jemalloc/src/ecache.c new file mode 100644 index 00000000000000..a242227d32d658 --- /dev/null +++ b/contrib/jemalloc/src/ecache.c @@ -0,0 +1,35 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/san.h" + +bool +ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state, unsigned ind, + bool delay_coalesce) { + if (malloc_mutex_init(&ecache->mtx, "extents", WITNESS_RANK_EXTENTS, + malloc_mutex_rank_exclusive)) { + return true; + } + ecache->state = state; + ecache->ind = ind; + ecache->delay_coalesce = delay_coalesce; + eset_init(&ecache->eset, state); + eset_init(&ecache->guarded_eset, state); + + return false; +} + +void +ecache_prefork(tsdn_t *tsdn, ecache_t *ecache) { + malloc_mutex_prefork(tsdn, &ecache->mtx); +} + +void +ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache) { + malloc_mutex_postfork_parent(tsdn, &ecache->mtx); +} + +void +ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache) { + malloc_mutex_postfork_child(tsdn, &ecache->mtx); +} diff --git a/contrib/jemalloc/src/edata.c b/contrib/jemalloc/src/edata.c new file mode 100644 index 00000000000000..82b6f5654b515d --- /dev/null +++ b/contrib/jemalloc/src/edata.c @@ -0,0 +1,6 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +ph_gen(, edata_avail, edata_t, avail_link, + edata_esnead_comp) +ph_gen(, edata_heap, edata_t, heap_link, edata_snad_comp) diff --git a/contrib/jemalloc/src/edata_cache.c b/contrib/jemalloc/src/edata_cache.c new file mode 100644 index 
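To make the backlog arithmetic in decay.c above concrete: each of the `SMOOTHSTEP_NSTEPS` backlog slots records how many pages became purgeable during one past epoch, and `decay_backlog_npages_limit()` weights them by a fixed-point smoothstep factor to get the number of pages allowed to stay unpurged. The toy below uses four slots and made-up step values purely for illustration; jemalloc's real table is generated at build time with `SMOOTHSTEP_BFP` fractional bits.

```
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NSTEPS	4		/* stand-in for SMOOTHSTEP_NSTEPS */
#define BFP	24		/* stand-in for SMOOTHSTEP_BFP */

static size_t
backlog_npages_limit(const size_t backlog[NSTEPS],
    const uint64_t h[NSTEPS]) {
	uint64_t sum = 0;
	for (unsigned i = 0; i < NSTEPS; i++)
		sum += (uint64_t)backlog[i] * h[i];
	return (size_t)(sum >> BFP);	/* round down to whole pages */
}

int
main(void) {
	/* Oldest epoch first; newer epochs keep a larger share of pages. */
	const size_t backlog[NSTEPS] = { 100, 100, 100, 100 };
	const uint64_t h[NSTEPS] = { 1u << 21, 1u << 22, 3u << 22, 1u << 24 };

	printf("pages allowed to stay dirty: %zu\n",
	    backlog_npages_limit(backlog, h));
	return 0;
}
```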
00000000000000..6bc1848cbcb818 --- /dev/null +++ b/contrib/jemalloc/src/edata_cache.c @@ -0,0 +1,154 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +bool +edata_cache_init(edata_cache_t *edata_cache, base_t *base) { + edata_avail_new(&edata_cache->avail); + /* + * This is not strictly necessary, since the edata_cache_t is only + * created inside an arena, which is zeroed on creation. But this is + * handy as a safety measure. + */ + atomic_store_zu(&edata_cache->count, 0, ATOMIC_RELAXED); + if (malloc_mutex_init(&edata_cache->mtx, "edata_cache", + WITNESS_RANK_EDATA_CACHE, malloc_mutex_rank_exclusive)) { + return true; + } + edata_cache->base = base; + return false; +} + +edata_t * +edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache) { + malloc_mutex_lock(tsdn, &edata_cache->mtx); + edata_t *edata = edata_avail_first(&edata_cache->avail); + if (edata == NULL) { + malloc_mutex_unlock(tsdn, &edata_cache->mtx); + return base_alloc_edata(tsdn, edata_cache->base); + } + edata_avail_remove(&edata_cache->avail, edata); + atomic_load_sub_store_zu(&edata_cache->count, 1); + malloc_mutex_unlock(tsdn, &edata_cache->mtx); + return edata; +} + +void +edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata) { + malloc_mutex_lock(tsdn, &edata_cache->mtx); + edata_avail_insert(&edata_cache->avail, edata); + atomic_load_add_store_zu(&edata_cache->count, 1); + malloc_mutex_unlock(tsdn, &edata_cache->mtx); +} + +void +edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache) { + malloc_mutex_prefork(tsdn, &edata_cache->mtx); +} + +void +edata_cache_postfork_parent(tsdn_t *tsdn, edata_cache_t *edata_cache) { + malloc_mutex_postfork_parent(tsdn, &edata_cache->mtx); +} + +void +edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache) { + malloc_mutex_postfork_child(tsdn, &edata_cache->mtx); +} + +void +edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback) { + edata_list_inactive_init(&ecs->list); + ecs->fallback = fallback; + ecs->disabled = false; +} + +static void +edata_cache_fast_try_fill_from_fallback(tsdn_t *tsdn, + edata_cache_fast_t *ecs) { + edata_t *edata; + malloc_mutex_lock(tsdn, &ecs->fallback->mtx); + for (int i = 0; i < EDATA_CACHE_FAST_FILL; i++) { + edata = edata_avail_remove_first(&ecs->fallback->avail); + if (edata == NULL) { + break; + } + edata_list_inactive_append(&ecs->list, edata); + atomic_load_sub_store_zu(&ecs->fallback->count, 1); + } + malloc_mutex_unlock(tsdn, &ecs->fallback->mtx); +} + +edata_t * +edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_EDATA_CACHE, 0); + + if (ecs->disabled) { + assert(edata_list_inactive_first(&ecs->list) == NULL); + return edata_cache_get(tsdn, ecs->fallback); + } + + edata_t *edata = edata_list_inactive_first(&ecs->list); + if (edata != NULL) { + edata_list_inactive_remove(&ecs->list, edata); + return edata; + } + /* Slow path; requires synchronization. */ + edata_cache_fast_try_fill_from_fallback(tsdn, ecs); + edata = edata_list_inactive_first(&ecs->list); + if (edata != NULL) { + edata_list_inactive_remove(&ecs->list, edata); + } else { + /* + * Slowest path (fallback was also empty); allocate something + * new. 
+ */ + edata = base_alloc_edata(tsdn, ecs->fallback->base); + } + return edata; +} + +static void +edata_cache_fast_flush_all(tsdn_t *tsdn, edata_cache_fast_t *ecs) { + /* + * You could imagine smarter cache management policies (like + * only flushing down to some threshold in anticipation of + * future get requests). But just flushing everything provides + * a good opportunity to defrag too, and lets us share code between the + * flush and disable pathways. + */ + edata_t *edata; + size_t nflushed = 0; + malloc_mutex_lock(tsdn, &ecs->fallback->mtx); + while ((edata = edata_list_inactive_first(&ecs->list)) != NULL) { + edata_list_inactive_remove(&ecs->list, edata); + edata_avail_insert(&ecs->fallback->avail, edata); + nflushed++; + } + atomic_load_add_store_zu(&ecs->fallback->count, nflushed); + malloc_mutex_unlock(tsdn, &ecs->fallback->mtx); +} + +void +edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs, edata_t *edata) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_EDATA_CACHE, 0); + + if (ecs->disabled) { + assert(edata_list_inactive_first(&ecs->list) == NULL); + edata_cache_put(tsdn, ecs->fallback, edata); + return; + } + + /* + * Prepend rather than append, to do LIFO ordering in the hopes of some + * cache locality. + */ + edata_list_inactive_prepend(&ecs->list, edata); +} + +void +edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs) { + edata_cache_fast_flush_all(tsdn, ecs); + ecs->disabled = true; +} diff --git a/contrib/jemalloc/src/ehooks.c b/contrib/jemalloc/src/ehooks.c new file mode 100644 index 00000000000000..383e9de6a6b9e7 --- /dev/null +++ b/contrib/jemalloc/src/ehooks.c @@ -0,0 +1,275 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/ehooks.h" +#include "jemalloc/internal/extent_mmap.h" + +void +ehooks_init(ehooks_t *ehooks, extent_hooks_t *extent_hooks, unsigned ind) { + /* All other hooks are optional; this one is not. */ + assert(extent_hooks->alloc != NULL); + ehooks->ind = ind; + ehooks_set_extent_hooks_ptr(ehooks, extent_hooks); +} + +/* + * If the caller specifies (!*zero), it is still possible to receive zeroed + * memory, in which case *zero is toggled to true. arena_extent_alloc() takes + * advantage of this to avoid demanding zeroed extents, but taking advantage of + * them if they are returned. + */ +static void * +extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) { + void *ret; + + assert(size != 0); + assert(alignment != 0); + + /* "primary" dss. */ + if (have_dss && dss_prec == dss_prec_primary && (ret = + extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, + commit)) != NULL) { + return ret; + } + /* mmap. */ + if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit)) + != NULL) { + return ret; + } + /* "secondary" dss. */ + if (have_dss && dss_prec == dss_prec_secondary && (ret = + extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, + commit)) != NULL) { + return ret; + } + + /* All strategies for allocation failed. */ + return NULL; +} + +void * +ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { + arena_t *arena = arena_get(tsdn, arena_ind, false); + /* NULL arena indicates arena_create. */ + assert(arena != NULL || alignment == HUGEPAGE); + dss_prec_t dss = (arena == NULL) ? 
dss_prec_disabled : + (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_RELAXED); + void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, + zero, commit, dss); + if (have_madvise_huge && ret) { + pages_set_thp_state(ret, size); + } + return ret; +} + +static void * +ehooks_default_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { + return ehooks_default_alloc_impl(tsdn_fetch(), new_addr, size, + ALIGNMENT_CEILING(alignment, PAGE), zero, commit, arena_ind); +} + +bool +ehooks_default_dalloc_impl(void *addr, size_t size) { + if (!have_dss || !extent_in_dss(addr)) { + return extent_dalloc_mmap(addr, size); + } + return true; +} + +static bool +ehooks_default_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + return ehooks_default_dalloc_impl(addr, size); +} + +void +ehooks_default_destroy_impl(void *addr, size_t size) { + if (!have_dss || !extent_in_dss(addr)) { + pages_unmap(addr, size); + } +} + +static void +ehooks_default_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + ehooks_default_destroy_impl(addr, size); +} + +bool +ehooks_default_commit_impl(void *addr, size_t offset, size_t length) { + return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset), + length); +} + +static bool +ehooks_default_commit(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + return ehooks_default_commit_impl(addr, offset, length); +} + +bool +ehooks_default_decommit_impl(void *addr, size_t offset, size_t length) { + return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset), + length); +} + +static bool +ehooks_default_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + return ehooks_default_decommit_impl(addr, offset, length); +} + +#ifdef PAGES_CAN_PURGE_LAZY +bool +ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length) { + return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset), + length); +} + +static bool +ehooks_default_purge_lazy(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + assert(addr != NULL); + assert((offset & PAGE_MASK) == 0); + assert(length != 0); + assert((length & PAGE_MASK) == 0); + return ehooks_default_purge_lazy_impl(addr, offset, length); +} +#endif + +#ifdef PAGES_CAN_PURGE_FORCED +bool +ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length) { + return pages_purge_forced((void *)((uintptr_t)addr + + (uintptr_t)offset), length); +} + +static bool +ehooks_default_purge_forced(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind) { + assert(addr != NULL); + assert((offset & PAGE_MASK) == 0); + assert(length != 0); + assert((length & PAGE_MASK) == 0); + return ehooks_default_purge_forced_impl(addr, offset, length); +} +#endif + +bool +ehooks_default_split_impl() { + if (!maps_coalesce) { + /* + * Without retain, only whole regions can be purged (required by + * MEM_RELEASE on Windows) -- therefore disallow splitting. See + * comments in extent_head_no_merge(). 
+ */ + return !opt_retain; + } + + return false; +} + +static bool +ehooks_default_split(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { + return ehooks_default_split_impl(); +} + +bool +ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b) { + assert(addr_a < addr_b); + /* + * For non-DSS cases -- + * a) W/o maps_coalesce, merge is not always allowed (Windows): + * 1) w/o retain, never merge (first branch below). + * 2) with retain, only merge extents from the same VirtualAlloc + * region (in which case MEM_DECOMMIT is utilized for purging). + * + * b) With maps_coalesce, it's always possible to merge. + * 1) w/o retain, always allow merge (only about dirty / muzzy). + * 2) with retain, to preserve the SN / first-fit, merge is still + * disallowed if b is a head extent, i.e. no merging across + * different mmap regions. + * + * a2) and b2) are implemented in emap_try_acquire_edata_neighbor, and + * sanity checked in the second branch below. + */ + if (!maps_coalesce && !opt_retain) { + return true; + } + if (config_debug) { + edata_t *a = emap_edata_lookup(tsdn, &arena_emap_global, + addr_a); + bool head_a = edata_is_head_get(a); + edata_t *b = emap_edata_lookup(tsdn, &arena_emap_global, + addr_b); + bool head_b = edata_is_head_get(b); + emap_assert_mapped(tsdn, &arena_emap_global, a); + emap_assert_mapped(tsdn, &arena_emap_global, b); + assert(extent_neighbor_head_state_mergeable(head_a, head_b, + /* forward */ true)); + } + if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) { + return true; + } + + return false; +} + +bool +ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, + void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { + tsdn_t *tsdn = tsdn_fetch(); + + return ehooks_default_merge_impl(tsdn, addr_a, addr_b); +} + +void +ehooks_default_zero_impl(void *addr, size_t size) { + /* + * By default, we try to zero out memory using OS-provided demand-zeroed + * pages. If the user has specifically requested hugepages, though, we + * don't want to purge in the middle of a hugepage (which would break it + * up), so we act conservatively and use memset. 
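Although the internal default table is now `ehooks_default_extent_hooks`, user code still interposes through the public `extent_hooks_t` interface. A hedged sketch that wraps only the mandatory alloc hook with logging, assuming the public `extent_hooks_t` declaration is visible (on FreeBSD via `<malloc_np.h>`, otherwise via `<jemalloc/jemalloc.h>`); error handling and hook chaining are kept minimal:

```
#include <stdbool.h>
#include <stdio.h>
#include <malloc_np.h>

static extent_hooks_t original_hooks;	/* copy of the defaults */
static extent_hooks_t wrapped_hooks;

static void *
logging_alloc(extent_hooks_t *eh, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	void *ret = original_hooks.alloc(&original_hooks, new_addr, size,
	    alignment, zero, commit, arena_ind);
	fprintf(stderr, "arena %u: mapped %zu bytes at %p\n", arena_ind,
	    size, ret);
	return ret;
}

int
main(void) {
	extent_hooks_t *cur;
	size_t sz = sizeof(cur);
	unsigned ind;

	/* Grab the current (default) hooks of arena 0 so we can chain. */
	if (mallctl("arena.0.extent_hooks", &cur, &sz, NULL, 0) != 0)
		return 1;
	original_hooks = *cur;
	wrapped_hooks = *cur;
	wrapped_hooks.alloc = logging_alloc;

	/* Create a fresh arena that uses the wrapper. */
	extent_hooks_t *newh = &wrapped_hooks;
	sz = sizeof(ind);
	if (mallctl("arenas.create", &ind, &sz, &newh, sizeof(newh)) != 0)
		return 1;
	printf("arena %u uses the logging hooks\n", ind);
	return 0;
}
```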
+ */ + bool needs_memset = true; + if (opt_thp != thp_mode_always) { + needs_memset = pages_purge_forced(addr, size); + } + if (needs_memset) { + memset(addr, 0, size); + } +} + +void +ehooks_default_guard_impl(void *guard1, void *guard2) { + pages_mark_guards(guard1, guard2); +} + +void +ehooks_default_unguard_impl(void *guard1, void *guard2) { + pages_unmark_guards(guard1, guard2); +} + +const extent_hooks_t ehooks_default_extent_hooks = { + ehooks_default_alloc, + ehooks_default_dalloc, + ehooks_default_destroy, + ehooks_default_commit, + ehooks_default_decommit, +#ifdef PAGES_CAN_PURGE_LAZY + ehooks_default_purge_lazy, +#else + NULL, +#endif +#ifdef PAGES_CAN_PURGE_FORCED + ehooks_default_purge_forced, +#else + NULL, +#endif + ehooks_default_split, + ehooks_default_merge +}; diff --git a/contrib/jemalloc/src/emap.c b/contrib/jemalloc/src/emap.c new file mode 100644 index 00000000000000..9cc95a724a9b2a --- /dev/null +++ b/contrib/jemalloc/src/emap.c @@ -0,0 +1,386 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/emap.h" + +enum emap_lock_result_e { + emap_lock_result_success, + emap_lock_result_failure, + emap_lock_result_no_extent +}; +typedef enum emap_lock_result_e emap_lock_result_t; + +bool +emap_init(emap_t *emap, base_t *base, bool zeroed) { + return rtree_new(&emap->rtree, base, zeroed); +} + +void +emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata, + extent_state_t state) { + witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE); + + edata_state_set(edata, state); + + EMAP_DECLARE_RTREE_CTX; + rtree_leaf_elm_t *elm1 = rtree_leaf_elm_lookup(tsdn, &emap->rtree, + rtree_ctx, (uintptr_t)edata_base_get(edata), /* dependent */ true, + /* init_missing */ false); + assert(elm1 != NULL); + rtree_leaf_elm_t *elm2 = edata_size_get(edata) == PAGE ? NULL : + rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx, + (uintptr_t)edata_last_get(edata), /* dependent */ true, + /* init_missing */ false); + + rtree_leaf_elm_state_update(tsdn, &emap->rtree, elm1, elm2, state); + + emap_assert_mapped(tsdn, emap, edata); +} + +static inline edata_t * +emap_try_acquire_edata_neighbor_impl(tsdn_t *tsdn, emap_t *emap, edata_t *edata, + extent_pai_t pai, extent_state_t expected_state, bool forward, + bool expanding) { + witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE); + assert(!edata_guarded_get(edata)); + assert(!expanding || forward); + assert(!edata_state_in_transition(expected_state)); + assert(expected_state == extent_state_dirty || + expected_state == extent_state_muzzy || + expected_state == extent_state_retained); + + void *neighbor_addr = forward ? edata_past_get(edata) : + edata_before_get(edata); + /* + * This is subtle; the rtree code asserts that its input pointer is + * non-NULL, and this is a useful thing to check. But it's possible + * that edata corresponds to an address of (void *)PAGE (in practice, + * this has only been observed on FreeBSD when address-space + * randomization is on, but it could in principle happen anywhere). In + * this case, edata_before_get(edata) is NULL, triggering the assert. 
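Reviewer aside (illustration only, not part of the imported sources): the ehooks_default_extent_hooks table defined at the end of ehooks.c above is what a user-supplied extent_hooks_t replaces. A minimal sketch of wrapping it follows, assuming the public extent_hooks_t / mallctl("arena.<i>.extent_hooks") interface described in the jemalloc(3) page updated by this patch; the include line and the logging_* names are guesses for illustration, and real hooks must be careful about reentrancy (allocating from inside a hook).

```
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <malloc_np.h>	/* provides mallctl(); adjust per platform */

/* Previously installed hooks (the default table), captured at install time. */
static extent_hooks_t *orig_hooks;
static extent_hooks_t logging_hooks;

/* Log each extent allocation, then delegate to the original alloc hook. */
static void *
logging_alloc(extent_hooks_t *hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	(void)hooks;
	void *ret = orig_hooks->alloc(orig_hooks, new_addr, size, alignment,
	    zero, commit, arena_ind);
	fprintf(stderr, "extent alloc: %zu bytes -> %p (arena %u)\n",
	    size, ret, arena_ind);
	return ret;
}

int
main(void) {
	size_t sz = sizeof(orig_hooks);
	if (mallctl("arena.0.extent_hooks", &orig_hooks, &sz, NULL, 0) != 0) {
		return 1;
	}
	logging_hooks = *orig_hooks;		/* keep all other callbacks */
	logging_hooks.alloc = logging_alloc;
	extent_hooks_t *new_hooks = &logging_hooks;
	if (mallctl("arena.0.extent_hooks", NULL, NULL, &new_hooks,
	    sizeof(new_hooks)) != 0) {
		return 1;
	}
	free(malloc(4096));	/* may or may not reach the hook */
	return 0;
}
```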
+ */ + if (neighbor_addr == NULL) { + return NULL; + } + + EMAP_DECLARE_RTREE_CTX; + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &emap->rtree, + rtree_ctx, (uintptr_t)neighbor_addr, /* dependent*/ false, + /* init_missing */ false); + if (elm == NULL) { + return NULL; + } + + rtree_contents_t neighbor_contents = rtree_leaf_elm_read(tsdn, + &emap->rtree, elm, /* dependent */ true); + if (!extent_can_acquire_neighbor(edata, neighbor_contents, pai, + expected_state, forward, expanding)) { + return NULL; + } + + /* From this point, the neighbor edata can be safely acquired. */ + edata_t *neighbor = neighbor_contents.edata; + assert(edata_state_get(neighbor) == expected_state); + emap_update_edata_state(tsdn, emap, neighbor, extent_state_merging); + if (expanding) { + extent_assert_can_expand(edata, neighbor); + } else { + extent_assert_can_coalesce(edata, neighbor); + } + + return neighbor; +} + +edata_t * +emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap, edata_t *edata, + extent_pai_t pai, extent_state_t expected_state, bool forward) { + return emap_try_acquire_edata_neighbor_impl(tsdn, emap, edata, pai, + expected_state, forward, /* expand */ false); +} + +edata_t * +emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap, + edata_t *edata, extent_pai_t pai, extent_state_t expected_state) { + /* Try expanding forward. */ + return emap_try_acquire_edata_neighbor_impl(tsdn, emap, edata, pai, + expected_state, /* forward */ true, /* expand */ true); +} + +void +emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata, + extent_state_t new_state) { + assert(emap_edata_in_transition(tsdn, emap, edata)); + assert(emap_edata_is_acquired(tsdn, emap, edata)); + + emap_update_edata_state(tsdn, emap, edata, new_state); +} + +static bool +emap_rtree_leaf_elms_lookup(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx, + const edata_t *edata, bool dependent, bool init_missing, + rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) { + *r_elm_a = rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx, + (uintptr_t)edata_base_get(edata), dependent, init_missing); + if (!dependent && *r_elm_a == NULL) { + return true; + } + assert(*r_elm_a != NULL); + + *r_elm_b = rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx, + (uintptr_t)edata_last_get(edata), dependent, init_missing); + if (!dependent && *r_elm_b == NULL) { + return true; + } + assert(*r_elm_b != NULL); + + return false; +} + +static void +emap_rtree_write_acquired(tsdn_t *tsdn, emap_t *emap, rtree_leaf_elm_t *elm_a, + rtree_leaf_elm_t *elm_b, edata_t *edata, szind_t szind, bool slab) { + rtree_contents_t contents; + contents.edata = edata; + contents.metadata.szind = szind; + contents.metadata.slab = slab; + contents.metadata.is_head = (edata == NULL) ? false : + edata_is_head_get(edata); + contents.metadata.state = (edata == NULL) ? 
0 : edata_state_get(edata); + rtree_leaf_elm_write(tsdn, &emap->rtree, elm_a, contents); + if (elm_b != NULL) { + rtree_leaf_elm_write(tsdn, &emap->rtree, elm_b, contents); + } +} + +bool +emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata, + szind_t szind, bool slab) { + assert(edata_state_get(edata) == extent_state_active); + EMAP_DECLARE_RTREE_CTX; + + rtree_leaf_elm_t *elm_a, *elm_b; + bool err = emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, edata, + false, true, &elm_a, &elm_b); + if (err) { + return true; + } + assert(rtree_leaf_elm_read(tsdn, &emap->rtree, elm_a, + /* dependent */ false).edata == NULL); + assert(rtree_leaf_elm_read(tsdn, &emap->rtree, elm_b, + /* dependent */ false).edata == NULL); + emap_rtree_write_acquired(tsdn, emap, elm_a, elm_b, edata, szind, slab); + return false; +} + +/* Invoked *after* emap_register_boundary. */ +void +emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata, + szind_t szind) { + EMAP_DECLARE_RTREE_CTX; + + assert(edata_slab_get(edata)); + assert(edata_state_get(edata) == extent_state_active); + + if (config_debug) { + /* Making sure the boundary is registered already. */ + rtree_leaf_elm_t *elm_a, *elm_b; + bool err = emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, + edata, /* dependent */ true, /* init_missing */ false, + &elm_a, &elm_b); + assert(!err); + rtree_contents_t contents_a, contents_b; + contents_a = rtree_leaf_elm_read(tsdn, &emap->rtree, elm_a, + /* dependent */ true); + contents_b = rtree_leaf_elm_read(tsdn, &emap->rtree, elm_b, + /* dependent */ true); + assert(contents_a.edata == edata && contents_b.edata == edata); + assert(contents_a.metadata.slab && contents_b.metadata.slab); + } + + rtree_contents_t contents; + contents.edata = edata; + contents.metadata.szind = szind; + contents.metadata.slab = true; + contents.metadata.state = extent_state_active; + contents.metadata.is_head = false; /* Not allowed to access. */ + + assert(edata_size_get(edata) > (2 << LG_PAGE)); + rtree_write_range(tsdn, &emap->rtree, rtree_ctx, + (uintptr_t)edata_base_get(edata) + PAGE, + (uintptr_t)edata_last_get(edata) - PAGE, contents); +} + +void +emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { + /* + * The edata must be either in an acquired state, or protected by state + * based locks. 
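Reviewer aside (illustration only, not part of the imported sources): emap_register_boundary() and emap_register_interior() above split responsibility between the first/last page of an extent and the pages strictly in between, so that a pointer into the middle of a slab still maps back to its owning extent. A toy analogue using a flat per-page array in place of the rtree; every toy_* name is invented.

```
#include <assert.h>
#include <stddef.h>

#define TOY_NPAGES 64

/* Toy stand-in for the rtree: one slot per page, pointing at the owner. */
struct toy_extent { size_t first_page; size_t npages; };
static struct toy_extent *page_map[TOY_NPAGES];

/* Analogue of emap_register_boundary(): first and last page only. */
static void
toy_register_boundary(struct toy_extent *e) {
	page_map[e->first_page] = e;
	page_map[e->first_page + e->npages - 1] = e;
}

/*
 * Analogue of emap_register_interior(): every page strictly in between, so
 * interior lookups (needed only for slabs) resolve to the same extent.
 */
static void
toy_register_interior(struct toy_extent *e) {
	for (size_t i = 1; i + 1 < e->npages; i++) {
		page_map[e->first_page + i] = e;
	}
}

int
main(void) {
	struct toy_extent slab = { 8, 4 };	/* pages 8..11 */
	toy_register_boundary(&slab);
	toy_register_interior(&slab);
	for (size_t i = 0; i < slab.npages; i++) {
		assert(page_map[slab.first_page + i] == &slab);
	}
	return 0;
}
```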
+ */ + if (!emap_edata_is_acquired(tsdn, emap, edata)) { + witness_assert_positive_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE); + } + + EMAP_DECLARE_RTREE_CTX; + rtree_leaf_elm_t *elm_a, *elm_b; + + emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, edata, + true, false, &elm_a, &elm_b); + emap_rtree_write_acquired(tsdn, emap, elm_a, elm_b, NULL, SC_NSIZES, + false); +} + +void +emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { + EMAP_DECLARE_RTREE_CTX; + + assert(edata_slab_get(edata)); + if (edata_size_get(edata) > (2 << LG_PAGE)) { + rtree_clear_range(tsdn, &emap->rtree, rtree_ctx, + (uintptr_t)edata_base_get(edata) + PAGE, + (uintptr_t)edata_last_get(edata) - PAGE); + } +} + +void +emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind, + bool slab) { + EMAP_DECLARE_RTREE_CTX; + + if (szind != SC_NSIZES) { + rtree_contents_t contents; + contents.edata = edata; + contents.metadata.szind = szind; + contents.metadata.slab = slab; + contents.metadata.is_head = edata_is_head_get(edata); + contents.metadata.state = edata_state_get(edata); + + rtree_write(tsdn, &emap->rtree, rtree_ctx, + (uintptr_t)edata_addr_get(edata), contents); + /* + * Recall that this is called only for active->inactive and + * inactive->active transitions (since only active extents have + * meaningful values for szind and slab). Active, non-slab + * extents only need to handle lookups at their head (on + * deallocation), so we don't bother filling in the end + * boundary. + * + * For slab extents, we do the end-mapping change. This still + * leaves the interior unmodified; an emap_register_interior + * call is coming in those cases, though. + */ + if (slab && edata_size_get(edata) > PAGE) { + uintptr_t key = (uintptr_t)edata_past_get(edata) + - (uintptr_t)PAGE; + rtree_write(tsdn, &emap->rtree, rtree_ctx, key, + contents); + } + } +} + +bool +emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare, + edata_t *edata, size_t size_a, edata_t *trail, size_t size_b) { + EMAP_DECLARE_RTREE_CTX; + + /* + * We use incorrect constants for things like arena ind, zero, ranged, + * and commit state, and head status. This is a fake edata_t, used to + * facilitate a lookup. + */ + edata_t lead = {0}; + edata_init(&lead, 0U, edata_addr_get(edata), size_a, false, 0, 0, + extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD); + + emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, &lead, false, true, + &prepare->lead_elm_a, &prepare->lead_elm_b); + emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, trail, false, true, + &prepare->trail_elm_a, &prepare->trail_elm_b); + + if (prepare->lead_elm_a == NULL || prepare->lead_elm_b == NULL + || prepare->trail_elm_a == NULL || prepare->trail_elm_b == NULL) { + return true; + } + return false; +} + +void +emap_split_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare, + edata_t *lead, size_t size_a, edata_t *trail, size_t size_b) { + /* + * We should think about not writing to the lead leaf element. We can + * get into situations where a racing realloc-like call can disagree + * with a size lookup request. I think it's fine to declare that these + * situations are race bugs, but there's an argument to be made that for + * things like xallocx, a size lookup call should return either the old + * size or the new size, but not anything else. 
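Reviewer aside (illustration only, not part of the imported sources): emap_split_prepare() / emap_split_commit() above follow a prepare/commit discipline -- look up every rtree slot the split will touch and fail early, then do nothing but writes in the commit step. A toy version of the same error-handling shape; the toy_* names and the int table are invented.

```
#include <stdbool.h>
#include <stddef.h>

#define TOY_NSLOTS 16
static int toy_table[TOY_NSLOTS];

/* All slots a split will touch, looked up before anything is written. */
struct toy_prepare { int *slot_lead; int *slot_trail; };

/* "Prepare": validate and collect the destination slots; on failure the
 * table is untouched (compare emap_split_prepare() returning true). */
static bool
toy_split_prepare(size_t lead, size_t trail, struct toy_prepare *prep) {
	if (lead >= TOY_NSLOTS || trail >= TOY_NSLOTS) {
		return true;
	}
	prep->slot_lead = &toy_table[lead];
	prep->slot_trail = &toy_table[trail];
	return false;
}

/* "Commit": only writes, no failure paths left. */
static void
toy_split_commit(struct toy_prepare *prep, int lead_val, int trail_val) {
	*prep->slot_lead = lead_val;
	*prep->slot_trail = trail_val;
}

int
main(void) {
	struct toy_prepare prep;
	if (toy_split_prepare(3, 7, &prep)) {
		return 1;
	}
	toy_split_commit(&prep, 100, 200);
	return (toy_table[3] == 100 && toy_table[7] == 200) ? 0 : 1;
}
```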
+ */ + emap_rtree_write_acquired(tsdn, emap, prepare->lead_elm_a, + prepare->lead_elm_b, lead, SC_NSIZES, /* slab */ false); + emap_rtree_write_acquired(tsdn, emap, prepare->trail_elm_a, + prepare->trail_elm_b, trail, SC_NSIZES, /* slab */ false); +} + +void +emap_merge_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare, + edata_t *lead, edata_t *trail) { + EMAP_DECLARE_RTREE_CTX; + emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, lead, true, false, + &prepare->lead_elm_a, &prepare->lead_elm_b); + emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, trail, true, false, + &prepare->trail_elm_a, &prepare->trail_elm_b); +} + +void +emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare, + edata_t *lead, edata_t *trail) { + rtree_contents_t clear_contents; + clear_contents.edata = NULL; + clear_contents.metadata.szind = SC_NSIZES; + clear_contents.metadata.slab = false; + clear_contents.metadata.is_head = false; + clear_contents.metadata.state = (extent_state_t)0; + + if (prepare->lead_elm_b != NULL) { + rtree_leaf_elm_write(tsdn, &emap->rtree, + prepare->lead_elm_b, clear_contents); + } + + rtree_leaf_elm_t *merged_b; + if (prepare->trail_elm_b != NULL) { + rtree_leaf_elm_write(tsdn, &emap->rtree, + prepare->trail_elm_a, clear_contents); + merged_b = prepare->trail_elm_b; + } else { + merged_b = prepare->trail_elm_a; + } + + emap_rtree_write_acquired(tsdn, emap, prepare->lead_elm_a, merged_b, + lead, SC_NSIZES, false); +} + +void +emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { + EMAP_DECLARE_RTREE_CTX; + + rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx, + (uintptr_t)edata_base_get(edata)); + assert(contents.edata == edata); + assert(contents.metadata.is_head == edata_is_head_get(edata)); + assert(contents.metadata.state == edata_state_get(edata)); +} + +void +emap_do_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) { + emap_full_alloc_ctx_t context1 = {0}; + emap_full_alloc_ctx_try_lookup(tsdn, emap, edata_base_get(edata), + &context1); + assert(context1.edata == NULL); + + emap_full_alloc_ctx_t context2 = {0}; + emap_full_alloc_ctx_try_lookup(tsdn, emap, edata_last_get(edata), + &context2); + assert(context2.edata == NULL); +} diff --git a/contrib/jemalloc/src/eset.c b/contrib/jemalloc/src/eset.c new file mode 100644 index 00000000000000..6f8f335e198b25 --- /dev/null +++ b/contrib/jemalloc/src/eset.c @@ -0,0 +1,282 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/eset.h" + +#define ESET_NPSIZES (SC_NPSIZES + 1) + +static void +eset_bin_init(eset_bin_t *bin) { + edata_heap_new(&bin->heap); + /* + * heap_min doesn't need initialization; it gets filled in when the bin + * goes from non-empty to empty. 
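Reviewer aside (illustration only, not part of the imported sources): the heap_min field introduced here is maintained by the eset_insert()/eset_remove() paths that follow -- update the cached summary on insert, and on removal recompute it only when the removed element compares equal to the cached minimum, avoiding a pairing-heap merge in the common case. A toy version of that caching discipline over a plain array; the toy_bag names are invented.

```
#include <assert.h>
#include <limits.h>
#include <stddef.h>

/* Toy bag of keys with a cached minimum, maintained like eset's heap_min. */
struct toy_bag {
	int keys[64];
	size_t nkeys;
	int cached_min;	/* valid only when nkeys > 0 */
};

static void
bag_insert(struct toy_bag *bag, int key) {
	if (bag->nkeys == 0 || key < bag->cached_min) {
		bag->cached_min = key;
	}
	bag->keys[bag->nkeys++] = key;
}

static void
bag_remove(struct toy_bag *bag, size_t idx) {
	int removed = bag->keys[idx];
	bag->keys[idx] = bag->keys[--bag->nkeys];
	if (bag->nkeys > 0 && removed == bag->cached_min) {
		/* Only now pay for a full scan (the expensive heap operation
		 * in the real code). */
		int min = INT_MAX;
		for (size_t i = 0; i < bag->nkeys; i++) {
			if (bag->keys[i] < min) {
				min = bag->keys[i];
			}
		}
		bag->cached_min = min;
	}
}

int
main(void) {
	struct toy_bag bag = { .nkeys = 0 };
	bag_insert(&bag, 5);
	bag_insert(&bag, 3);
	bag_insert(&bag, 7);
	bag_remove(&bag, 1);	/* removes 3, the cached minimum */
	assert(bag.cached_min == 5);
	return 0;
}
```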
+ */ +} + +static void +eset_bin_stats_init(eset_bin_stats_t *bin_stats) { + atomic_store_zu(&bin_stats->nextents, 0, ATOMIC_RELAXED); + atomic_store_zu(&bin_stats->nbytes, 0, ATOMIC_RELAXED); +} + +void +eset_init(eset_t *eset, extent_state_t state) { + for (unsigned i = 0; i < ESET_NPSIZES; i++) { + eset_bin_init(&eset->bins[i]); + eset_bin_stats_init(&eset->bin_stats[i]); + } + fb_init(eset->bitmap, ESET_NPSIZES); + edata_list_inactive_init(&eset->lru); + eset->state = state; +} + +size_t +eset_npages_get(eset_t *eset) { + return atomic_load_zu(&eset->npages, ATOMIC_RELAXED); +} + +size_t +eset_nextents_get(eset_t *eset, pszind_t pind) { + return atomic_load_zu(&eset->bin_stats[pind].nextents, ATOMIC_RELAXED); +} + +size_t +eset_nbytes_get(eset_t *eset, pszind_t pind) { + return atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED); +} + +static void +eset_stats_add(eset_t *eset, pszind_t pind, size_t sz) { + size_t cur = atomic_load_zu(&eset->bin_stats[pind].nextents, + ATOMIC_RELAXED); + atomic_store_zu(&eset->bin_stats[pind].nextents, cur + 1, + ATOMIC_RELAXED); + cur = atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED); + atomic_store_zu(&eset->bin_stats[pind].nbytes, cur + sz, + ATOMIC_RELAXED); +} + +static void +eset_stats_sub(eset_t *eset, pszind_t pind, size_t sz) { + size_t cur = atomic_load_zu(&eset->bin_stats[pind].nextents, + ATOMIC_RELAXED); + atomic_store_zu(&eset->bin_stats[pind].nextents, cur - 1, + ATOMIC_RELAXED); + cur = atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED); + atomic_store_zu(&eset->bin_stats[pind].nbytes, cur - sz, + ATOMIC_RELAXED); +} + +void +eset_insert(eset_t *eset, edata_t *edata) { + assert(edata_state_get(edata) == eset->state); + + size_t size = edata_size_get(edata); + size_t psz = sz_psz_quantize_floor(size); + pszind_t pind = sz_psz2ind(psz); + + edata_cmp_summary_t edata_cmp_summary = edata_cmp_summary_get(edata); + if (edata_heap_empty(&eset->bins[pind].heap)) { + fb_set(eset->bitmap, ESET_NPSIZES, (size_t)pind); + /* Only element is automatically the min element. */ + eset->bins[pind].heap_min = edata_cmp_summary; + } else { + /* + * There's already a min element; update the summary if we're + * about to insert a lower one. + */ + if (edata_cmp_summary_comp(edata_cmp_summary, + eset->bins[pind].heap_min) < 0) { + eset->bins[pind].heap_min = edata_cmp_summary; + } + } + edata_heap_insert(&eset->bins[pind].heap, edata); + + if (config_stats) { + eset_stats_add(eset, pind, size); + } + + edata_list_inactive_append(&eset->lru, edata); + size_t npages = size >> LG_PAGE; + /* + * All modifications to npages hold the mutex (as asserted above), so we + * don't need an atomic fetch-add; we can get by with a load followed by + * a store. 
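Reviewer aside (illustration only, not part of the imported sources): the comment above argues that because every writer holds the eset mutex, a relaxed atomic load followed by a relaxed store is a correct read-modify-write, and the (more expensive) atomic fetch-add is unnecessary; unsynchronized readers only need a statistics snapshot. The same argument in isolation, as a minimal C11 sketch with invented names:

```
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static atomic_size_t npages;

/* All updates happen under the mutex, so load + store is a valid RMW. */
static void
account_insert(size_t added_pages) {
	pthread_mutex_lock(&mtx);
	size_t cur = atomic_load_explicit(&npages, memory_order_relaxed);
	atomic_store_explicit(&npages, cur + added_pages,
	    memory_order_relaxed);
	pthread_mutex_unlock(&mtx);
}

/* Lock-free, possibly slightly stale -- fine for stats reporting. */
static size_t
npages_snapshot(void) {
	return atomic_load_explicit(&npages, memory_order_relaxed);
}

int
main(void) {
	account_insert(4);
	return npages_snapshot() == 4 ? 0 : 1;
}
```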
+ */ + size_t cur_eset_npages = + atomic_load_zu(&eset->npages, ATOMIC_RELAXED); + atomic_store_zu(&eset->npages, cur_eset_npages + npages, + ATOMIC_RELAXED); +} + +void +eset_remove(eset_t *eset, edata_t *edata) { + assert(edata_state_get(edata) == eset->state || + edata_state_in_transition(edata_state_get(edata))); + + size_t size = edata_size_get(edata); + size_t psz = sz_psz_quantize_floor(size); + pszind_t pind = sz_psz2ind(psz); + if (config_stats) { + eset_stats_sub(eset, pind, size); + } + + edata_cmp_summary_t edata_cmp_summary = edata_cmp_summary_get(edata); + edata_heap_remove(&eset->bins[pind].heap, edata); + if (edata_heap_empty(&eset->bins[pind].heap)) { + fb_unset(eset->bitmap, ESET_NPSIZES, (size_t)pind); + } else { + /* + * This is a little weird; we compare if the summaries are + * equal, rather than if the edata we removed was the heap + * minimum. The reason why is that getting the heap minimum + * can cause a pairing heap merge operation. We can avoid this + * if we only update the min if it's changed, in which case the + * summaries of the removed element and the min element should + * compare equal. + */ + if (edata_cmp_summary_comp(edata_cmp_summary, + eset->bins[pind].heap_min) == 0) { + eset->bins[pind].heap_min = edata_cmp_summary_get( + edata_heap_first(&eset->bins[pind].heap)); + } + } + edata_list_inactive_remove(&eset->lru, edata); + size_t npages = size >> LG_PAGE; + /* + * As in eset_insert, we hold eset->mtx and so don't need atomic + * operations for updating eset->npages. + */ + size_t cur_extents_npages = + atomic_load_zu(&eset->npages, ATOMIC_RELAXED); + assert(cur_extents_npages >= npages); + atomic_store_zu(&eset->npages, + cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED); +} + +/* + * Find an extent with size [min_size, max_size) to satisfy the alignment + * requirement. For each size, try only the first extent in the heap. + */ +static edata_t * +eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size, + size_t alignment) { + pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(min_size)); + pszind_t pind_max = sz_psz2ind(sz_psz_quantize_ceil(max_size)); + + for (pszind_t i = + (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind); + i < pind_max; + i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) { + assert(i < SC_NPSIZES); + assert(!edata_heap_empty(&eset->bins[i].heap)); + edata_t *edata = edata_heap_first(&eset->bins[i].heap); + uintptr_t base = (uintptr_t)edata_base_get(edata); + size_t candidate_size = edata_size_get(edata); + assert(candidate_size >= min_size); + + uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base, + PAGE_CEILING(alignment)); + if (base > next_align || base + candidate_size <= next_align) { + /* Overflow or not crossing the next alignment. */ + continue; + } + + size_t leadsize = next_align - base; + if (candidate_size - leadsize >= min_size) { + return edata; + } + } + + return NULL; +} + +/* + * Do first-fit extent selection, i.e. select the oldest/lowest extent that is + * large enough. + * + * lg_max_fit is the (log of the) maximum ratio between the requested size and + * the returned size that we'll allow. This can reduce fragmentation by + * avoiding reusing and splitting large extents for smaller sizes. In practice, + * it's set to opt_lg_extent_max_active_fit for the dirty eset and SC_PTR_BITS + * for others. 
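Reviewer aside (illustration only, not part of the imported sources): the eset_first_fit() body that follows implements the lg_max_fit cap described in the comment above. A simplified standalone version with power-of-two bins and a linear scan; the real code walks only non-empty bins via a bitmap (fb_ffs), uses jemalloc's page size classes, and clamps lg_max_fit to avoid shifting out all bits. All names here are invented.

```
#include <stdbool.h>
#include <stddef.h>

#define NBINS 16	/* toy bins: bin i holds free blocks of (1 << i) bytes */

static bool bin_nonempty[NBINS];

/*
 * First fit with a fragmentation cap: walk bins upward from the smallest
 * class that can hold `size`, but refuse a block more than
 * (1 << lg_max_fit) times larger than the request.  Returns the bin, or -1.
 */
static int
first_fit(size_t size, unsigned lg_max_fit) {
	for (int i = 0; i < NBINS; i++) {
		size_t bin_size = (size_t)1 << i;
		if (!bin_nonempty[i] || bin_size < size) {
			continue;
		}
		if ((bin_size >> lg_max_fit) > size) {
			break;	/* too big: splitting it costs more later */
		}
		return i;
	}
	return -1;
}

int
main(void) {
	bin_nonempty[12] = true;	/* one free 4 KiB block */
	int strict = first_fit(64, 1);	/* cap at 2x: 4 KiB is rejected */
	int loose = first_fit(64, 8);	/* cap at 256x: 4 KiB is accepted */
	return (strict == -1 && loose == 12) ? 0 : 1;
}
```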
+ */ +static edata_t * +eset_first_fit(eset_t *eset, size_t size, bool exact_only, + unsigned lg_max_fit) { + edata_t *ret = NULL; + edata_cmp_summary_t ret_summ JEMALLOC_CC_SILENCE_INIT({0}); + + pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(size)); + + if (exact_only) { + return edata_heap_empty(&eset->bins[pind].heap) ? NULL : + edata_heap_first(&eset->bins[pind].heap); + } + + for (pszind_t i = + (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind); + i < ESET_NPSIZES; + i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) { + assert(!edata_heap_empty(&eset->bins[i].heap)); + if (lg_max_fit == SC_PTR_BITS) { + /* + * We'll shift by this below, and shifting out all the + * bits is undefined. Decreasing is safe, since the + * page size is larger than 1 byte. + */ + lg_max_fit = SC_PTR_BITS - 1; + } + if ((sz_pind2sz(i) >> lg_max_fit) > size) { + break; + } + if (ret == NULL || edata_cmp_summary_comp( + eset->bins[i].heap_min, ret_summ) < 0) { + /* + * We grab the edata as early as possible, even though + * we might change it later. Practically, a large + * portion of eset_fit calls succeed at the first valid + * index, so this doesn't cost much, and we get the + * effect of prefetching the edata as early as possible. + */ + edata_t *edata = edata_heap_first(&eset->bins[i].heap); + assert(edata_size_get(edata) >= size); + assert(ret == NULL || edata_snad_comp(edata, ret) < 0); + assert(ret == NULL || edata_cmp_summary_comp( + eset->bins[i].heap_min, + edata_cmp_summary_get(edata)) == 0); + ret = edata; + ret_summ = eset->bins[i].heap_min; + } + if (i == SC_NPSIZES) { + break; + } + assert(i < SC_NPSIZES); + } + + return ret; +} + +edata_t * +eset_fit(eset_t *eset, size_t esize, size_t alignment, bool exact_only, + unsigned lg_max_fit) { + size_t max_size = esize + PAGE_CEILING(alignment) - PAGE; + /* Beware size_t wrap-around. */ + if (max_size < esize) { + return NULL; + } + + edata_t *edata = eset_first_fit(eset, max_size, exact_only, lg_max_fit); + + if (alignment > PAGE && edata == NULL) { + /* + * max_size guarantees the alignment requirement but is rather + * pessimistic. Next we try to satisfy the aligned allocation + * with sizes in [esize, max_size). + */ + edata = eset_fit_alignment(eset, esize, max_size, alignment); + } + + return edata; +} diff --git a/contrib/jemalloc/src/exp_grow.c b/contrib/jemalloc/src/exp_grow.c new file mode 100644 index 00000000000000..386471f49fdec1 --- /dev/null +++ b/contrib/jemalloc/src/exp_grow.c @@ -0,0 +1,8 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +void +exp_grow_init(exp_grow_t *exp_grow) { + exp_grow->next = sz_psz2ind(HUGEPAGE); + exp_grow->limit = sz_psz2ind(SC_LARGE_MAXCLASS); +} diff --git a/contrib/jemalloc/src/extent.c b/contrib/jemalloc/src/extent.c index b4ef382676beca..cf3d1f3112df7b 100644 --- a/contrib/jemalloc/src/extent.c +++ b/contrib/jemalloc/src/extent.c @@ -1,93 +1,28 @@ -#define JEMALLOC_EXTENT_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" +#include "jemalloc/internal/emap.h" #include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/extent_mmap.h" #include "jemalloc/internal/ph.h" -#include "jemalloc/internal/rtree.h" #include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/mutex_pool.h" /******************************************************************************/ /* Data. 
*/ -rtree_t extents_rtree; -/* Keyed by the address of the extent_t being protected. */ -mutex_pool_t extent_mutex_pool; - size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT; -static const bitmap_info_t extents_bitmap_info = - BITMAP_INFO_INITIALIZER(SC_NPSIZES+1); - -static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, - size_t size, size_t alignment, bool *zero, bool *commit, - unsigned arena_ind); -static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, - size_t size, bool committed, unsigned arena_ind); -static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, - size_t size, bool committed, unsigned arena_ind); -static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr, - size_t size, size_t offset, size_t length, unsigned arena_ind); -static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, - size_t length, bool growing_retained); -static bool extent_decommit_default(extent_hooks_t *extent_hooks, - void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); -#ifdef PAGES_CAN_PURGE_LAZY -static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, - size_t size, size_t offset, size_t length, unsigned arena_ind); -#endif -static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, - size_t length, bool growing_retained); -#ifdef PAGES_CAN_PURGE_FORCED -static bool extent_purge_forced_default(extent_hooks_t *extent_hooks, - void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); -#endif -static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, - size_t length, bool growing_retained); -static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr, - size_t size, size_t size_a, size_t size_b, bool committed, - unsigned arena_ind); -static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, - szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, - bool growing_retained); -static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, - size_t size_a, void *addr_b, size_t size_b, bool committed, - unsigned arena_ind); -static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, - bool growing_retained); - -const extent_hooks_t extent_hooks_default = { - extent_alloc_default, - extent_dalloc_default, - extent_destroy_default, - extent_commit_default, - extent_decommit_default -#ifdef PAGES_CAN_PURGE_LAZY - , - extent_purge_lazy_default -#else - , - NULL -#endif -#ifdef PAGES_CAN_PURGE_FORCED - , - extent_purge_forced_default -#else - , - NULL -#endif - , - extent_split_default, - extent_merge_default -}; +static bool extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + size_t offset, size_t length, bool growing_retained); +static bool extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks, + edata_t *edata, size_t offset, size_t length, bool growing_retained); +static bool extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks, + edata_t *edata, size_t offset, size_t length, bool growing_retained); +static edata_t *extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + edata_t *edata, size_t size_a, size_t size_b, 
bool holding_core_locks); +static bool extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + edata_t *a, edata_t *b, bool holding_core_locks); /* Used exclusively for gdump triggering. */ static atomic_zu_t curpages; @@ -99,503 +34,158 @@ static atomic_zu_t highpages; * definition. */ -static void extent_deregister(tsdn_t *tsdn, extent_t *extent); -static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr, - size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind, - bool *zero, bool *commit, bool growing_retained); -static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, - extent_t *extent, bool *coalesced, bool growing_retained); -static void extent_record(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent, - bool growing_retained); +static void extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata); +static edata_t *extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + ecache_t *ecache, edata_t *expand_edata, size_t usize, size_t alignment, + bool zero, bool *commit, bool growing_retained, bool guarded); +static edata_t *extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + ecache_t *ecache, edata_t *edata, bool *coalesced); +static edata_t *extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, + ehooks_t *ehooks, edata_t *expand_edata, size_t size, size_t alignment, + bool zero, bool *commit, bool guarded); /******************************************************************************/ -#define ATTR_NONE /* does nothing */ - -ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link, - extent_esnead_comp) - -#undef ATTR_NONE - -typedef enum { - lock_result_success, - lock_result_failure, - lock_result_no_extent -} lock_result_t; - -static lock_result_t -extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm, - extent_t **result, bool inactive_only) { - extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree, - elm, true); - - /* Slab implies active extents and should be skipped. */ - if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn, - &extents_rtree, elm, true))) { - return lock_result_no_extent; - } - - /* - * It's possible that the extent changed out from under us, and with it - * the leaf->extent mapping. We have to recheck while holding the lock. - */ - extent_lock(tsdn, extent1); - extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn, - &extents_rtree, elm, true); - - if (extent1 == extent2) { - *result = extent1; - return lock_result_success; - } else { - extent_unlock(tsdn, extent1); - return lock_result_failure; - } -} - -/* - * Returns a pool-locked extent_t * if there's one associated with the given - * address, and NULL otherwise. 
- */ -static extent_t * -extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr, - bool inactive_only) { - extent_t *ret = NULL; - rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree, - rtree_ctx, (uintptr_t)addr, false, false); - if (elm == NULL) { - return NULL; - } - lock_result_t lock_result; - do { - lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret, - inactive_only); - } while (lock_result == lock_result_failure); - return ret; -} - -extent_t * -extent_alloc(tsdn_t *tsdn, arena_t *arena) { - malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); - extent_t *extent = extent_avail_first(&arena->extent_avail); - if (extent == NULL) { - malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); - return base_alloc_extent(tsdn, arena->base); - } - extent_avail_remove(&arena->extent_avail, extent); - atomic_fetch_sub_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED); - malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); - return extent; -} - -void -extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { - malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); - extent_avail_insert(&arena->extent_avail, extent); - atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED); - malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); -} - -extent_hooks_t * -extent_hooks_get(arena_t *arena) { - return base_extent_hooks_get(arena->base); -} - -extent_hooks_t * -extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) { - background_thread_info_t *info; - if (have_background_thread) { - info = arena_background_thread_info_get(arena); - malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); - } - extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks); - if (have_background_thread) { - malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); - } - - return ret; -} - -static void -extent_hooks_assure_initialized(arena_t *arena, - extent_hooks_t **r_extent_hooks) { - if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) { - *r_extent_hooks = extent_hooks_get(arena); - } -} - -#ifndef JEMALLOC_JET -static -#endif size_t -extent_size_quantize_floor(size_t size) { - size_t ret; - pszind_t pind; - - assert(size > 0); - assert((size & PAGE_MASK) == 0); - - pind = sz_psz2ind(size - sz_large_pad + 1); - if (pind == 0) { - /* - * Avoid underflow. This short-circuit would also do the right - * thing for all sizes in the range for which there are - * PAGE-spaced size classes, but it's simplest to just handle - * the one case that would cause erroneous results. - */ - return size; - } - ret = sz_pind2sz(pind - 1) + sz_large_pad; - assert(ret <= size); - return ret; +extent_sn_next(pac_t *pac) { + return atomic_fetch_add_zu(&pac->extent_sn_next, 1, ATOMIC_RELAXED); } -#ifndef JEMALLOC_JET -static -#endif -size_t -extent_size_quantize_ceil(size_t size) { - size_t ret; - - assert(size > 0); - assert(size - sz_large_pad <= SC_LARGE_MAXCLASS); - assert((size & PAGE_MASK) == 0); - - ret = extent_size_quantize_floor(size); - if (ret < size) { - /* - * Skip a quantization that may have an adequately large extent, - * because under-sized extents may be mixed in. This only - * happens when an unusual size is requested, i.e. for aligned - * allocation, and is just one of several places where linear - * search would potentially find sufficiently aligned available - * memory somewhere lower. 
- */ - ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) + - sz_large_pad; - } - return ret; +static inline bool +extent_may_force_decay(pac_t *pac) { + return !(pac_decay_ms_get(pac, extent_state_dirty) == -1 + || pac_decay_ms_get(pac, extent_state_muzzy) == -1); } -/* Generate pairing heap functions. */ -ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp) +static bool +extent_try_delayed_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + ecache_t *ecache, edata_t *edata) { + emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active); -bool -extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state, - bool delay_coalesce) { - if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS, - malloc_mutex_rank_exclusive)) { + bool coalesced; + edata = extent_try_coalesce(tsdn, pac, ehooks, ecache, + edata, &coalesced); + emap_update_edata_state(tsdn, pac->emap, edata, ecache->state); + + if (!coalesced) { return true; } - for (unsigned i = 0; i < SC_NPSIZES + 1; i++) { - extent_heap_new(&extents->heaps[i]); - } - bitmap_init(extents->bitmap, &extents_bitmap_info, true); - extent_list_init(&extents->lru); - atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED); - extents->state = state; - extents->delay_coalesce = delay_coalesce; + eset_insert(&ecache->eset, edata); return false; } -extent_state_t -extents_state_get(const extents_t *extents) { - return extents->state; -} - -size_t -extents_npages_get(extents_t *extents) { - return atomic_load_zu(&extents->npages, ATOMIC_RELAXED); -} - -size_t -extents_nextents_get(extents_t *extents, pszind_t pind) { - return atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED); -} - -size_t -extents_nbytes_get(extents_t *extents, pszind_t pind) { - return atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED); -} - -static void -extents_stats_add(extents_t *extent, pszind_t pind, size_t sz) { - size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED); - atomic_store_zu(&extent->nextents[pind], cur + 1, ATOMIC_RELAXED); - cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED); - atomic_store_zu(&extent->nbytes[pind], cur + sz, ATOMIC_RELAXED); -} - -static void -extents_stats_sub(extents_t *extent, pszind_t pind, size_t sz) { - size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED); - atomic_store_zu(&extent->nextents[pind], cur - 1, ATOMIC_RELAXED); - cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED); - atomic_store_zu(&extent->nbytes[pind], cur - sz, ATOMIC_RELAXED); -} - -static void -extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) { - malloc_mutex_assert_owner(tsdn, &extents->mtx); - assert(extent_state_get(extent) == extents->state); - - size_t size = extent_size_get(extent); - size_t psz = extent_size_quantize_floor(size); - pszind_t pind = sz_psz2ind(psz); - if (extent_heap_empty(&extents->heaps[pind])) { - bitmap_unset(extents->bitmap, &extents_bitmap_info, - (size_t)pind); - } - extent_heap_insert(&extents->heaps[pind], extent); - - if (config_stats) { - extents_stats_add(extents, pind, size); - } - - extent_list_append(&extents->lru, extent); - size_t npages = size >> LG_PAGE; - /* - * All modifications to npages hold the mutex (as asserted above), so we - * don't need an atomic fetch-add; we can get by with a load followed by - * a store. 
- */ - size_t cur_extents_npages = - atomic_load_zu(&extents->npages, ATOMIC_RELAXED); - atomic_store_zu(&extents->npages, cur_extents_npages + npages, - ATOMIC_RELAXED); -} - -static void -extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) { - malloc_mutex_assert_owner(tsdn, &extents->mtx); - assert(extent_state_get(extent) == extents->state); - - size_t size = extent_size_get(extent); - size_t psz = extent_size_quantize_floor(size); - pszind_t pind = sz_psz2ind(psz); - extent_heap_remove(&extents->heaps[pind], extent); - - if (config_stats) { - extents_stats_sub(extents, pind, size); - } - - if (extent_heap_empty(&extents->heaps[pind])) { - bitmap_set(extents->bitmap, &extents_bitmap_info, - (size_t)pind); - } - extent_list_remove(&extents->lru, extent); - size_t npages = size >> LG_PAGE; - /* - * As in extents_insert_locked, we hold extents->mtx and so don't need - * atomic operations for updating extents->npages. - */ - size_t cur_extents_npages = - atomic_load_zu(&extents->npages, ATOMIC_RELAXED); - assert(cur_extents_npages >= npages); - atomic_store_zu(&extents->npages, - cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED); -} - -/* - * Find an extent with size [min_size, max_size) to satisfy the alignment - * requirement. For each size, try only the first extent in the heap. - */ -static extent_t * -extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size, - size_t alignment) { - pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size)); - pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size)); - - for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, - &extents_bitmap_info, (size_t)pind); i < pind_max; i = - (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, - (size_t)i+1)) { - assert(i < SC_NPSIZES); - assert(!extent_heap_empty(&extents->heaps[i])); - extent_t *extent = extent_heap_first(&extents->heaps[i]); - uintptr_t base = (uintptr_t)extent_base_get(extent); - size_t candidate_size = extent_size_get(extent); - assert(candidate_size >= min_size); - - uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base, - PAGE_CEILING(alignment)); - if (base > next_align || base + candidate_size <= next_align) { - /* Overflow or not crossing the next alignment. */ - continue; - } - - size_t leadsize = next_align - base; - if (candidate_size - leadsize >= min_size) { - return extent; - } - } +edata_t * +ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, + edata_t *expand_edata, size_t size, size_t alignment, bool zero, + bool guarded) { + assert(size != 0); + assert(alignment != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); - return NULL; + bool commit = true; + edata_t *edata = extent_recycle(tsdn, pac, ehooks, ecache, expand_edata, + size, alignment, zero, &commit, false, guarded); + assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC); + assert(edata == NULL || edata_guarded_get(edata) == guarded); + return edata; } -/* - * Do first-fit extent selection, i.e. select the oldest/lowest extent that is - * large enough. - */ -static extent_t * -extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, - size_t size) { - extent_t *ret = NULL; - - pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size)); - - if (!maps_coalesce && !opt_retain) { - /* - * No split / merge allowed (Windows w/o retain). Try exact fit - * only. - */ - return extent_heap_empty(&extents->heaps[pind]) ? 
NULL : - extent_heap_first(&extents->heaps[pind]); - } +edata_t * +ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, + edata_t *expand_edata, size_t size, size_t alignment, bool zero, + bool guarded) { + assert(size != 0); + assert(alignment != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); - for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, - &extents_bitmap_info, (size_t)pind); - i < SC_NPSIZES + 1; - i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, - (size_t)i+1)) { - assert(!extent_heap_empty(&extents->heaps[i])); - extent_t *extent = extent_heap_first(&extents->heaps[i]); - assert(extent_size_get(extent) >= size); - /* - * In order to reduce fragmentation, avoid reusing and splitting - * large extents for much smaller sizes. - * - * Only do check for dirty extents (delay_coalesce). - */ - if (extents->delay_coalesce && - (sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) { - break; - } - if (ret == NULL || extent_snad_comp(extent, ret) < 0) { - ret = extent; + bool commit = true; + edata_t *edata = extent_alloc_retained(tsdn, pac, ehooks, expand_edata, + size, alignment, zero, &commit, guarded); + if (edata == NULL) { + if (opt_retain && expand_edata != NULL) { + /* + * When retain is enabled and trying to expand, we do + * not attempt extent_alloc_wrapper which does mmap that + * is very unlikely to succeed (unless it happens to be + * at the end). + */ + return NULL; } - if (i == SC_NPSIZES) { - break; + if (guarded) { + /* + * Means no cached guarded extents available (and no + * grow_retained was attempted). The pac_alloc flow + * will alloc regular extents to make new guarded ones. + */ + return NULL; } - assert(i < SC_NPSIZES); - } - - return ret; -} - -/* - * Do first-fit extent selection, where the selection policy choice is - * based on extents->delay_coalesce. - */ -static extent_t * -extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, - size_t esize, size_t alignment) { - malloc_mutex_assert_owner(tsdn, &extents->mtx); - - size_t max_size = esize + PAGE_CEILING(alignment) - PAGE; - /* Beware size_t wrap-around. */ - if (max_size < esize) { - return NULL; - } - - extent_t *extent = - extents_first_fit_locked(tsdn, arena, extents, max_size); - - if (alignment > PAGE && extent == NULL) { - /* - * max_size guarantees the alignment requirement but is rather - * pessimistic. Next we try to satisfy the aligned allocation - * with sizes in [esize, max_size). - */ - extent = extents_fit_alignment(extents, esize, max_size, - alignment); - } - - return extent; -} - -static bool -extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, - extent_t *extent) { - extent_state_set(extent, extent_state_active); - bool coalesced; - extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx, - extents, extent, &coalesced, false); - extent_state_set(extent, extents_state_get(extents)); - - if (!coalesced) { - return true; + void *new_addr = (expand_edata == NULL) ? 
NULL : + edata_past_get(expand_edata); + edata = extent_alloc_wrapper(tsdn, pac, ehooks, new_addr, + size, alignment, zero, &commit, + /* growing_retained */ false); } - extents_insert_locked(tsdn, extents, extent); - return false; -} -extent_t * -extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, - extents_t *extents, void *new_addr, size_t size, size_t pad, - size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { - assert(size + pad != 0); - assert(alignment != 0); - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); - - extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents, - new_addr, size, pad, alignment, slab, szind, zero, commit, false); - assert(extent == NULL || extent_dumpable_get(extent)); - return extent; + assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC); + return edata; } void -extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, - extents_t *extents, extent_t *extent) { - assert(extent_base_get(extent) != NULL); - assert(extent_size_get(extent) != 0); - assert(extent_dumpable_get(extent)); +ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, + edata_t *edata) { + assert(edata_base_get(edata) != NULL); + assert(edata_size_get(edata) != 0); + assert(edata_pai_get(edata) == EXTENT_PAI_PAC); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); - extent_addr_set(extent, extent_base_get(extent)); - extent_zeroed_set(extent, false); + edata_addr_set(edata, edata_base_get(edata)); + edata_zeroed_set(edata, false); - extent_record(tsdn, arena, r_extent_hooks, extents, extent, false); + extent_record(tsdn, pac, ehooks, ecache, edata); } -extent_t * -extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, - extents_t *extents, size_t npages_min) { - rtree_ctx_t rtree_ctx_fallback; - rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); - - malloc_mutex_lock(tsdn, &extents->mtx); +edata_t * +ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + ecache_t *ecache, size_t npages_min) { + malloc_mutex_lock(tsdn, &ecache->mtx); /* * Get the LRU coalesced extent, if any. If coalescing was delayed, * the loop will iterate until the LRU extent is fully coalesced. */ - extent_t *extent; + edata_t *edata; while (true) { /* Get the LRU extent, if any. */ - extent = extent_list_first(&extents->lru); - if (extent == NULL) { - goto label_return; + eset_t *eset = &ecache->eset; + edata = edata_list_inactive_first(&eset->lru); + if (edata == NULL) { + /* + * Next check if there are guarded extents. They are + * more expensive to purge (since they are not + * mergeable), thus in favor of caching them longer. + */ + eset = &ecache->guarded_eset; + edata = edata_list_inactive_first(&eset->lru); + if (edata == NULL) { + goto label_return; + } } /* Check the eviction limit. */ - size_t extents_npages = atomic_load_zu(&extents->npages, - ATOMIC_RELAXED); + size_t extents_npages = ecache_npages_get(ecache); if (extents_npages <= npages_min) { - extent = NULL; + edata = NULL; goto label_return; } - extents_remove_locked(tsdn, extents, extent); - if (!extents->delay_coalesce) { + eset_remove(eset, edata); + if (!ecache->delay_coalesce || edata_guarded_get(edata)) { break; } /* Try to coalesce. 
*/ - if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks, - rtree_ctx, extents, extent)) { + if (extent_try_delayed_coalesce(tsdn, pac, ehooks, ecache, + edata)) { break; } /* @@ -608,23 +198,24 @@ extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, * Either mark the extent active or deregister it to protect against * concurrent operations. */ - switch (extents_state_get(extents)) { + switch (ecache->state) { case extent_state_active: not_reached(); case extent_state_dirty: case extent_state_muzzy: - extent_state_set(extent, extent_state_active); + emap_update_edata_state(tsdn, pac->emap, edata, + extent_state_active); break; case extent_state_retained: - extent_deregister(tsdn, extent); + extent_deregister(tsdn, pac, edata); break; default: not_reached(); } label_return: - malloc_mutex_unlock(tsdn, &extents->mtx); - return extent; + malloc_mutex_unlock(tsdn, &ecache->mtx); + return edata; } /* @@ -632,123 +223,73 @@ extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, * indicates OOM), e.g. when trying to split an existing extent. */ static void -extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, - extents_t *extents, extent_t *extent, bool growing_retained) { - size_t sz = extent_size_get(extent); +extents_abandon_vm(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, + edata_t *edata, bool growing_retained) { + size_t sz = edata_size_get(edata); if (config_stats) { - arena_stats_accum_zu(&arena->stats.abandoned_vm, sz); + atomic_fetch_add_zu(&pac->stats->abandoned_vm, sz, + ATOMIC_RELAXED); } /* * Leak extent after making sure its pages have already been purged, so * that this is only a virtual memory leak. */ - if (extents_state_get(extents) == extent_state_dirty) { - if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, - extent, 0, sz, growing_retained)) { - extent_purge_forced_impl(tsdn, arena, r_extent_hooks, - extent, 0, extent_size_get(extent), - growing_retained); + if (ecache->state == extent_state_dirty) { + if (extent_purge_lazy_impl(tsdn, ehooks, edata, 0, sz, + growing_retained)) { + extent_purge_forced_impl(tsdn, ehooks, edata, 0, + edata_size_get(edata), growing_retained); } } - extent_dalloc(tsdn, arena, extent); -} - -void -extents_prefork(tsdn_t *tsdn, extents_t *extents) { - malloc_mutex_prefork(tsdn, &extents->mtx); -} - -void -extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) { - malloc_mutex_postfork_parent(tsdn, &extents->mtx); -} - -void -extents_postfork_child(tsdn_t *tsdn, extents_t *extents) { - malloc_mutex_postfork_child(tsdn, &extents->mtx); + edata_cache_put(tsdn, pac->edata_cache, edata); } static void -extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, - extent_t *extent) { - assert(extent_arena_get(extent) == arena); - assert(extent_state_get(extent) == extent_state_active); +extent_deactivate_locked_impl(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, + edata_t *edata) { + malloc_mutex_assert_owner(tsdn, &ecache->mtx); + assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache)); - extent_state_set(extent, extents_state_get(extents)); - extents_insert_locked(tsdn, extents, extent); + emap_update_edata_state(tsdn, pac->emap, edata, ecache->state); + eset_t *eset = edata_guarded_get(edata) ? 
&ecache->guarded_eset : + &ecache->eset; + eset_insert(eset, edata); } static void -extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents, - extent_t *extent) { - malloc_mutex_lock(tsdn, &extents->mtx); - extent_deactivate_locked(tsdn, arena, extents, extent); - malloc_mutex_unlock(tsdn, &extents->mtx); +extent_deactivate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, + edata_t *edata) { + assert(edata_state_get(edata) == extent_state_active); + extent_deactivate_locked_impl(tsdn, pac, ecache, edata); } static void -extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, - extent_t *extent) { - assert(extent_arena_get(extent) == arena); - assert(extent_state_get(extent) == extents_state_get(extents)); - - extents_remove_locked(tsdn, extents, extent); - extent_state_set(extent, extent_state_active); -} - -static bool -extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, - const extent_t *extent, bool dependent, bool init_missing, - rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) { - *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, - (uintptr_t)extent_base_get(extent), dependent, init_missing); - if (!dependent && *r_elm_a == NULL) { - return true; - } - assert(*r_elm_a != NULL); - - *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, - (uintptr_t)extent_last_get(extent), dependent, init_missing); - if (!dependent && *r_elm_b == NULL) { - return true; - } - assert(*r_elm_b != NULL); - - return false; +extent_deactivate_check_state_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, + edata_t *edata, extent_state_t expected_state) { + assert(edata_state_get(edata) == expected_state); + extent_deactivate_locked_impl(tsdn, pac, ecache, edata); } static void -extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a, - rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) { - rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab); - if (elm_b != NULL) { - rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind, - slab); - } -} +extent_activate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, eset_t *eset, + edata_t *edata) { + assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache)); + assert(edata_state_get(edata) == ecache->state || + edata_state_get(edata) == extent_state_merging); -static void -extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent, - szind_t szind) { - assert(extent_slab_get(extent)); - - /* Register interior. */ - for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) { - rtree_write(tsdn, &extents_rtree, rtree_ctx, - (uintptr_t)extent_base_get(extent) + (uintptr_t)(i << - LG_PAGE), extent, szind, true); - } + eset_remove(eset, edata); + emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active); } -static void -extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) { +void +extent_gdump_add(tsdn_t *tsdn, const edata_t *edata) { cassert(config_prof); /* prof_gdump() requirement. 
*/ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); - if (opt_prof && extent_state_get(extent) == extent_state_active) { - size_t nadd = extent_size_get(extent) >> LG_PAGE; + if (opt_prof && edata_state_get(edata) == extent_state_active) { + size_t nadd = edata_size_get(edata) >> LG_PAGE; size_t cur = atomic_fetch_add_zu(&curpages, nadd, ATOMIC_RELAXED) + nadd; size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED); @@ -767,232 +308,184 @@ extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) { } static void -extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) { +extent_gdump_sub(tsdn_t *tsdn, const edata_t *edata) { cassert(config_prof); - if (opt_prof && extent_state_get(extent) == extent_state_active) { - size_t nsub = extent_size_get(extent) >> LG_PAGE; + if (opt_prof && edata_state_get(edata) == extent_state_active) { + size_t nsub = edata_size_get(edata) >> LG_PAGE; assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub); atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED); } } static bool -extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) { - rtree_ctx_t rtree_ctx_fallback; - rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); - rtree_leaf_elm_t *elm_a, *elm_b; - +extent_register_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata, bool gdump_add) { + assert(edata_state_get(edata) == extent_state_active); /* - * We need to hold the lock to protect against a concurrent coalesce - * operation that sees us in a partial state. + * No locking needed, as the edata must be in active state, which + * prevents other threads from accessing the edata. */ - extent_lock(tsdn, extent); - - if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true, - &elm_a, &elm_b)) { - extent_unlock(tsdn, extent); + if (emap_register_boundary(tsdn, pac->emap, edata, SC_NSIZES, + /* slab */ false)) { return true; } - szind_t szind = extent_szind_get_maybe_invalid(extent); - bool slab = extent_slab_get(extent); - extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab); - if (slab) { - extent_interior_register(tsdn, rtree_ctx, extent, szind); - } - - extent_unlock(tsdn, extent); - if (config_prof && gdump_add) { - extent_gdump_add(tsdn, extent); + extent_gdump_add(tsdn, edata); } return false; } static bool -extent_register(tsdn_t *tsdn, extent_t *extent) { - return extent_register_impl(tsdn, extent, true); +extent_register(tsdn_t *tsdn, pac_t *pac, edata_t *edata) { + return extent_register_impl(tsdn, pac, edata, true); } static bool -extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) { - return extent_register_impl(tsdn, extent, false); +extent_register_no_gdump_add(tsdn_t *tsdn, pac_t *pac, edata_t *edata) { + return extent_register_impl(tsdn, pac, edata, false); } static void -extent_reregister(tsdn_t *tsdn, extent_t *extent) { - bool err = extent_register(tsdn, extent); +extent_reregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata) { + bool err = extent_register(tsdn, pac, edata); assert(!err); } -/* - * Removes all pointers to the given extent from the global rtree indices for - * its interior. This is relevant for slab extents, for which we need to do - * metadata lookups at places other than the head of the extent. We deregister - * on the interior, then, when an extent moves from being an active slab to an - * inactive state. 
- */ -static void -extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, - extent_t *extent) { - size_t i; - - assert(extent_slab_get(extent)); - - for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) { - rtree_clear(tsdn, &extents_rtree, rtree_ctx, - (uintptr_t)extent_base_get(extent) + (uintptr_t)(i << - LG_PAGE)); - } -} - /* * Removes all pointers to the given extent from the global rtree. */ static void -extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) { - rtree_ctx_t rtree_ctx_fallback; - rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); - rtree_leaf_elm_t *elm_a, *elm_b; - extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false, - &elm_a, &elm_b); - - extent_lock(tsdn, extent); - - extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false); - if (extent_slab_get(extent)) { - extent_interior_deregister(tsdn, rtree_ctx, extent); - extent_slab_set(extent, false); - } - - extent_unlock(tsdn, extent); +extent_deregister_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata, + bool gdump) { + emap_deregister_boundary(tsdn, pac->emap, edata); if (config_prof && gdump) { - extent_gdump_sub(tsdn, extent); + extent_gdump_sub(tsdn, edata); } } static void -extent_deregister(tsdn_t *tsdn, extent_t *extent) { - extent_deregister_impl(tsdn, extent, true); +extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata) { + extent_deregister_impl(tsdn, pac, edata, true); } static void -extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) { - extent_deregister_impl(tsdn, extent, false); +extent_deregister_no_gdump_sub(tsdn_t *tsdn, pac_t *pac, + edata_t *edata) { + extent_deregister_impl(tsdn, pac, edata, false); } /* - * Tries to find and remove an extent from extents that can be used for the + * Tries to find and remove an extent from ecache that can be used for the * given allocation request. */ -static extent_t * -extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, - void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, - bool growing_retained) { - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, growing_retained ? 1 : 0); +static edata_t * +extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment, + bool guarded) { + malloc_mutex_assert_owner(tsdn, &ecache->mtx); assert(alignment > 0); - if (config_debug && new_addr != NULL) { + if (config_debug && expand_edata != NULL) { /* - * Non-NULL new_addr has two use cases: - * - * 1) Recycle a known-extant extent, e.g. during purging. - * 2) Perform in-place expanding reallocation. - * - * Regardless of use case, new_addr must either refer to a - * non-existing extent, or to the base of an extant extent, - * since only active slabs support interior lookups (which of - * course cannot be recycled). + * Non-NULL expand_edata indicates in-place expanding realloc. + * new_addr must either refer to a non-existing extent, or to + * the base of an extant extent, since only active slabs support + * interior lookups (which of course cannot be recycled). 
*/ + void *new_addr = edata_past_get(expand_edata); assert(PAGE_ADDR2BASE(new_addr) == new_addr); - assert(pad == 0); assert(alignment <= PAGE); } - size_t esize = size + pad; - malloc_mutex_lock(tsdn, &extents->mtx); - extent_hooks_assure_initialized(arena, r_extent_hooks); - extent_t *extent; - if (new_addr != NULL) { - extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr, - false); - if (extent != NULL) { - /* - * We might null-out extent to report an error, but we - * still need to unlock the associated mutex after. - */ - extent_t *unlock_extent = extent; - assert(extent_base_get(extent) == new_addr); - if (extent_arena_get(extent) != arena || - extent_size_get(extent) < esize || - extent_state_get(extent) != - extents_state_get(extents)) { - extent = NULL; + edata_t *edata; + eset_t *eset = guarded ? &ecache->guarded_eset : &ecache->eset; + if (expand_edata != NULL) { + edata = emap_try_acquire_edata_neighbor_expand(tsdn, pac->emap, + expand_edata, EXTENT_PAI_PAC, ecache->state); + if (edata != NULL) { + extent_assert_can_expand(expand_edata, edata); + if (edata_size_get(edata) < size) { + emap_release_edata(tsdn, pac->emap, edata, + ecache->state); + edata = NULL; } - extent_unlock(tsdn, unlock_extent); } } else { - extent = extents_fit_locked(tsdn, arena, extents, esize, - alignment); + /* + * A large extent might be broken up from its original size to + * some small size to satisfy a small request. When that small + * request is freed, though, it won't merge back with the larger + * extent if delayed coalescing is on. The large extent can + * then no longer satify a request for its original size. To + * limit this effect, when delayed coalescing is enabled, we + * put a cap on how big an extent we can split for a request. + */ + unsigned lg_max_fit = ecache->delay_coalesce + ? (unsigned)opt_lg_extent_max_active_fit : SC_PTR_BITS; + + /* + * If split and merge are not allowed (Windows w/o retain), try + * exact fit only. + * + * For simplicity purposes, splitting guarded extents is not + * supported. Hence, we do only exact fit for guarded + * allocations. + */ + bool exact_only = (!maps_coalesce && !opt_retain) || guarded; + edata = eset_fit(eset, size, alignment, exact_only, + lg_max_fit); } - if (extent == NULL) { - malloc_mutex_unlock(tsdn, &extents->mtx); + if (edata == NULL) { return NULL; } + assert(!guarded || edata_guarded_get(edata)); + extent_activate_locked(tsdn, pac, ecache, eset, edata); - extent_activate_locked(tsdn, arena, extents, extent); - malloc_mutex_unlock(tsdn, &extents->mtx); - - return extent; + return edata; } /* * Given an allocation request and an extent guaranteed to be able to satisfy - * it, this splits off lead and trail extents, leaving extent pointing to an + * it, this splits off lead and trail extents, leaving edata pointing to an * extent satisfying the allocation. - * This function doesn't put lead or trail into any extents_t; it's the caller's + * This function doesn't put lead or trail into any ecache; it's the caller's * job to ensure that they can be reused. */ typedef enum { /* - * Split successfully. lead, extent, and trail, are modified to extents + * Split successfully. lead, edata, and trail, are modified to extents * describing the ranges before, in, and after the given allocation. */ extent_split_interior_ok, /* * The extent can't satisfy the given allocation request. None of the - * input extent_t *s are touched. + * input edata_t *s are touched. */ extent_split_interior_cant_alloc, /* * In a potentially invalid state. 
Must leak (if *to_leak is non-NULL), * and salvage what's still salvageable (if *to_salvage is non-NULL). - * None of lead, extent, or trail are valid. + * None of lead, edata, or trail are valid. */ extent_split_interior_error } extent_split_interior_result_t; static extent_split_interior_result_t -extent_split_interior(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, +extent_split_interior(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, /* The result of splitting, in case of success. */ - extent_t **extent, extent_t **lead, extent_t **trail, + edata_t **edata, edata_t **lead, edata_t **trail, /* The mess to clean up, in case of error. */ - extent_t **to_leak, extent_t **to_salvage, - void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, - szind_t szind, bool growing_retained) { - size_t esize = size + pad; - size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent), - PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent); - assert(new_addr == NULL || leadsize == 0); - if (extent_size_get(*extent) < leadsize + esize) { + edata_t **to_leak, edata_t **to_salvage, + edata_t *expand_edata, size_t size, size_t alignment) { + size_t leadsize = ALIGNMENT_CEILING((uintptr_t)edata_base_get(*edata), + PAGE_CEILING(alignment)) - (uintptr_t)edata_base_get(*edata); + assert(expand_edata == NULL || leadsize == 0); + if (edata_size_get(*edata) < leadsize + size) { return extent_split_interior_cant_alloc; } - size_t trailsize = extent_size_get(*extent) - leadsize - esize; + size_t trailsize = edata_size_get(*edata) - leadsize - size; *lead = NULL; *trail = NULL; @@ -1001,11 +494,11 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena, /* Split the lead. */ if (leadsize != 0) { - *lead = *extent; - *extent = extent_split_impl(tsdn, arena, r_extent_hooks, - *lead, leadsize, SC_NSIZES, false, esize + trailsize, szind, - slab, growing_retained); - if (*extent == NULL) { + assert(!edata_guarded_get(*edata)); + *lead = *edata; + *edata = extent_split_impl(tsdn, pac, ehooks, *lead, leadsize, + size + trailsize, /* holding_core_locks*/ true); + if (*edata == NULL) { *to_leak = *lead; *lead = NULL; return extent_split_interior_error; @@ -1014,36 +507,18 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena, /* Split the trail. */ if (trailsize != 0) { - *trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent, - esize, szind, slab, trailsize, SC_NSIZES, false, - growing_retained); + assert(!edata_guarded_get(*edata)); + *trail = extent_split_impl(tsdn, pac, ehooks, *edata, size, + trailsize, /* holding_core_locks */ true); if (*trail == NULL) { - *to_leak = *extent; + *to_leak = *edata; *to_salvage = *lead; *lead = NULL; - *extent = NULL; + *edata = NULL; return extent_split_interior_error; } } - if (leadsize == 0 && trailsize == 0) { - /* - * Splitting causes szind to be set as a side effect, but no - * splitting occurred. - */ - extent_szind_set(*extent, szind); - if (szind != SC_NSIZES) { - rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, - (uintptr_t)extent_addr_get(*extent), szind, slab); - if (slab && extent_size_get(*extent) > PAGE) { - rtree_szind_slab_update(tsdn, &extents_rtree, - rtree_ctx, - (uintptr_t)extent_past_get(*extent) - - (uintptr_t)PAGE, szind, slab); - } - } - } - return extent_split_interior_ok; } @@ -1051,42 +526,43 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena, * This fulfills the indicated allocation request out of the given extent (which * the caller should have ensured was big enough). 
If there's any unused space * before or after the resulting allocation, that space is given its own extent - * and put back into extents. + * and put back into ecache. */ -static extent_t * -extent_recycle_split(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, - void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, - szind_t szind, extent_t *extent, bool growing_retained) { - extent_t *lead; - extent_t *trail; - extent_t *to_leak; - extent_t *to_salvage; +static edata_t * +extent_recycle_split(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment, + edata_t *edata, bool growing_retained) { + assert(!edata_guarded_get(edata) || size == edata_size_get(edata)); + malloc_mutex_assert_owner(tsdn, &ecache->mtx); + + edata_t *lead; + edata_t *trail; + edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL); + edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL); extent_split_interior_result_t result = extent_split_interior( - tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail, - &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind, - growing_retained); + tsdn, pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage, + expand_edata, size, alignment); if (!maps_coalesce && result != extent_split_interior_ok && !opt_retain) { /* * Split isn't supported (implies Windows w/o retain). Avoid - * leaking the extents. + * leaking the extent. */ assert(to_leak != NULL && lead == NULL && trail == NULL); - extent_deactivate(tsdn, arena, extents, to_leak); + extent_deactivate_locked(tsdn, pac, ecache, to_leak); return NULL; } if (result == extent_split_interior_ok) { if (lead != NULL) { - extent_deactivate(tsdn, arena, extents, lead); + extent_deactivate_locked(tsdn, pac, ecache, lead); } if (trail != NULL) { - extent_deactivate(tsdn, arena, extents, trail); + extent_deactivate_locked(tsdn, pac, ecache, trail); } - return extent; + return edata; } else { /* * We should have picked an extent that was large enough to @@ -1094,294 +570,144 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena, */ assert(result == extent_split_interior_error); if (to_salvage != NULL) { - extent_deregister(tsdn, to_salvage); + extent_deregister(tsdn, pac, to_salvage); } if (to_leak != NULL) { - void *leak = extent_base_get(to_leak); - extent_deregister_no_gdump_sub(tsdn, to_leak); - extents_abandon_vm(tsdn, arena, r_extent_hooks, extents, - to_leak, growing_retained); - assert(extent_lock_from_addr(tsdn, rtree_ctx, leak, - false) == NULL); + extent_deregister_no_gdump_sub(tsdn, pac, to_leak); + /* + * May go down the purge path (which assume no ecache + * locks). Only happens with OOM caused split failures. + */ + malloc_mutex_unlock(tsdn, &ecache->mtx); + extents_abandon_vm(tsdn, pac, ehooks, ecache, to_leak, + growing_retained); + malloc_mutex_lock(tsdn, &ecache->mtx); } return NULL; } unreachable(); } -static bool -extent_need_manual_zero(arena_t *arena) { - /* - * Need to manually zero the extent on repopulating if either; 1) non - * default extent hooks installed (in which case the purge semantics may - * change); or 2) transparent huge pages enabled. - */ - return (!arena_has_default_hooks(arena) || - (opt_thp == thp_mode_always)); -} - /* * Tries to satisfy the given allocation request by reusing one of the extents - * in the given extents_t. + * in the given ecache_t. 
*/ -static extent_t * -extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, - extents_t *extents, void *new_addr, size_t size, size_t pad, - size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit, - bool growing_retained) { +static edata_t * +extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, + edata_t *expand_edata, size_t size, size_t alignment, bool zero, + bool *commit, bool growing_retained, bool guarded) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 1 : 0); - assert(new_addr == NULL || !slab); - assert(pad == 0 || !slab); - assert(!*zero || !slab); + assert(!guarded || expand_edata == NULL); + assert(!guarded || alignment <= PAGE); - rtree_ctx_t rtree_ctx_fallback; - rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + malloc_mutex_lock(tsdn, &ecache->mtx); - extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks, - rtree_ctx, extents, new_addr, size, pad, alignment, slab, - growing_retained); - if (extent == NULL) { + edata_t *edata = extent_recycle_extract(tsdn, pac, ehooks, ecache, + expand_edata, size, alignment, guarded); + if (edata == NULL) { + malloc_mutex_unlock(tsdn, &ecache->mtx); return NULL; } - extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx, - extents, new_addr, size, pad, alignment, slab, szind, extent, - growing_retained); - if (extent == NULL) { + edata = extent_recycle_split(tsdn, pac, ehooks, ecache, expand_edata, + size, alignment, edata, growing_retained); + malloc_mutex_unlock(tsdn, &ecache->mtx); + if (edata == NULL) { return NULL; } - if (*commit && !extent_committed_get(extent)) { - if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, - 0, extent_size_get(extent), growing_retained)) { - extent_record(tsdn, arena, r_extent_hooks, extents, - extent, growing_retained); - return NULL; - } - if (!extent_need_manual_zero(arena)) { - extent_zeroed_set(extent, true); - } + assert(edata_state_get(edata) == extent_state_active); + if (extent_commit_zero(tsdn, ehooks, edata, *commit, zero, + growing_retained)) { + extent_record(tsdn, pac, ehooks, ecache, edata); + return NULL; } - - if (extent_committed_get(extent)) { + if (edata_committed_get(edata)) { + /* + * This reverses the purpose of this variable - previously it + * was treated as an input parameter, now it turns into an + * output parameter, reporting if the edata has actually been + * committed. + */ *commit = true; } - if (extent_zeroed_get(extent)) { - *zero = true; - } - - if (pad != 0) { - extent_addr_randomize(tsdn, extent, alignment); - } - assert(extent_state_get(extent) == extent_state_active); - if (slab) { - extent_slab_set(extent, slab); - extent_interior_register(tsdn, rtree_ctx, extent, szind); - } - - if (*zero) { - void *addr = extent_base_get(extent); - if (!extent_zeroed_get(extent)) { - size_t size = extent_size_get(extent); - if (extent_need_manual_zero(arena) || - pages_purge_forced(addr, size)) { - memset(addr, 0, size); - } - } else if (config_debug) { - size_t *p = (size_t *)(uintptr_t)addr; - /* Check the first page only. */ - for (size_t i = 0; i < PAGE / sizeof(size_t); i++) { - assert(p[i] == 0); - } - } - } - return extent; + return edata; } /* - * If the caller specifies (!*zero), it is still possible to receive zeroed - * memory, in which case *zero is toggled to true. arena_extent_alloc() takes - * advantage of this to avoid demanding zeroed extents, but taking advantage of - * them if they are returned. 
+ * If virtual memory is retained, create increasingly larger extents from which + * to split requested extents in order to limit the total number of disjoint + * virtual memory ranges retained by each shard. */ -static void * -extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, - size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) { - void *ret; +static edata_t * +extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + size_t size, size_t alignment, bool zero, bool *commit) { + malloc_mutex_assert_owner(tsdn, &pac->grow_mtx); - assert(size != 0); - assert(alignment != 0); - - /* "primary" dss. */ - if (have_dss && dss_prec == dss_prec_primary && (ret = - extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, - commit)) != NULL) { - return ret; - } - /* mmap. */ - if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit)) - != NULL) { - return ret; - } - /* "secondary" dss. */ - if (have_dss && dss_prec == dss_prec_secondary && (ret = - extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, - commit)) != NULL) { - return ret; - } - - /* All strategies for allocation failed. */ - return NULL; -} - -static void * -extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr, - size_t size, size_t alignment, bool *zero, bool *commit) { - void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero, - commit, (dss_prec_t)atomic_load_u(&arena->dss_prec, - ATOMIC_RELAXED)); - if (have_madvise_huge && ret) { - pages_set_thp_state(ret, size); - } - return ret; -} - -static void * -extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size, - size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { - tsdn_t *tsdn; - arena_t *arena; - - tsdn = tsdn_fetch(); - arena = arena_get(tsdn, arena_ind, false); - /* - * The arena we're allocating on behalf of must have been initialized - * already. - */ - assert(arena != NULL); - - return extent_alloc_default_impl(tsdn, arena, new_addr, size, - ALIGNMENT_CEILING(alignment, PAGE), zero, commit); -} - -static void -extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) { - tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); - if (arena == arena_get(tsd_tsdn(tsd), 0, false)) { - /* - * The only legitimate case of customized extent hooks for a0 is - * hooks with no allocation activities. One such example is to - * place metadata on pre-allocated resources such as huge pages. - * In that case, rely on reentrancy_level checks to catch - * infinite recursions. - */ - pre_reentrancy(tsd, NULL); - } else { - pre_reentrancy(tsd, arena); - } -} - -static void -extent_hook_post_reentrancy(tsdn_t *tsdn) { - tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); - post_reentrancy(tsd); -} - -/* - * If virtual memory is retained, create increasingly larger extents from which - * to split requested extents in order to limit the total number of disjoint - * virtual memory ranges retained by each arena. - */ -static extent_t * -extent_grow_retained(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment, - bool slab, szind_t szind, bool *zero, bool *commit) { - malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx); - assert(pad == 0 || !slab); - assert(!*zero || !slab); - - size_t esize = size + pad; - size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE; + size_t alloc_size_min = size + PAGE_CEILING(alignment) - PAGE; /* Beware size_t wrap-around. 
*/ - if (alloc_size_min < esize) { + if (alloc_size_min < size) { goto label_err; } /* * Find the next extent size in the series that would be large enough to * satisfy this request. */ - pszind_t egn_skip = 0; - size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); - while (alloc_size < alloc_size_min) { - egn_skip++; - if (arena->extent_grow_next + egn_skip >= - sz_psz2ind(SC_LARGE_MAXCLASS)) { - /* Outside legal range. */ - goto label_err; - } - alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); + size_t alloc_size; + pszind_t exp_grow_skip; + bool err = exp_grow_size_prepare(&pac->exp_grow, alloc_size_min, + &alloc_size, &exp_grow_skip); + if (err) { + goto label_err; } - extent_t *extent = extent_alloc(tsdn, arena); - if (extent == NULL) { + edata_t *edata = edata_cache_get(tsdn, pac->edata_cache); + if (edata == NULL) { goto label_err; } bool zeroed = false; bool committed = false; - void *ptr; - if (*r_extent_hooks == &extent_hooks_default) { - ptr = extent_alloc_default_impl(tsdn, arena, NULL, - alloc_size, PAGE, &zeroed, &committed); - } else { - extent_hook_pre_reentrancy(tsdn, arena); - ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL, - alloc_size, PAGE, &zeroed, &committed, - arena_ind_get(arena)); - extent_hook_post_reentrancy(tsdn); - } + void *ptr = ehooks_alloc(tsdn, ehooks, NULL, alloc_size, PAGE, &zeroed, + &committed); - extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES, - arena_extent_sn_next(arena), extent_state_active, zeroed, - committed, true, EXTENT_IS_HEAD); if (ptr == NULL) { - extent_dalloc(tsdn, arena, extent); + edata_cache_put(tsdn, pac->edata_cache, edata); goto label_err; } - if (extent_register_no_gdump_add(tsdn, extent)) { - extent_dalloc(tsdn, arena, extent); + edata_init(edata, ecache_ind_get(&pac->ecache_retained), ptr, + alloc_size, false, SC_NSIZES, extent_sn_next(pac), + extent_state_active, zeroed, committed, EXTENT_PAI_PAC, + EXTENT_IS_HEAD); + + if (extent_register_no_gdump_add(tsdn, pac, edata)) { + edata_cache_put(tsdn, pac->edata_cache, edata); goto label_err; } - if (extent_zeroed_get(extent) && extent_committed_get(extent)) { - *zero = true; - } - if (extent_committed_get(extent)) { + if (edata_committed_get(edata)) { *commit = true; } - rtree_ctx_t rtree_ctx_fallback; - rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + edata_t *lead; + edata_t *trail; + edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL); + edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL); - extent_t *lead; - extent_t *trail; - extent_t *to_leak; - extent_t *to_salvage; - extent_split_interior_result_t result = extent_split_interior( - tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail, - &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind, - true); + extent_split_interior_result_t result = extent_split_interior(tsdn, + pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage, NULL, + size, alignment); if (result == extent_split_interior_ok) { if (lead != NULL) { - extent_record(tsdn, arena, r_extent_hooks, - &arena->extents_retained, lead, true); + extent_record(tsdn, pac, ehooks, &pac->ecache_retained, + lead); } if (trail != NULL) { - extent_record(tsdn, arena, r_extent_hooks, - &arena->extents_retained, trail, true); + extent_record(tsdn, pac, ehooks, &pac->ecache_retained, + trail); } } else { /* @@ -1393,26 +719,32 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, if (config_prof) { extent_gdump_add(tsdn, to_salvage); } - extent_record(tsdn, arena, r_extent_hooks, - 
&arena->extents_retained, to_salvage, true); + extent_record(tsdn, pac, ehooks, &pac->ecache_retained, + to_salvage); } if (to_leak != NULL) { - extent_deregister_no_gdump_sub(tsdn, to_leak); - extents_abandon_vm(tsdn, arena, r_extent_hooks, - &arena->extents_retained, to_leak, true); + extent_deregister_no_gdump_sub(tsdn, pac, to_leak); + extents_abandon_vm(tsdn, pac, ehooks, + &pac->ecache_retained, to_leak, true); } goto label_err; } - if (*commit && !extent_committed_get(extent)) { - if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0, - extent_size_get(extent), true)) { - extent_record(tsdn, arena, r_extent_hooks, - &arena->extents_retained, extent, true); + if (*commit && !edata_committed_get(edata)) { + if (extent_commit_impl(tsdn, ehooks, edata, 0, + edata_size_get(edata), true)) { + extent_record(tsdn, pac, ehooks, + &pac->ecache_retained, edata); goto label_err; } - if (!extent_need_manual_zero(arena)) { - extent_zeroed_set(extent, true); + /* A successful commit should return zeroed memory. */ + if (config_debug) { + void *addr = edata_addr_get(edata); + size_t *p = (size_t *)(uintptr_t)addr; + /* Check the first page only. */ + for (size_t i = 0; i < PAGE / sizeof(size_t); i++) { + assert(p[i] == 0); + } } } @@ -1420,187 +752,74 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, * Increment extent_grow_next if doing so wouldn't exceed the allowed * range. */ - if (arena->extent_grow_next + egn_skip + 1 <= - arena->retain_grow_limit) { - arena->extent_grow_next += egn_skip + 1; - } else { - arena->extent_grow_next = arena->retain_grow_limit; - } /* All opportunities for failure are past. */ - malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + exp_grow_size_commit(&pac->exp_grow, exp_grow_skip); + malloc_mutex_unlock(tsdn, &pac->grow_mtx); if (config_prof) { /* Adjust gdump stats now that extent is final size. 
*/ - extent_gdump_add(tsdn, extent); + extent_gdump_add(tsdn, edata); } - if (pad != 0) { - extent_addr_randomize(tsdn, extent, alignment); - } - if (slab) { - rtree_ctx_t rtree_ctx_fallback; - rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, - &rtree_ctx_fallback); - - extent_slab_set(extent, true); - extent_interior_register(tsdn, rtree_ctx, extent, szind); + if (zero && !edata_zeroed_get(edata)) { + ehooks_zero(tsdn, ehooks, edata_base_get(edata), + edata_size_get(edata)); } - if (*zero && !extent_zeroed_get(extent)) { - void *addr = extent_base_get(extent); - size_t size = extent_size_get(extent); - if (extent_need_manual_zero(arena) || - pages_purge_forced(addr, size)) { - memset(addr, 0, size); - } - } - - return extent; + return edata; label_err: - malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + malloc_mutex_unlock(tsdn, &pac->grow_mtx); return NULL; } -static extent_t * -extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, - size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { +static edata_t * +extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + edata_t *expand_edata, size_t size, size_t alignment, bool zero, + bool *commit, bool guarded) { assert(size != 0); assert(alignment != 0); - malloc_mutex_lock(tsdn, &arena->extent_grow_mtx); + malloc_mutex_lock(tsdn, &pac->grow_mtx); - extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, - &arena->extents_retained, new_addr, size, pad, alignment, slab, - szind, zero, commit, true); - if (extent != NULL) { - malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + edata_t *edata = extent_recycle(tsdn, pac, ehooks, + &pac->ecache_retained, expand_edata, size, alignment, zero, commit, + /* growing_retained */ true, guarded); + if (edata != NULL) { + malloc_mutex_unlock(tsdn, &pac->grow_mtx); if (config_prof) { - extent_gdump_add(tsdn, extent); + extent_gdump_add(tsdn, edata); } - } else if (opt_retain && new_addr == NULL) { - extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size, - pad, alignment, slab, szind, zero, commit); - /* extent_grow_retained() always releases extent_grow_mtx. */ - } else { - malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); - } - malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx); - - return extent; -} - -static extent_t * -extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, - size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { - size_t esize = size + pad; - extent_t *extent = extent_alloc(tsdn, arena); - if (extent == NULL) { - return NULL; - } - void *addr; - size_t palignment = ALIGNMENT_CEILING(alignment, PAGE); - if (*r_extent_hooks == &extent_hooks_default) { - /* Call directly to propagate tsdn. */ - addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize, - palignment, zero, commit); + } else if (opt_retain && expand_edata == NULL && !guarded) { + edata = extent_grow_retained(tsdn, pac, ehooks, size, + alignment, zero, commit); + /* extent_grow_retained() always releases pac->grow_mtx. 
*/ } else { - extent_hook_pre_reentrancy(tsdn, arena); - addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr, - esize, palignment, zero, commit, arena_ind_get(arena)); - extent_hook_post_reentrancy(tsdn); - } - if (addr == NULL) { - extent_dalloc(tsdn, arena, extent); - return NULL; - } - extent_init(extent, arena, addr, esize, slab, szind, - arena_extent_sn_next(arena), extent_state_active, *zero, *commit, - true, EXTENT_NOT_HEAD); - if (pad != 0) { - extent_addr_randomize(tsdn, extent, alignment); - } - if (extent_register(tsdn, extent)) { - extent_dalloc(tsdn, arena, extent); - return NULL; - } - - return extent; -} - -extent_t * -extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, - size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); - - extent_hooks_assure_initialized(arena, r_extent_hooks); - - extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks, - new_addr, size, pad, alignment, slab, szind, zero, commit); - if (extent == NULL) { - if (opt_retain && new_addr != NULL) { - /* - * When retain is enabled and new_addr is set, we do not - * attempt extent_alloc_wrapper_hard which does mmap - * that is very unlikely to succeed (unless it happens - * to be at the end). - */ - return NULL; - } - extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks, - new_addr, size, pad, alignment, slab, szind, zero, commit); + malloc_mutex_unlock(tsdn, &pac->grow_mtx); } + malloc_mutex_assert_not_owner(tsdn, &pac->grow_mtx); - assert(extent == NULL || extent_dumpable_get(extent)); - return extent; + return edata; } static bool -extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner, - const extent_t *outer) { - assert(extent_arena_get(inner) == arena); - if (extent_arena_get(outer) != arena) { - return false; - } - - assert(extent_state_get(inner) == extent_state_active); - if (extent_state_get(outer) != extents->state) { - return false; - } - - if (extent_committed_get(inner) != extent_committed_get(outer)) { - return false; - } - - return true; -} - -static bool -extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, - extents_t *extents, extent_t *inner, extent_t *outer, bool forward, - bool growing_retained) { - assert(extent_can_coalesce(arena, extents, inner, outer)); - - extent_activate_locked(tsdn, arena, extents, outer); - - malloc_mutex_unlock(tsdn, &extents->mtx); - bool err = extent_merge_impl(tsdn, arena, r_extent_hooks, - forward ? inner : outer, forward ? outer : inner, growing_retained); - malloc_mutex_lock(tsdn, &extents->mtx); - +extent_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, + edata_t *inner, edata_t *outer, bool forward) { + extent_assert_can_coalesce(inner, outer); + eset_remove(&ecache->eset, outer); + + bool err = extent_merge_impl(tsdn, pac, ehooks, + forward ? inner : outer, forward ? 
outer : inner, + /* holding_core_locks */ true); if (err) { - extent_deactivate_locked(tsdn, arena, extents, outer); + extent_deactivate_check_state_locked(tsdn, pac, ecache, outer, + extent_state_merging); } return err; } -static extent_t * -extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, - extent_t *extent, bool *coalesced, bool growing_retained, - bool inactive_only) { +static edata_t * +extent_try_coalesce_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + ecache_t *ecache, edata_t *edata, bool *coalesced) { + assert(!edata_guarded_get(edata)); /* * We avoid checking / locking inactive neighbors for large size * classes, since they are eagerly coalesced on deallocation which can @@ -1615,470 +834,333 @@ extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena, again = false; /* Try to coalesce forward. */ - extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx, - extent_past_get(extent), inactive_only); + edata_t *next = emap_try_acquire_edata_neighbor(tsdn, pac->emap, + edata, EXTENT_PAI_PAC, ecache->state, /* forward */ true); if (next != NULL) { - /* - * extents->mtx only protects against races for - * like-state extents, so call extent_can_coalesce() - * before releasing next's pool lock. - */ - bool can_coalesce = extent_can_coalesce(arena, extents, - extent, next); - - extent_unlock(tsdn, next); - - if (can_coalesce && !extent_coalesce(tsdn, arena, - r_extent_hooks, extents, extent, next, true, - growing_retained)) { - if (extents->delay_coalesce) { + if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata, + next, true)) { + if (ecache->delay_coalesce) { /* Do minimal coalescing. */ *coalesced = true; - return extent; + return edata; } again = true; } } /* Try to coalesce backward. */ - extent_t *prev = NULL; - if (extent_before_get(extent) != NULL) { - prev = extent_lock_from_addr(tsdn, rtree_ctx, - extent_before_get(extent), inactive_only); - } + edata_t *prev = emap_try_acquire_edata_neighbor(tsdn, pac->emap, + edata, EXTENT_PAI_PAC, ecache->state, /* forward */ false); if (prev != NULL) { - bool can_coalesce = extent_can_coalesce(arena, extents, - extent, prev); - extent_unlock(tsdn, prev); - - if (can_coalesce && !extent_coalesce(tsdn, arena, - r_extent_hooks, extents, extent, prev, false, - growing_retained)) { - extent = prev; - if (extents->delay_coalesce) { + if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata, + prev, false)) { + edata = prev; + if (ecache->delay_coalesce) { /* Do minimal coalescing. 
*/ *coalesced = true; - return extent; + return edata; } again = true; } } } while (again); - if (extents->delay_coalesce) { + if (ecache->delay_coalesce) { *coalesced = false; } - return extent; + return edata; +} + +static edata_t * +extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + ecache_t *ecache, edata_t *edata, bool *coalesced) { + return extent_try_coalesce_impl(tsdn, pac, ehooks, ecache, edata, + coalesced); } -static extent_t * -extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, - extent_t *extent, bool *coalesced, bool growing_retained) { - return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx, - extents, extent, coalesced, growing_retained, false); +static edata_t * +extent_try_coalesce_large(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + ecache_t *ecache, edata_t *edata, bool *coalesced) { + return extent_try_coalesce_impl(tsdn, pac, ehooks, ecache, edata, + coalesced); } -static extent_t * -extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, - extent_t *extent, bool *coalesced, bool growing_retained) { - return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx, - extents, extent, coalesced, growing_retained, true); +/* Purge a single extent to retained / unmapped directly. */ +static void +extent_maximally_purge(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + edata_t *edata) { + size_t extent_size = edata_size_get(edata); + extent_dalloc_wrapper(tsdn, pac, ehooks, edata); + if (config_stats) { + /* Update stats accordingly. */ + LOCKEDINT_MTX_LOCK(tsdn, *pac->stats_mtx); + locked_inc_u64(tsdn, + LOCKEDINT_MTX(*pac->stats_mtx), + &pac->stats->decay_dirty.nmadvise, 1); + locked_inc_u64(tsdn, + LOCKEDINT_MTX(*pac->stats_mtx), + &pac->stats->decay_dirty.purged, + extent_size >> LG_PAGE); + LOCKEDINT_MTX_UNLOCK(tsdn, *pac->stats_mtx); + atomic_fetch_sub_zu(&pac->stats->pac_mapped, extent_size, + ATOMIC_RELAXED); + } } /* * Does the metadata management portions of putting an unused extent into the - * given extents_t (coalesces, deregisters slab interiors, the heap operations). + * given ecache_t (coalesces and inserts into the eset). 
*/ -static void -extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, - extents_t *extents, extent_t *extent, bool growing_retained) { - rtree_ctx_t rtree_ctx_fallback; - rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); - - assert((extents_state_get(extents) != extent_state_dirty && - extents_state_get(extents) != extent_state_muzzy) || - !extent_zeroed_get(extent)); - - malloc_mutex_lock(tsdn, &extents->mtx); - extent_hooks_assure_initialized(arena, r_extent_hooks); - - extent_szind_set(extent, SC_NSIZES); - if (extent_slab_get(extent)) { - extent_interior_deregister(tsdn, rtree_ctx, extent); - extent_slab_set(extent, false); - } +void +extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, + edata_t *edata) { + assert((ecache->state != extent_state_dirty && + ecache->state != extent_state_muzzy) || + !edata_zeroed_get(edata)); + + malloc_mutex_lock(tsdn, &ecache->mtx); - assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, - (uintptr_t)extent_base_get(extent), true) == extent); + emap_assert_mapped(tsdn, pac->emap, edata); - if (!extents->delay_coalesce) { - extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, - rtree_ctx, extents, extent, NULL, growing_retained); - } else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) { - assert(extents == &arena->extents_dirty); + if (edata_guarded_get(edata)) { + goto label_skip_coalesce; + } + if (!ecache->delay_coalesce) { + edata = extent_try_coalesce(tsdn, pac, ehooks, ecache, edata, + NULL); + } else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) { + assert(ecache == &pac->ecache_dirty); /* Always coalesce large extents eagerly. */ bool coalesced; do { - assert(extent_state_get(extent) == extent_state_active); - extent = extent_try_coalesce_large(tsdn, arena, - r_extent_hooks, rtree_ctx, extents, extent, - &coalesced, growing_retained); + assert(edata_state_get(edata) == extent_state_active); + edata = extent_try_coalesce_large(tsdn, pac, ehooks, + ecache, edata, &coalesced); } while (coalesced); - if (extent_size_get(extent) >= oversize_threshold) { + if (edata_size_get(edata) >= + atomic_load_zu(&pac->oversize_threshold, ATOMIC_RELAXED) + && extent_may_force_decay(pac)) { /* Shortcut to purge the oversize extent eagerly. */ - malloc_mutex_unlock(tsdn, &extents->mtx); - arena_decay_extent(tsdn, arena, r_extent_hooks, extent); + malloc_mutex_unlock(tsdn, &ecache->mtx); + extent_maximally_purge(tsdn, pac, ehooks, edata); return; } } - extent_deactivate_locked(tsdn, arena, extents, extent); +label_skip_coalesce: + extent_deactivate_locked(tsdn, pac, ecache, edata); - malloc_mutex_unlock(tsdn, &extents->mtx); + malloc_mutex_unlock(tsdn, &ecache->mtx); } void -extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { - extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; - +extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + edata_t *edata) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); - if (extent_register(tsdn, extent)) { - extent_dalloc(tsdn, arena, extent); + if (extent_register(tsdn, pac, edata)) { + edata_cache_put(tsdn, pac->edata_cache, edata); return; } - extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent); + extent_dalloc_wrapper(tsdn, pac, ehooks, edata); } static bool -extent_may_dalloc(void) { - /* With retain enabled, the default dalloc always fails. 
*/ - return !opt_retain; -} - -static bool -extent_dalloc_default_impl(void *addr, size_t size) { - if (!have_dss || !extent_in_dss(addr)) { - return extent_dalloc_mmap(addr, size); - } - return true; -} - -static bool -extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size, - bool committed, unsigned arena_ind) { - return extent_dalloc_default_impl(addr, size); -} - -static bool -extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent) { +extent_dalloc_wrapper_try(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + edata_t *edata) { bool err; - assert(extent_base_get(extent) != NULL); - assert(extent_size_get(extent) != 0); + assert(edata_base_get(edata) != NULL); + assert(edata_size_get(edata) != 0); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); - extent_addr_set(extent, extent_base_get(extent)); + edata_addr_set(edata, edata_base_get(edata)); - extent_hooks_assure_initialized(arena, r_extent_hooks); /* Try to deallocate. */ - if (*r_extent_hooks == &extent_hooks_default) { - /* Call directly to propagate tsdn. */ - err = extent_dalloc_default_impl(extent_base_get(extent), - extent_size_get(extent)); - } else { - extent_hook_pre_reentrancy(tsdn, arena); - err = ((*r_extent_hooks)->dalloc == NULL || - (*r_extent_hooks)->dalloc(*r_extent_hooks, - extent_base_get(extent), extent_size_get(extent), - extent_committed_get(extent), arena_ind_get(arena))); - extent_hook_post_reentrancy(tsdn); - } + err = ehooks_dalloc(tsdn, ehooks, edata_base_get(edata), + edata_size_get(edata), edata_committed_get(edata)); if (!err) { - extent_dalloc(tsdn, arena, extent); + edata_cache_put(tsdn, pac->edata_cache, edata); } return err; } +edata_t * +extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + void *new_addr, size_t size, size_t alignment, bool zero, bool *commit, + bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + + edata_t *edata = edata_cache_get(tsdn, pac->edata_cache); + if (edata == NULL) { + return NULL; + } + size_t palignment = ALIGNMENT_CEILING(alignment, PAGE); + void *addr = ehooks_alloc(tsdn, ehooks, new_addr, size, palignment, + &zero, commit); + if (addr == NULL) { + edata_cache_put(tsdn, pac->edata_cache, edata); + return NULL; + } + edata_init(edata, ecache_ind_get(&pac->ecache_dirty), addr, + size, /* slab */ false, SC_NSIZES, extent_sn_next(pac), + extent_state_active, zero, *commit, EXTENT_PAI_PAC, + opt_retain ? EXTENT_IS_HEAD : EXTENT_NOT_HEAD); + /* + * Retained memory is not counted towards gdump. Only if an extent is + * allocated as a separate mapping, i.e. growing_retained is false, then + * gdump should be updated. + */ + bool gdump_add = !growing_retained; + if (extent_register_impl(tsdn, pac, edata, gdump_add)) { + edata_cache_put(tsdn, pac->edata_cache, edata); + return NULL; + } + + return edata; +} + void -extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent) { - assert(extent_dumpable_get(extent)); +extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + edata_t *edata) { + assert(edata_pai_get(edata) == EXTENT_PAI_PAC); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); /* Avoid calling the default extent_dalloc unless have to. 
*/ - if (*r_extent_hooks != &extent_hooks_default || extent_may_dalloc()) { + if (!ehooks_dalloc_will_fail(ehooks)) { + /* Remove guard pages for dalloc / unmap. */ + if (edata_guarded_get(edata)) { + assert(ehooks_are_default(ehooks)); + san_unguard_pages_two_sided(tsdn, ehooks, edata, + pac->emap); + } /* * Deregister first to avoid a race with other allocating * threads, and reregister if deallocation fails. */ - extent_deregister(tsdn, extent); - if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, - extent)) { + extent_deregister(tsdn, pac, edata); + if (!extent_dalloc_wrapper_try(tsdn, pac, ehooks, edata)) { return; } - extent_reregister(tsdn, extent); + extent_reregister(tsdn, pac, edata); } - if (*r_extent_hooks != &extent_hooks_default) { - extent_hook_pre_reentrancy(tsdn, arena); - } /* Try to decommit; purge if that fails. */ bool zeroed; - if (!extent_committed_get(extent)) { + if (!edata_committed_get(edata)) { zeroed = true; - } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent, - 0, extent_size_get(extent))) { + } else if (!extent_decommit_wrapper(tsdn, ehooks, edata, 0, + edata_size_get(edata))) { zeroed = true; - } else if ((*r_extent_hooks)->purge_forced != NULL && - !(*r_extent_hooks)->purge_forced(*r_extent_hooks, - extent_base_get(extent), extent_size_get(extent), 0, - extent_size_get(extent), arena_ind_get(arena))) { + } else if (!ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata), + edata_size_get(edata), 0, edata_size_get(edata))) { zeroed = true; - } else if (extent_state_get(extent) == extent_state_muzzy || - ((*r_extent_hooks)->purge_lazy != NULL && - !(*r_extent_hooks)->purge_lazy(*r_extent_hooks, - extent_base_get(extent), extent_size_get(extent), 0, - extent_size_get(extent), arena_ind_get(arena)))) { + } else if (edata_state_get(edata) == extent_state_muzzy || + !ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata), + edata_size_get(edata), 0, edata_size_get(edata))) { zeroed = false; } else { zeroed = false; } - if (*r_extent_hooks != &extent_hooks_default) { - extent_hook_post_reentrancy(tsdn); - } - extent_zeroed_set(extent, zeroed); + edata_zeroed_set(edata, zeroed); if (config_prof) { - extent_gdump_sub(tsdn, extent); - } - - extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained, - extent, false); -} - -static void -extent_destroy_default_impl(void *addr, size_t size) { - if (!have_dss || !extent_in_dss(addr)) { - pages_unmap(addr, size); + extent_gdump_sub(tsdn, edata); } -} -static void -extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, - bool committed, unsigned arena_ind) { - extent_destroy_default_impl(addr, size); + extent_record(tsdn, pac, ehooks, &pac->ecache_retained, edata); } void -extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent) { - assert(extent_base_get(extent) != NULL); - assert(extent_size_get(extent) != 0); +extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + edata_t *edata) { + assert(edata_base_get(edata) != NULL); + assert(edata_size_get(edata) != 0); + extent_state_t state = edata_state_get(edata); + assert(state == extent_state_retained || state == extent_state_active); + assert(emap_edata_is_acquired(tsdn, pac->emap, edata)); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); - /* Deregister first to avoid a race with other allocating threads. 
*/ - extent_deregister(tsdn, extent); - - extent_addr_set(extent, extent_base_get(extent)); - - extent_hooks_assure_initialized(arena, r_extent_hooks); - /* Try to destroy; silently fail otherwise. */ - if (*r_extent_hooks == &extent_hooks_default) { - /* Call directly to propagate tsdn. */ - extent_destroy_default_impl(extent_base_get(extent), - extent_size_get(extent)); - } else if ((*r_extent_hooks)->destroy != NULL) { - extent_hook_pre_reentrancy(tsdn, arena); - (*r_extent_hooks)->destroy(*r_extent_hooks, - extent_base_get(extent), extent_size_get(extent), - extent_committed_get(extent), arena_ind_get(arena)); - extent_hook_post_reentrancy(tsdn); + if (edata_guarded_get(edata)) { + assert(opt_retain); + san_unguard_pages_pre_destroy(tsdn, ehooks, edata, pac->emap); } + edata_addr_set(edata, edata_base_get(edata)); - extent_dalloc(tsdn, arena, extent); -} + /* Try to destroy; silently fail otherwise. */ + ehooks_destroy(tsdn, ehooks, edata_base_get(edata), + edata_size_get(edata), edata_committed_get(edata)); -static bool -extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, - size_t offset, size_t length, unsigned arena_ind) { - return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset), - length); + edata_cache_put(tsdn, pac->edata_cache, edata); } static bool -extent_commit_impl(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, - size_t length, bool growing_retained) { +extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + size_t offset, size_t length, bool growing_retained) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 1 : 0); - - extent_hooks_assure_initialized(arena, r_extent_hooks); - if (*r_extent_hooks != &extent_hooks_default) { - extent_hook_pre_reentrancy(tsdn, arena); - } - bool err = ((*r_extent_hooks)->commit == NULL || - (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent), - extent_size_get(extent), offset, length, arena_ind_get(arena))); - if (*r_extent_hooks != &extent_hooks_default) { - extent_hook_post_reentrancy(tsdn); - } - extent_committed_set(extent, extent_committed_get(extent) || !err); + bool err = ehooks_commit(tsdn, ehooks, edata_base_get(edata), + edata_size_get(edata), offset, length); + edata_committed_set(edata, edata_committed_get(edata) || !err); return err; } bool -extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, - size_t length) { - return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset, - length, false); -} - -static bool -extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, - size_t offset, size_t length, unsigned arena_ind) { - return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset), - length); +extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + size_t offset, size_t length) { + return extent_commit_impl(tsdn, ehooks, edata, offset, length, + /* growing_retained */ false); } bool -extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, - size_t length) { +extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + size_t offset, size_t length) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); - - extent_hooks_assure_initialized(arena, r_extent_hooks); - - if (*r_extent_hooks != &extent_hooks_default) { - extent_hook_pre_reentrancy(tsdn, 
arena); - } - bool err = ((*r_extent_hooks)->decommit == NULL || - (*r_extent_hooks)->decommit(*r_extent_hooks, - extent_base_get(extent), extent_size_get(extent), offset, length, - arena_ind_get(arena))); - if (*r_extent_hooks != &extent_hooks_default) { - extent_hook_post_reentrancy(tsdn); - } - extent_committed_set(extent, extent_committed_get(extent) && err); + bool err = ehooks_decommit(tsdn, ehooks, edata_base_get(edata), + edata_size_get(edata), offset, length); + edata_committed_set(edata, edata_committed_get(edata) && err); return err; } -#ifdef PAGES_CAN_PURGE_LAZY static bool -extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, - size_t offset, size_t length, unsigned arena_ind) { - assert(addr != NULL); - assert((offset & PAGE_MASK) == 0); - assert(length != 0); - assert((length & PAGE_MASK) == 0); - - return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset), - length); -} -#endif - -static bool -extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, - size_t length, bool growing_retained) { +extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + size_t offset, size_t length, bool growing_retained) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 1 : 0); - - extent_hooks_assure_initialized(arena, r_extent_hooks); - - if ((*r_extent_hooks)->purge_lazy == NULL) { - return true; - } - if (*r_extent_hooks != &extent_hooks_default) { - extent_hook_pre_reentrancy(tsdn, arena); - } - bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks, - extent_base_get(extent), extent_size_get(extent), offset, length, - arena_ind_get(arena)); - if (*r_extent_hooks != &extent_hooks_default) { - extent_hook_post_reentrancy(tsdn); - } - + bool err = ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata), + edata_size_get(edata), offset, length); return err; } bool -extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, - size_t length) { - return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent, - offset, length, false); -} - -#ifdef PAGES_CAN_PURGE_FORCED -static bool -extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr, - size_t size, size_t offset, size_t length, unsigned arena_ind) { - assert(addr != NULL); - assert((offset & PAGE_MASK) == 0); - assert(length != 0); - assert((length & PAGE_MASK) == 0); - - return pages_purge_forced((void *)((uintptr_t)addr + - (uintptr_t)offset), length); +extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + size_t offset, size_t length) { + return extent_purge_lazy_impl(tsdn, ehooks, edata, offset, + length, false); } -#endif static bool -extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, - size_t length, bool growing_retained) { +extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + size_t offset, size_t length, bool growing_retained) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 
1 : 0); - - extent_hooks_assure_initialized(arena, r_extent_hooks); - - if ((*r_extent_hooks)->purge_forced == NULL) { - return true; - } - if (*r_extent_hooks != &extent_hooks_default) { - extent_hook_pre_reentrancy(tsdn, arena); - } - bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks, - extent_base_get(extent), extent_size_get(extent), offset, length, - arena_ind_get(arena)); - if (*r_extent_hooks != &extent_hooks_default) { - extent_hook_post_reentrancy(tsdn); - } + bool err = ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata), + edata_size_get(edata), offset, length); return err; } bool -extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, - size_t length) { - return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent, - offset, length, false); -} - -static bool -extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size, - size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { - if (!maps_coalesce) { - /* - * Without retain, only whole regions can be purged (required by - * MEM_RELEASE on Windows) -- therefore disallow splitting. See - * comments in extent_head_no_merge(). - */ - return !opt_retain; - } - - return false; +extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + size_t offset, size_t length) { + return extent_purge_forced_impl(tsdn, ehooks, edata, offset, length, + false); } /* @@ -2088,183 +1170,95 @@ extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size, * with the trail (the higher addressed portion). This makes 'extent' the lead, * and returns the trail (except in case of error). */ -static extent_t * -extent_split_impl(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, - szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, - bool growing_retained) { - assert(extent_size_get(extent) == size_a + size_b); - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, growing_retained ? 1 : 0); - - extent_hooks_assure_initialized(arena, r_extent_hooks); +static edata_t * +extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks) { + assert(edata_size_get(edata) == size_a + size_b); + /* Only the shrink path may split w/o holding core locks. 
*/ + if (holding_core_locks) { + witness_assert_positive_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE); + } else { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + } - if ((*r_extent_hooks)->split == NULL) { + if (ehooks_split_will_fail(ehooks)) { return NULL; } - extent_t *trail = extent_alloc(tsdn, arena); + edata_t *trail = edata_cache_get(tsdn, pac->edata_cache); if (trail == NULL) { goto label_error_a; } - extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) + - size_a), size_b, slab_b, szind_b, extent_sn_get(extent), - extent_state_get(extent), extent_zeroed_get(extent), - extent_committed_get(extent), extent_dumpable_get(extent), - EXTENT_NOT_HEAD); - - rtree_ctx_t rtree_ctx_fallback; - rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); - rtree_leaf_elm_t *lead_elm_a, *lead_elm_b; - { - extent_t lead; - - extent_init(&lead, arena, extent_addr_get(extent), size_a, - slab_a, szind_a, extent_sn_get(extent), - extent_state_get(extent), extent_zeroed_get(extent), - extent_committed_get(extent), extent_dumpable_get(extent), - EXTENT_NOT_HEAD); - - extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false, - true, &lead_elm_a, &lead_elm_b); - } - rtree_leaf_elm_t *trail_elm_a, *trail_elm_b; - extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true, - &trail_elm_a, &trail_elm_b); - - if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL - || trail_elm_b == NULL) { + edata_init(trail, edata_arena_ind_get(edata), + (void *)((uintptr_t)edata_base_get(edata) + size_a), size_b, + /* slab */ false, SC_NSIZES, edata_sn_get(edata), + edata_state_get(edata), edata_zeroed_get(edata), + edata_committed_get(edata), EXTENT_PAI_PAC, EXTENT_NOT_HEAD); + emap_prepare_t prepare; + bool err = emap_split_prepare(tsdn, pac->emap, &prepare, edata, + size_a, trail, size_b); + if (err) { goto label_error_b; } - extent_lock2(tsdn, extent, trail); + /* + * No need to acquire trail or edata, because: 1) trail was new (just + * allocated); and 2) edata is either an active allocation (the shrink + * path), or in an acquired state (extracted from the ecache on the + * extent_recycle_split path). 
+ */ + assert(emap_edata_is_acquired(tsdn, pac->emap, edata)); + assert(emap_edata_is_acquired(tsdn, pac->emap, trail)); + + err = ehooks_split(tsdn, ehooks, edata_base_get(edata), size_a + size_b, + size_a, size_b, edata_committed_get(edata)); - if (*r_extent_hooks != &extent_hooks_default) { - extent_hook_pre_reentrancy(tsdn, arena); - } - bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent), - size_a + size_b, size_a, size_b, extent_committed_get(extent), - arena_ind_get(arena)); - if (*r_extent_hooks != &extent_hooks_default) { - extent_hook_post_reentrancy(tsdn); - } if (err) { - goto label_error_c; + goto label_error_b; } - extent_size_set(extent, size_a); - extent_szind_set(extent, szind_a); - - extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent, - szind_a, slab_a); - extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail, - szind_b, slab_b); - - extent_unlock2(tsdn, extent, trail); + edata_size_set(edata, size_a); + emap_split_commit(tsdn, pac->emap, &prepare, edata, size_a, trail, + size_b); return trail; -label_error_c: - extent_unlock2(tsdn, extent, trail); label_error_b: - extent_dalloc(tsdn, arena, trail); + edata_cache_put(tsdn, pac->edata_cache, trail); label_error_a: return NULL; } -extent_t * -extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, - szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) { - return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a, - szind_a, slab_a, size_b, szind_b, slab_b, false); -} - -static bool -extent_merge_default_impl(void *addr_a, void *addr_b) { - if (!maps_coalesce && !opt_retain) { - return true; - } - if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) { - return true; - } - - return false; -} - -/* - * Returns true if the given extents can't be merged because of their head bit - * settings. Assumes the second extent has the higher address. - */ -static bool -extent_head_no_merge(extent_t *a, extent_t *b) { - assert(extent_base_get(a) < extent_base_get(b)); - /* - * When coalesce is not always allowed (Windows), only merge extents - * from the same VirtualAlloc region under opt.retain (in which case - * MEM_DECOMMIT is utilized for purging). - */ - if (maps_coalesce) { - return false; - } - if (!opt_retain) { - return true; - } - /* If b is a head extent, disallow the cross-region merge. */ - if (extent_is_head_get(b)) { - /* - * Additionally, sn should not overflow with retain; sanity - * check that different regions have unique sn. - */ - assert(extent_sn_comp(a, b) != 0); - return true; - } - assert(extent_sn_comp(a, b) == 0); - - return false; +edata_t * +extent_split_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata, + size_t size_a, size_t size_b, bool holding_core_locks) { + return extent_split_impl(tsdn, pac, ehooks, edata, size_a, size_b, + holding_core_locks); } static bool -extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, - void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { - if (!maps_coalesce) { - tsdn_t *tsdn = tsdn_fetch(); - extent_t *a = iealloc(tsdn, addr_a); - extent_t *b = iealloc(tsdn, addr_b); - if (extent_head_no_merge(a, b)) { - return true; - } +extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *a, + edata_t *b, bool holding_core_locks) { + /* Only the expanding path may merge w/o holding ecache locks. 
*/ + if (holding_core_locks) { + witness_assert_positive_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE); + } else { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); } - return extent_merge_default_impl(addr_a, addr_b); -} - -static bool -extent_merge_impl(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, - bool growing_retained) { - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, growing_retained ? 1 : 0); - assert(extent_base_get(a) < extent_base_get(b)); - - extent_hooks_assure_initialized(arena, r_extent_hooks); - if ((*r_extent_hooks)->merge == NULL || extent_head_no_merge(a, b)) { - return true; - } + assert(edata_base_get(a) < edata_base_get(b)); + assert(edata_arena_ind_get(a) == edata_arena_ind_get(b)); + assert(edata_arena_ind_get(a) == ehooks_ind_get(ehooks)); + emap_assert_mapped(tsdn, pac->emap, a); + emap_assert_mapped(tsdn, pac->emap, b); - bool err; - if (*r_extent_hooks == &extent_hooks_default) { - /* Call directly to propagate tsdn. */ - err = extent_merge_default_impl(extent_base_get(a), - extent_base_get(b)); - } else { - extent_hook_pre_reentrancy(tsdn, arena); - err = (*r_extent_hooks)->merge(*r_extent_hooks, - extent_base_get(a), extent_size_get(a), extent_base_get(b), - extent_size_get(b), extent_committed_get(a), - arena_ind_get(arena)); - extent_hook_post_reentrancy(tsdn); - } + bool err = ehooks_merge(tsdn, ehooks, edata_base_get(a), + edata_size_get(a), edata_base_get(b), edata_size_get(b), + edata_committed_get(a)); if (err) { return true; @@ -2275,132 +1269,58 @@ extent_merge_impl(tsdn_t *tsdn, arena_t *arena, * owned, so the following code uses decomposed helper functions rather * than extent_{,de}register() to do things in the right order. */ - rtree_ctx_t rtree_ctx_fallback; - rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); - rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b; - extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a, - &a_elm_b); - extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a, - &b_elm_b); - - extent_lock2(tsdn, a, b); - - if (a_elm_b != NULL) { - rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL, - SC_NSIZES, false); - } - if (b_elm_b != NULL) { - rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL, - SC_NSIZES, false); - } else { - b_elm_b = b_elm_a; - } + emap_prepare_t prepare; + emap_merge_prepare(tsdn, pac->emap, &prepare, a, b); - extent_size_set(a, extent_size_get(a) + extent_size_get(b)); - extent_szind_set(a, SC_NSIZES); - extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ? - extent_sn_get(a) : extent_sn_get(b)); - extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b)); + assert(edata_state_get(a) == extent_state_active || + edata_state_get(a) == extent_state_merging); + edata_state_set(a, extent_state_active); + edata_size_set(a, edata_size_get(a) + edata_size_get(b)); + edata_sn_set(a, (edata_sn_get(a) < edata_sn_get(b)) ? 
+ edata_sn_get(a) : edata_sn_get(b)); + edata_zeroed_set(a, edata_zeroed_get(a) && edata_zeroed_get(b)); - extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES, - false); + emap_merge_commit(tsdn, pac->emap, &prepare, a, b); - extent_unlock2(tsdn, a, b); - - extent_dalloc(tsdn, extent_arena_get(b), b); + edata_cache_put(tsdn, pac->edata_cache, b); return false; } bool -extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) { - return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false); +extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + edata_t *a, edata_t *b) { + return extent_merge_impl(tsdn, pac, ehooks, a, b, + /* holding_core_locks */ false); } bool -extent_boot(void) { - if (rtree_new(&extents_rtree, true)) { - return true; - } +extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + bool commit, bool zero, bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); - if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool", - WITNESS_RANK_EXTENT_POOL)) { - return true; + if (commit && !edata_committed_get(edata)) { + if (extent_commit_impl(tsdn, ehooks, edata, 0, + edata_size_get(edata), growing_retained)) { + return true; + } } - - if (have_dss) { - extent_dss_boot(); + if (zero && !edata_zeroed_get(edata)) { + void *addr = edata_base_get(edata); + size_t size = edata_size_get(edata); + ehooks_zero(tsdn, ehooks, addr, size); } - return false; } -void -extent_util_stats_get(tsdn_t *tsdn, const void *ptr, - size_t *nfree, size_t *nregs, size_t *size) { - assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL); - - const extent_t *extent = iealloc(tsdn, ptr); - if (unlikely(extent == NULL)) { - *nfree = *nregs = *size = 0; - return; - } - - *size = extent_size_get(extent); - if (!extent_slab_get(extent)) { - *nfree = 0; - *nregs = 1; - } else { - *nfree = extent_nfree_get(extent); - *nregs = bin_infos[extent_szind_get(extent)].nregs; - assert(*nfree <= *nregs); - assert(*nfree * extent_usize_get(extent) <= *size); - } -} - -void -extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr, - size_t *nfree, size_t *nregs, size_t *size, - size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr) { - assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL - && bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL); - - const extent_t *extent = iealloc(tsdn, ptr); - if (unlikely(extent == NULL)) { - *nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0; - *slabcur_addr = NULL; - return; - } +bool +extent_boot(void) { + assert(sizeof(slab_data_t) >= sizeof(e_prof_info_t)); - *size = extent_size_get(extent); - if (!extent_slab_get(extent)) { - *nfree = *bin_nfree = *bin_nregs = 0; - *nregs = 1; - *slabcur_addr = NULL; - return; + if (have_dss) { + extent_dss_boot(); } - *nfree = extent_nfree_get(extent); - const szind_t szind = extent_szind_get(extent); - *nregs = bin_infos[szind].nregs; - assert(*nfree <= *nregs); - assert(*nfree * extent_usize_get(extent) <= *size); - - const arena_t *arena = extent_arena_get(extent); - assert(arena != NULL); - const unsigned binshard = extent_binshard_get(extent); - bin_t *bin = &arena->bins[szind].bin_shards[binshard]; - - malloc_mutex_lock(tsdn, &bin->lock); - if (config_stats) { - *bin_nregs = *nregs * bin->stats.curslabs; - assert(*bin_nregs >= bin->stats.curregs); - *bin_nfree = *bin_nregs - bin->stats.curregs; - } else { - *bin_nfree 
= *bin_nregs = 0; - } - *slabcur_addr = extent_addr_get(bin->slabcur); - assert(*slabcur_addr != NULL); - malloc_mutex_unlock(tsdn, &bin->lock); + return false; } diff --git a/contrib/jemalloc/src/extent_dss.c b/contrib/jemalloc/src/extent_dss.c index 858178911051b0..9a35bacfb4f8ac 100644 --- a/contrib/jemalloc/src/extent_dss.c +++ b/contrib/jemalloc/src/extent_dss.c @@ -1,4 +1,3 @@ -#define JEMALLOC_EXTENT_DSS_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" @@ -109,7 +108,7 @@ extent_dss_max_update(void *new_addr) { void * extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit) { - extent_t *gap; + edata_t *gap; cassert(have_dss); assert(size > 0); @@ -123,7 +122,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, return NULL; } - gap = extent_alloc(tsdn, arena); + gap = edata_cache_get(tsdn, &arena->pa_shard.edata_cache); if (gap == NULL) { return NULL; } @@ -141,6 +140,8 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, goto label_oom; } + bool head_state = opt_retain ? EXTENT_IS_HEAD : + EXTENT_NOT_HEAD; /* * Compute how much page-aligned gap space (if any) is * necessary to satisfy alignment. This space can be @@ -153,11 +154,12 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, size_t gap_size_page = (uintptr_t)ret - (uintptr_t)gap_addr_page; if (gap_size_page != 0) { - extent_init(gap, arena, gap_addr_page, - gap_size_page, false, SC_NSIZES, - arena_extent_sn_next(arena), - extent_state_active, false, true, true, - EXTENT_NOT_HEAD); + edata_init(gap, arena_ind_get(arena), + gap_addr_page, gap_size_page, false, + SC_NSIZES, extent_sn_next( + &arena->pa_shard.pac), + extent_state_active, false, true, + EXTENT_PAI_PAC, head_state); } /* * Compute the address just past the end of the desired @@ -186,25 +188,29 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, extent_dss_extending_finish(); if (gap_size_page != 0) { - extent_dalloc_gap(tsdn, arena, gap); + ehooks_t *ehooks = arena_get_ehooks( + arena); + extent_dalloc_gap(tsdn, + &arena->pa_shard.pac, ehooks, gap); } else { - extent_dalloc(tsdn, arena, gap); + edata_cache_put(tsdn, + &arena->pa_shard.edata_cache, gap); } if (!*commit) { *commit = pages_decommit(ret, size); } if (*zero && *commit) { - extent_hooks_t *extent_hooks = - EXTENT_HOOKS_INITIALIZER; - extent_t extent; + edata_t edata = {0}; + ehooks_t *ehooks = arena_get_ehooks( + arena); - extent_init(&extent, arena, ret, size, + edata_init(&edata, + arena_ind_get(arena), ret, size, size, false, SC_NSIZES, extent_state_active, false, true, - true, EXTENT_NOT_HEAD); + EXTENT_PAI_PAC, head_state); if (extent_purge_forced_wrapper(tsdn, - arena, &extent_hooks, &extent, 0, - size)) { + ehooks, &edata, 0, size)) { memset(ret, 0, size); } } @@ -224,7 +230,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, } label_oom: extent_dss_extending_finish(); - extent_dalloc(tsdn, arena, gap); + edata_cache_put(tsdn, &arena->pa_shard.edata_cache, gap); return NULL; } diff --git a/contrib/jemalloc/src/extent_mmap.c b/contrib/jemalloc/src/extent_mmap.c index 17fd1c8f9577a7..5f0ee2d24b1baa 100644 --- a/contrib/jemalloc/src/extent_mmap.c +++ b/contrib/jemalloc/src/extent_mmap.c @@ -1,4 +1,3 @@ -#define JEMALLOC_EXTENT_MMAP_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" 
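The extent.c hunks above replace the old extent_hooks_t double-pointer plumbing with the ehooks/emap flow: rtree bookkeeping is staged around a single hook call (emap_split_prepare, then ehooks_split, then emap_split_commit, with a matching prepare/commit pair on the merge side), so a failed hook leaves the emap untouched. The sketch below is illustrative only and not part of the imported sources; it shows how a page-allocator caller might drive the new extent_split_wrapper()/extent_merge_wrapper() signatures, assuming valid tsdn/pac/ehooks/edata values supplied by the surrounding pa_shard code. The function name and error handling here are hypothetical.

```c
/*
 * Illustrative sketch only -- not part of this import.  Shrink an active
 * extent by splitting off a trail, then grow it back by merging, using the
 * wrappers introduced in the extent.c hunks above.
 */
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

static bool
example_split_then_merge(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    edata_t *edata, size_t new_size) {
	size_t old_size = edata_size_get(edata);
	assert(new_size > 0 && new_size < old_size);

	/*
	 * Only the shrink path may split without holding the core locks,
	 * hence holding_core_locks == false.  On success, edata keeps the
	 * lower new_size bytes and trail covers the remainder.
	 */
	edata_t *trail = extent_split_wrapper(tsdn, pac, ehooks, edata,
	    new_size, old_size - new_size, /* holding_core_locks */ false);
	if (trail == NULL) {
		return true;	/* Split failed; edata is unchanged. */
	}

	/*
	 * Merge the trail back; arguments must be in address order and a
	 * true return means failure, per jemalloc convention.
	 */
	if (extent_merge_wrapper(tsdn, pac, ehooks, edata, trail)) {
		/* In real code the trail would go back to the ecache. */
		return true;
	}
	return false;	/* edata is back to its original size. */
}
```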
diff --git a/contrib/jemalloc/src/fxp.c b/contrib/jemalloc/src/fxp.c new file mode 100644 index 00000000000000..96585f0a65acf6 --- /dev/null +++ b/contrib/jemalloc/src/fxp.c @@ -0,0 +1,124 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/fxp.h" + +static bool +fxp_isdigit(char c) { + return '0' <= c && c <= '9'; +} + +bool +fxp_parse(fxp_t *result, const char *str, char **end) { + /* + * Using malloc_strtoumax in this method isn't as handy as you might + * expect (I tried). In the fractional part, significant leading zeros + * mean that you still need to do your own parsing, now with trickier + * math. In the integer part, the casting (uintmax_t to uint32_t) + * forces more reasoning about bounds than just checking for overflow as + * we parse. + */ + uint32_t integer_part = 0; + + const char *cur = str; + + /* The string must start with a digit or a decimal point. */ + if (*cur != '.' && !fxp_isdigit(*cur)) { + return true; + } + + while ('0' <= *cur && *cur <= '9') { + integer_part *= 10; + integer_part += *cur - '0'; + if (integer_part >= (1U << 16)) { + return true; + } + cur++; + } + + /* + * We've parsed all digits at the beginning of the string, without + * overflow. Either we're done, or there's a fractional part. + */ + if (*cur != '.') { + *result = (integer_part << 16); + if (end != NULL) { + *end = (char *)cur; + } + return false; + } + + /* There's a fractional part. */ + cur++; + if (!fxp_isdigit(*cur)) { + /* Shouldn't end on the decimal point. */ + return true; + } + + /* + * We use a lot of precision for the fractional part, even though we'll + * discard most of it; this lets us get exact values for the important + * special case where the denominator is a small power of 2 (for + * instance, 1/512 == 0.001953125 is exactly representable even with + * only 16 bits of fractional precision). We need to left-shift by 16 + * before dividing so we pick the number of digits to be + * floor(log(2**48)) = 14. + */ + uint64_t fractional_part = 0; + uint64_t frac_div = 1; + for (int i = 0; i < FXP_FRACTIONAL_PART_DIGITS; i++) { + fractional_part *= 10; + frac_div *= 10; + if (fxp_isdigit(*cur)) { + fractional_part += *cur - '0'; + cur++; + } + } + /* + * We only parse the first maxdigits characters, but we can still ignore + * any digits after that. + */ + while (fxp_isdigit(*cur)) { + cur++; + } + + assert(fractional_part < frac_div); + uint32_t fractional_repr = (uint32_t)( + (fractional_part << 16) / frac_div); + + /* Success! 
*/ + *result = (integer_part << 16) + fractional_repr; + if (end != NULL) { + *end = (char *)cur; + } + return false; +} + +void +fxp_print(fxp_t a, char buf[FXP_BUF_SIZE]) { + uint32_t integer_part = fxp_round_down(a); + uint32_t fractional_part = (a & ((1U << 16) - 1)); + + int leading_fraction_zeros = 0; + uint64_t fraction_digits = fractional_part; + for (int i = 0; i < FXP_FRACTIONAL_PART_DIGITS; i++) { + if (fraction_digits < (1U << 16) + && fraction_digits * 10 >= (1U << 16)) { + leading_fraction_zeros = i; + } + fraction_digits *= 10; + } + fraction_digits >>= 16; + while (fraction_digits > 0 && fraction_digits % 10 == 0) { + fraction_digits /= 10; + } + + size_t printed = malloc_snprintf(buf, FXP_BUF_SIZE, "%"FMTu32".", + integer_part); + for (int i = 0; i < leading_fraction_zeros; i++) { + buf[printed] = '0'; + printed++; + } + malloc_snprintf(&buf[printed], FXP_BUF_SIZE - printed, "%"FMTu64, + fraction_digits); +} diff --git a/contrib/jemalloc/src/hash.c b/contrib/jemalloc/src/hash.c deleted file mode 100644 index 7b2bdc2bd6f428..00000000000000 --- a/contrib/jemalloc/src/hash.c +++ /dev/null @@ -1,3 +0,0 @@ -#define JEMALLOC_HASH_C_ -#include "jemalloc/internal/jemalloc_preamble.h" -#include "jemalloc/internal/jemalloc_internal_includes.h" diff --git a/contrib/jemalloc/src/hook.c b/contrib/jemalloc/src/hook.c index 9ac703cf9f516b..493edbbe512d4a 100644 --- a/contrib/jemalloc/src/hook.c +++ b/contrib/jemalloc/src/hook.c @@ -130,9 +130,9 @@ hook_reentrantp() { */ static bool in_hook_global = true; tsdn_t *tsdn = tsdn_fetch(); - tcache_t *tcache = tsdn_tcachep_get(tsdn); - if (tcache != NULL) { - return &tcache->in_hook; + bool *in_hook = tsdn_in_hookp_get(tsdn); + if (in_hook!= NULL) { + return in_hook; } return &in_hook_global; } diff --git a/contrib/jemalloc/src/hpa.c b/contrib/jemalloc/src/hpa.c new file mode 100644 index 00000000000000..7e2aeba0c0ffda --- /dev/null +++ b/contrib/jemalloc/src/hpa.c @@ -0,0 +1,1044 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/hpa.h" + +#include "jemalloc/internal/fb.h" +#include "jemalloc/internal/witness.h" + +#define HPA_EDEN_SIZE (128 * HUGEPAGE) + +static edata_t *hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size, + size_t alignment, bool zero, bool guarded, bool frequent_reuse, + bool *deferred_work_generated); +static size_t hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, + size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated); +static bool hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, + size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated); +static bool hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, + size_t old_size, size_t new_size, bool *deferred_work_generated); +static void hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata, + bool *deferred_work_generated); +static void hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self, + edata_list_active_t *list, bool *deferred_work_generated); +static uint64_t hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self); + +bool +hpa_supported() { +#ifdef _WIN32 + /* + * At least until the API and implementation is somewhat settled, we + * don't want to try to debug the VM subsystem on the hardest-to-test + * platform. + */ + return false; +#endif + if (!pages_can_hugify) { + return false; + } + /* + * We fundamentally rely on a address-space-hungry growth strategy for + * hugepages. 
+ */ + if (LG_SIZEOF_PTR != 3) { + return false; + } + /* + * If we couldn't detect the value of HUGEPAGE, HUGEPAGE_PAGES becomes + * this sentinel value -- see the comment in pages.h. + */ + if (HUGEPAGE_PAGES == 1) { + return false; + } + return true; +} + +static void +hpa_do_consistency_checks(hpa_shard_t *shard) { + assert(shard->base != NULL); +} + +bool +hpa_central_init(hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks) { + /* malloc_conf processing should have filtered out these cases. */ + assert(hpa_supported()); + bool err; + err = malloc_mutex_init(¢ral->grow_mtx, "hpa_central_grow", + WITNESS_RANK_HPA_CENTRAL_GROW, malloc_mutex_rank_exclusive); + if (err) { + return true; + } + err = malloc_mutex_init(¢ral->mtx, "hpa_central", + WITNESS_RANK_HPA_CENTRAL, malloc_mutex_rank_exclusive); + if (err) { + return true; + } + central->base = base; + central->eden = NULL; + central->eden_len = 0; + central->age_counter = 0; + central->hooks = *hooks; + return false; +} + +static hpdata_t * +hpa_alloc_ps(tsdn_t *tsdn, hpa_central_t *central) { + return (hpdata_t *)base_alloc(tsdn, central->base, sizeof(hpdata_t), + CACHELINE); +} + +hpdata_t * +hpa_central_extract(tsdn_t *tsdn, hpa_central_t *central, size_t size, + bool *oom) { + /* Don't yet support big allocations; these should get filtered out. */ + assert(size <= HUGEPAGE); + /* + * Should only try to extract from the central allocator if the local + * shard is exhausted. We should hold the grow_mtx on that shard. + */ + witness_assert_positive_depth_to_rank( + tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_HPA_SHARD_GROW); + + malloc_mutex_lock(tsdn, ¢ral->grow_mtx); + *oom = false; + + hpdata_t *ps = NULL; + + /* Is eden a perfect fit? */ + if (central->eden != NULL && central->eden_len == HUGEPAGE) { + ps = hpa_alloc_ps(tsdn, central); + if (ps == NULL) { + *oom = true; + malloc_mutex_unlock(tsdn, ¢ral->grow_mtx); + return NULL; + } + hpdata_init(ps, central->eden, central->age_counter++); + central->eden = NULL; + central->eden_len = 0; + malloc_mutex_unlock(tsdn, ¢ral->grow_mtx); + return ps; + } + + /* + * We're about to try to allocate from eden by splitting. If eden is + * NULL, we have to allocate it too. Otherwise, we just have to + * allocate an edata_t for the new psset. + */ + if (central->eden == NULL) { + /* + * During development, we're primarily concerned with systems + * with overcommit. Eventually, we should be more careful here. + */ + bool commit = true; + /* Allocate address space, bailing if we fail. */ + void *new_eden = pages_map(NULL, HPA_EDEN_SIZE, HUGEPAGE, + &commit); + if (new_eden == NULL) { + *oom = true; + malloc_mutex_unlock(tsdn, ¢ral->grow_mtx); + return NULL; + } + ps = hpa_alloc_ps(tsdn, central); + if (ps == NULL) { + pages_unmap(new_eden, HPA_EDEN_SIZE); + *oom = true; + malloc_mutex_unlock(tsdn, ¢ral->grow_mtx); + return NULL; + } + central->eden = new_eden; + central->eden_len = HPA_EDEN_SIZE; + } else { + /* Eden is already nonempty; only need an edata for ps. 
*/ + ps = hpa_alloc_ps(tsdn, central); + if (ps == NULL) { + *oom = true; + malloc_mutex_unlock(tsdn, ¢ral->grow_mtx); + return NULL; + } + } + assert(ps != NULL); + assert(central->eden != NULL); + assert(central->eden_len > HUGEPAGE); + assert(central->eden_len % HUGEPAGE == 0); + assert(HUGEPAGE_ADDR2BASE(central->eden) == central->eden); + + hpdata_init(ps, central->eden, central->age_counter++); + + char *eden_char = (char *)central->eden; + eden_char += HUGEPAGE; + central->eden = (void *)eden_char; + central->eden_len -= HUGEPAGE; + + malloc_mutex_unlock(tsdn, ¢ral->grow_mtx); + + return ps; +} + +bool +hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap, + base_t *base, edata_cache_t *edata_cache, unsigned ind, + const hpa_shard_opts_t *opts) { + /* malloc_conf processing should have filtered out these cases. */ + assert(hpa_supported()); + bool err; + err = malloc_mutex_init(&shard->grow_mtx, "hpa_shard_grow", + WITNESS_RANK_HPA_SHARD_GROW, malloc_mutex_rank_exclusive); + if (err) { + return true; + } + err = malloc_mutex_init(&shard->mtx, "hpa_shard", + WITNESS_RANK_HPA_SHARD, malloc_mutex_rank_exclusive); + if (err) { + return true; + } + + assert(edata_cache != NULL); + shard->central = central; + shard->base = base; + edata_cache_fast_init(&shard->ecf, edata_cache); + psset_init(&shard->psset); + shard->age_counter = 0; + shard->ind = ind; + shard->emap = emap; + + shard->opts = *opts; + + shard->npending_purge = 0; + nstime_init_zero(&shard->last_purge); + + shard->stats.npurge_passes = 0; + shard->stats.npurges = 0; + shard->stats.nhugifies = 0; + shard->stats.ndehugifies = 0; + + /* + * Fill these in last, so that if an hpa_shard gets used despite + * initialization failing, we'll at least crash instead of just + * operating on corrupted data. + */ + shard->pai.alloc = &hpa_alloc; + shard->pai.alloc_batch = &hpa_alloc_batch; + shard->pai.expand = &hpa_expand; + shard->pai.shrink = &hpa_shrink; + shard->pai.dalloc = &hpa_dalloc; + shard->pai.dalloc_batch = &hpa_dalloc_batch; + shard->pai.time_until_deferred_work = &hpa_time_until_deferred_work; + + hpa_do_consistency_checks(shard); + + return false; +} + +/* + * Note that the stats functions here follow the usual stats naming conventions; + * "merge" obtains the stats from some live object of instance, while "accum" + * only combines the stats from one stats objet to another. Hence the lack of + * locking here. 
+ */ +static void +hpa_shard_nonderived_stats_accum(hpa_shard_nonderived_stats_t *dst, + hpa_shard_nonderived_stats_t *src) { + dst->npurge_passes += src->npurge_passes; + dst->npurges += src->npurges; + dst->nhugifies += src->nhugifies; + dst->ndehugifies += src->ndehugifies; +} + +void +hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src) { + psset_stats_accum(&dst->psset_stats, &src->psset_stats); + hpa_shard_nonderived_stats_accum(&dst->nonderived_stats, + &src->nonderived_stats); +} + +void +hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard, + hpa_shard_stats_t *dst) { + hpa_do_consistency_checks(shard); + + malloc_mutex_lock(tsdn, &shard->grow_mtx); + malloc_mutex_lock(tsdn, &shard->mtx); + psset_stats_accum(&dst->psset_stats, &shard->psset.stats); + hpa_shard_nonderived_stats_accum(&dst->nonderived_stats, &shard->stats); + malloc_mutex_unlock(tsdn, &shard->mtx); + malloc_mutex_unlock(tsdn, &shard->grow_mtx); +} + +static bool +hpa_good_hugification_candidate(hpa_shard_t *shard, hpdata_t *ps) { + /* + * Note that this needs to be >= rather than just >, because of the + * important special case in which the hugification threshold is exactly + * HUGEPAGE. + */ + return hpdata_nactive_get(ps) * PAGE + >= shard->opts.hugification_threshold; +} + +static size_t +hpa_adjusted_ndirty(tsdn_t *tsdn, hpa_shard_t *shard) { + malloc_mutex_assert_owner(tsdn, &shard->mtx); + return psset_ndirty(&shard->psset) - shard->npending_purge; +} + +static size_t +hpa_ndirty_max(tsdn_t *tsdn, hpa_shard_t *shard) { + malloc_mutex_assert_owner(tsdn, &shard->mtx); + if (shard->opts.dirty_mult == (fxp_t)-1) { + return (size_t)-1; + } + return fxp_mul_frac(psset_nactive(&shard->psset), + shard->opts.dirty_mult); +} + +static bool +hpa_hugify_blocked_by_ndirty(tsdn_t *tsdn, hpa_shard_t *shard) { + malloc_mutex_assert_owner(tsdn, &shard->mtx); + hpdata_t *to_hugify = psset_pick_hugify(&shard->psset); + if (to_hugify == NULL) { + return false; + } + return hpa_adjusted_ndirty(tsdn, shard) + + hpdata_nretained_get(to_hugify) > hpa_ndirty_max(tsdn, shard); +} + +static bool +hpa_should_purge(tsdn_t *tsdn, hpa_shard_t *shard) { + malloc_mutex_assert_owner(tsdn, &shard->mtx); + if (hpa_adjusted_ndirty(tsdn, shard) > hpa_ndirty_max(tsdn, shard)) { + return true; + } + if (hpa_hugify_blocked_by_ndirty(tsdn, shard)) { + return true; + } + return false; +} + +static void +hpa_update_purge_hugify_eligibility(tsdn_t *tsdn, hpa_shard_t *shard, + hpdata_t *ps) { + malloc_mutex_assert_owner(tsdn, &shard->mtx); + if (hpdata_changing_state_get(ps)) { + hpdata_purge_allowed_set(ps, false); + hpdata_disallow_hugify(ps); + return; + } + /* + * Hugepages are distinctly costly to purge, so try to avoid it unless + * they're *particularly* full of dirty pages. Eventually, we should + * use a smarter / more dynamic heuristic for situations where we have + * to manually hugify. + * + * In situations where we don't manually hugify, this problem is + * reduced. The "bad" situation we're trying to avoid is one's that's + * common in some Linux configurations (where both enabled and defrag + * are set to madvise) that can lead to long latency spikes on the first + * access after a hugification. The ideal policy in such configurations + * is probably time-based for both purging and hugifying; only hugify a + * hugepage if it's met the criteria for some extended period of time, + * and only dehugify it if it's failed to meet the criteria for an + * extended period of time. 
When background threads are on, we should + * try to take this hit on one of them, as well. + * + * I think the ideal setting is THP always enabled, and defrag set to + * deferred; in that case we don't need any explicit calls on the + * allocator's end at all; we just try to pack allocations in a + * hugepage-friendly manner and let the OS hugify in the background. + */ + hpdata_purge_allowed_set(ps, hpdata_ndirty_get(ps) > 0); + if (hpa_good_hugification_candidate(shard, ps) + && !hpdata_huge_get(ps)) { + nstime_t now; + shard->central->hooks.curtime(&now, /* first_reading */ true); + hpdata_allow_hugify(ps, now); + } + /* + * Once a hugepage has become eligible for hugification, we don't mark + * it as ineligible just because it stops meeting the criteria (this + * could lead to situations where a hugepage that spends most of its + * time meeting the criteria never quite getting hugified if there are + * intervening deallocations). The idea is that the hugification delay + * will allow them to get purged, reseting their "hugify-allowed" bit. + * If they don't get purged, then the hugification isn't hurting and + * might help. As an exception, we don't hugify hugepages that are now + * empty; it definitely doesn't help there until the hugepage gets + * reused, which is likely not for a while. + */ + if (hpdata_nactive_get(ps) == 0) { + hpdata_disallow_hugify(ps); + } +} + +static bool +hpa_shard_has_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard) { + malloc_mutex_assert_owner(tsdn, &shard->mtx); + hpdata_t *to_hugify = psset_pick_hugify(&shard->psset); + return to_hugify != NULL || hpa_should_purge(tsdn, shard); +} + +/* Returns whether or not we purged anything. */ +static bool +hpa_try_purge(tsdn_t *tsdn, hpa_shard_t *shard) { + malloc_mutex_assert_owner(tsdn, &shard->mtx); + + hpdata_t *to_purge = psset_pick_purge(&shard->psset); + if (to_purge == NULL) { + return false; + } + assert(hpdata_purge_allowed_get(to_purge)); + assert(!hpdata_changing_state_get(to_purge)); + + /* + * Don't let anyone else purge or hugify this page while + * we're purging it (allocations and deallocations are + * OK). + */ + psset_update_begin(&shard->psset, to_purge); + assert(hpdata_alloc_allowed_get(to_purge)); + hpdata_mid_purge_set(to_purge, true); + hpdata_purge_allowed_set(to_purge, false); + hpdata_disallow_hugify(to_purge); + /* + * Unlike with hugification (where concurrent + * allocations are allowed), concurrent allocation out + * of a hugepage being purged is unsafe; we might hand + * out an extent for an allocation and then purge it + * (clearing out user data). + */ + hpdata_alloc_allowed_set(to_purge, false); + psset_update_end(&shard->psset, to_purge); + + /* Gather all the metadata we'll need during the purge. */ + bool dehugify = hpdata_huge_get(to_purge); + hpdata_purge_state_t purge_state; + size_t num_to_purge = hpdata_purge_begin(to_purge, &purge_state); + + shard->npending_purge += num_to_purge; + + malloc_mutex_unlock(tsdn, &shard->mtx); + + /* Actually do the purging, now that the lock is dropped. 
*/ + if (dehugify) { + shard->central->hooks.dehugify(hpdata_addr_get(to_purge), + HUGEPAGE); + } + size_t total_purged = 0; + uint64_t purges_this_pass = 0; + void *purge_addr; + size_t purge_size; + while (hpdata_purge_next(to_purge, &purge_state, &purge_addr, + &purge_size)) { + total_purged += purge_size; + assert(total_purged <= HUGEPAGE); + purges_this_pass++; + shard->central->hooks.purge(purge_addr, purge_size); + } + + malloc_mutex_lock(tsdn, &shard->mtx); + /* The shard updates */ + shard->npending_purge -= num_to_purge; + shard->stats.npurge_passes++; + shard->stats.npurges += purges_this_pass; + shard->central->hooks.curtime(&shard->last_purge, + /* first_reading */ false); + if (dehugify) { + shard->stats.ndehugifies++; + } + + /* The hpdata updates. */ + psset_update_begin(&shard->psset, to_purge); + if (dehugify) { + hpdata_dehugify(to_purge); + } + hpdata_purge_end(to_purge, &purge_state); + hpdata_mid_purge_set(to_purge, false); + + hpdata_alloc_allowed_set(to_purge, true); + hpa_update_purge_hugify_eligibility(tsdn, shard, to_purge); + + psset_update_end(&shard->psset, to_purge); + + return true; +} + +/* Returns whether or not we hugified anything. */ +static bool +hpa_try_hugify(tsdn_t *tsdn, hpa_shard_t *shard) { + malloc_mutex_assert_owner(tsdn, &shard->mtx); + + if (hpa_hugify_blocked_by_ndirty(tsdn, shard)) { + return false; + } + + hpdata_t *to_hugify = psset_pick_hugify(&shard->psset); + if (to_hugify == NULL) { + return false; + } + assert(hpdata_hugify_allowed_get(to_hugify)); + assert(!hpdata_changing_state_get(to_hugify)); + + /* Make sure that it's been hugifiable for long enough. */ + nstime_t time_hugify_allowed = hpdata_time_hugify_allowed(to_hugify); + uint64_t millis = shard->central->hooks.ms_since(&time_hugify_allowed); + if (millis < shard->opts.hugify_delay_ms) { + return false; + } + + /* + * Don't let anyone else purge or hugify this page while + * we're hugifying it (allocations and deallocations are + * OK). + */ + psset_update_begin(&shard->psset, to_hugify); + hpdata_mid_hugify_set(to_hugify, true); + hpdata_purge_allowed_set(to_hugify, false); + hpdata_disallow_hugify(to_hugify); + assert(hpdata_alloc_allowed_get(to_hugify)); + psset_update_end(&shard->psset, to_hugify); + + malloc_mutex_unlock(tsdn, &shard->mtx); + + shard->central->hooks.hugify(hpdata_addr_get(to_hugify), HUGEPAGE); + + malloc_mutex_lock(tsdn, &shard->mtx); + shard->stats.nhugifies++; + + psset_update_begin(&shard->psset, to_hugify); + hpdata_hugify(to_hugify); + hpdata_mid_hugify_set(to_hugify, false); + hpa_update_purge_hugify_eligibility(tsdn, shard, to_hugify); + psset_update_end(&shard->psset, to_hugify); + + return true; +} + +/* + * Execution of deferred work is forced if it's triggered by an explicit + * hpa_shard_do_deferred_work() call. + */ +static void +hpa_shard_maybe_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard, + bool forced) { + malloc_mutex_assert_owner(tsdn, &shard->mtx); + if (!forced && shard->opts.deferral_allowed) { + return; + } + /* + * If we're on a background thread, do work so long as there's work to + * be done. Otherwise, bound latency to not be *too* bad by doing at + * most a small fixed number of operations. + */ + bool hugified = false; + bool purged = false; + size_t max_ops = (forced ? (size_t)-1 : 16); + size_t nops = 0; + do { + /* + * Always purge before hugifying, to make sure we get some + * ability to hit our quiescence targets. 
+ */ + purged = false; + while (hpa_should_purge(tsdn, shard) && nops < max_ops) { + purged = hpa_try_purge(tsdn, shard); + if (purged) { + nops++; + } + } + hugified = hpa_try_hugify(tsdn, shard); + if (hugified) { + nops++; + } + malloc_mutex_assert_owner(tsdn, &shard->mtx); + malloc_mutex_assert_owner(tsdn, &shard->mtx); + } while ((hugified || purged) && nops < max_ops); +} + +static edata_t * +hpa_try_alloc_one_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size, + bool *oom) { + bool err; + edata_t *edata = edata_cache_fast_get(tsdn, &shard->ecf); + if (edata == NULL) { + *oom = true; + return NULL; + } + + hpdata_t *ps = psset_pick_alloc(&shard->psset, size); + if (ps == NULL) { + edata_cache_fast_put(tsdn, &shard->ecf, edata); + return NULL; + } + + psset_update_begin(&shard->psset, ps); + + if (hpdata_empty(ps)) { + /* + * If the pageslab used to be empty, treat it as though it's + * brand new for fragmentation-avoidance purposes; what we're + * trying to approximate is the age of the allocations *in* that + * pageslab, and the allocations in the new pageslab are + * definitionally the youngest in this hpa shard. + */ + hpdata_age_set(ps, shard->age_counter++); + } + + void *addr = hpdata_reserve_alloc(ps, size); + edata_init(edata, shard->ind, addr, size, /* slab */ false, + SC_NSIZES, /* sn */ hpdata_age_get(ps), extent_state_active, + /* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA, + EXTENT_NOT_HEAD); + edata_ps_set(edata, ps); + + /* + * This could theoretically be moved outside of the critical section, + * but that introduces the potential for a race. Without the lock, the + * (initially nonempty, since this is the reuse pathway) pageslab we + * allocated out of could become otherwise empty while the lock is + * dropped. This would force us to deal with a pageslab eviction down + * the error pathway, which is a pain. + */ + err = emap_register_boundary(tsdn, shard->emap, edata, + SC_NSIZES, /* slab */ false); + if (err) { + hpdata_unreserve(ps, edata_addr_get(edata), + edata_size_get(edata)); + /* + * We should arguably reset dirty state here, but this would + * require some sort of prepare + commit functionality that's a + * little much to deal with for now. + * + * We don't have a do_deferred_work down this pathway, on the + * principle that we didn't *really* affect shard state (we + * tweaked the stats, but our tweaks weren't really accurate). 
+ */ + psset_update_end(&shard->psset, ps); + edata_cache_fast_put(tsdn, &shard->ecf, edata); + *oom = true; + return NULL; + } + + hpa_update_purge_hugify_eligibility(tsdn, shard, ps); + psset_update_end(&shard->psset, ps); + return edata; +} + +static size_t +hpa_try_alloc_batch_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size, + bool *oom, size_t nallocs, edata_list_active_t *results, + bool *deferred_work_generated) { + malloc_mutex_lock(tsdn, &shard->mtx); + size_t nsuccess = 0; + for (; nsuccess < nallocs; nsuccess++) { + edata_t *edata = hpa_try_alloc_one_no_grow(tsdn, shard, size, + oom); + if (edata == NULL) { + break; + } + edata_list_active_append(results, edata); + } + + hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ false); + *deferred_work_generated = hpa_shard_has_deferred_work(tsdn, shard); + malloc_mutex_unlock(tsdn, &shard->mtx); + return nsuccess; +} + +static size_t +hpa_alloc_batch_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size, + size_t nallocs, edata_list_active_t *results, + bool *deferred_work_generated) { + assert(size <= shard->opts.slab_max_alloc); + bool oom = false; + + size_t nsuccess = hpa_try_alloc_batch_no_grow(tsdn, shard, size, &oom, + nallocs, results, deferred_work_generated); + + if (nsuccess == nallocs || oom) { + return nsuccess; + } + + /* + * We didn't OOM, but weren't able to fill everything requested of us; + * try to grow. + */ + malloc_mutex_lock(tsdn, &shard->grow_mtx); + /* + * Check for grow races; maybe some earlier thread expanded the psset + * in between when we dropped the main mutex and grabbed the grow mutex. + */ + nsuccess += hpa_try_alloc_batch_no_grow(tsdn, shard, size, &oom, + nallocs - nsuccess, results, deferred_work_generated); + if (nsuccess == nallocs || oom) { + malloc_mutex_unlock(tsdn, &shard->grow_mtx); + return nsuccess; + } + + /* + * Note that we don't hold shard->mtx here (while growing); + * deallocations (and allocations of smaller sizes) may still succeed + * while we're doing this potentially expensive system call. + */ + hpdata_t *ps = hpa_central_extract(tsdn, shard->central, size, &oom); + if (ps == NULL) { + malloc_mutex_unlock(tsdn, &shard->grow_mtx); + return nsuccess; + } + + /* + * We got the pageslab; allocate from it. This does an unlock followed + * by a lock on the same mutex, and holds the grow mutex while doing + * deferred work, but this is an uncommon path; the simplicity is worth + * it. + */ + malloc_mutex_lock(tsdn, &shard->mtx); + psset_insert(&shard->psset, ps); + malloc_mutex_unlock(tsdn, &shard->mtx); + + nsuccess += hpa_try_alloc_batch_no_grow(tsdn, shard, size, &oom, + nallocs - nsuccess, results, deferred_work_generated); + /* + * Drop grow_mtx before doing deferred work; other threads blocked on it + * should be allowed to proceed while we're working. 
+ */ + malloc_mutex_unlock(tsdn, &shard->grow_mtx); + + return nsuccess; +} + +static hpa_shard_t * +hpa_from_pai(pai_t *self) { + assert(self->alloc = &hpa_alloc); + assert(self->expand = &hpa_expand); + assert(self->shrink = &hpa_shrink); + assert(self->dalloc = &hpa_dalloc); + return (hpa_shard_t *)self; +} + +static size_t +hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs, + edata_list_active_t *results, bool *deferred_work_generated) { + assert(nallocs > 0); + assert((size & PAGE_MASK) == 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + hpa_shard_t *shard = hpa_from_pai(self); + + if (size > shard->opts.slab_max_alloc) { + return 0; + } + + size_t nsuccess = hpa_alloc_batch_psset(tsdn, shard, size, nallocs, + results, deferred_work_generated); + + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + /* + * Guard the sanity checks with config_debug because the loop cannot be + * proven non-circular by the compiler, even if everything within the + * loop is optimized away. + */ + if (config_debug) { + edata_t *edata; + ql_foreach(edata, &results->head, ql_link_active) { + emap_assert_mapped(tsdn, shard->emap, edata); + assert(edata_pai_get(edata) == EXTENT_PAI_HPA); + assert(edata_state_get(edata) == extent_state_active); + assert(edata_arena_ind_get(edata) == shard->ind); + assert(edata_szind_get_maybe_invalid(edata) == + SC_NSIZES); + assert(!edata_slab_get(edata)); + assert(edata_committed_get(edata)); + assert(edata_base_get(edata) == edata_addr_get(edata)); + assert(edata_base_get(edata) != NULL); + } + } + return nsuccess; +} + +static edata_t * +hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero, + bool guarded, bool frequent_reuse, bool *deferred_work_generated) { + assert((size & PAGE_MASK) == 0); + assert(!guarded); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + /* We don't handle alignment or zeroing for now. */ + if (alignment > PAGE || zero) { + return NULL; + } + /* + * An alloc with alignment == PAGE and zero == false is equivalent to a + * batch alloc of 1. Just do that, so we can share code. + */ + edata_list_active_t results; + edata_list_active_init(&results); + size_t nallocs = hpa_alloc_batch(tsdn, self, size, /* nallocs */ 1, + &results, deferred_work_generated); + assert(nallocs == 0 || nallocs == 1); + edata_t *edata = edata_list_active_first(&results); + return edata; +} + +static bool +hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, + size_t new_size, bool zero, bool *deferred_work_generated) { + /* Expand not yet supported. */ + return true; +} + +static bool +hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, + size_t old_size, size_t new_size, bool *deferred_work_generated) { + /* Shrink not yet supported. */ + return true; +} + +static void +hpa_dalloc_prepare_unlocked(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata) { + malloc_mutex_assert_not_owner(tsdn, &shard->mtx); + + assert(edata_pai_get(edata) == EXTENT_PAI_HPA); + assert(edata_state_get(edata) == extent_state_active); + assert(edata_arena_ind_get(edata) == shard->ind); + assert(edata_szind_get_maybe_invalid(edata) == SC_NSIZES); + assert(edata_committed_get(edata)); + assert(edata_base_get(edata) != NULL); + + /* + * Another thread shouldn't be trying to touch the metadata of an + * allocation being freed. 
The one exception is a merge attempt from a + * lower-addressed PAC extent; in this case we have a nominal race on + * the edata metadata bits, but in practice the fact that the PAI bits + * are different will prevent any further access. The race is bad, but + * benign in practice, and the long term plan is to track enough state + * in the rtree to prevent these merge attempts in the first place. + */ + edata_addr_set(edata, edata_base_get(edata)); + edata_zeroed_set(edata, false); + emap_deregister_boundary(tsdn, shard->emap, edata); +} + +static void +hpa_dalloc_locked(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata) { + malloc_mutex_assert_owner(tsdn, &shard->mtx); + + /* + * Release the metadata early, to avoid having to remember to do it + * while we're also doing tricky purging logic. First, we need to grab + * a few bits of metadata from it. + * + * Note that the shard mutex protects ps's metadata too; it wouldn't be + * correct to try to read most information out of it without the lock. + */ + hpdata_t *ps = edata_ps_get(edata); + /* Currently, all edatas come from pageslabs. */ + assert(ps != NULL); + void *unreserve_addr = edata_addr_get(edata); + size_t unreserve_size = edata_size_get(edata); + edata_cache_fast_put(tsdn, &shard->ecf, edata); + + psset_update_begin(&shard->psset, ps); + hpdata_unreserve(ps, unreserve_addr, unreserve_size); + hpa_update_purge_hugify_eligibility(tsdn, shard, ps); + psset_update_end(&shard->psset, ps); +} + +static void +hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list, + bool *deferred_work_generated) { + hpa_shard_t *shard = hpa_from_pai(self); + + edata_t *edata; + ql_foreach(edata, &list->head, ql_link_active) { + hpa_dalloc_prepare_unlocked(tsdn, shard, edata); + } + + malloc_mutex_lock(tsdn, &shard->mtx); + /* Now, remove from the list. */ + while ((edata = edata_list_active_first(list)) != NULL) { + edata_list_active_remove(list, edata); + hpa_dalloc_locked(tsdn, shard, edata); + } + hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ false); + *deferred_work_generated = + hpa_shard_has_deferred_work(tsdn, shard); + + malloc_mutex_unlock(tsdn, &shard->mtx); +} + +static void +hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata, + bool *deferred_work_generated) { + assert(!edata_guarded_get(edata)); + /* Just a dalloc_batch of size 1; this lets us share logic. */ + edata_list_active_t dalloc_list; + edata_list_active_init(&dalloc_list); + edata_list_active_append(&dalloc_list, edata); + hpa_dalloc_batch(tsdn, self, &dalloc_list, deferred_work_generated); +} + +/* + * Calculate time until either purging or hugification ought to happen. + * Called by background threads. + */ +static uint64_t +hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) { + hpa_shard_t *shard = hpa_from_pai(self); + uint64_t time_ns = BACKGROUND_THREAD_DEFERRED_MAX; + + malloc_mutex_lock(tsdn, &shard->mtx); + + hpdata_t *to_hugify = psset_pick_hugify(&shard->psset); + if (to_hugify != NULL) { + nstime_t time_hugify_allowed = + hpdata_time_hugify_allowed(to_hugify); + uint64_t since_hugify_allowed_ms = + shard->central->hooks.ms_since(&time_hugify_allowed); + /* + * If not enough time has passed since hugification was allowed, + * sleep for the rest. 
+ */ + if (since_hugify_allowed_ms < shard->opts.hugify_delay_ms) { + time_ns = shard->opts.hugify_delay_ms - + since_hugify_allowed_ms; + time_ns *= 1000 * 1000; + } else { + malloc_mutex_unlock(tsdn, &shard->mtx); + return BACKGROUND_THREAD_DEFERRED_MIN; + } + } + + if (hpa_should_purge(tsdn, shard)) { + /* + * If we haven't purged before, no need to check interval + * between purges. Simply purge as soon as possible. + */ + if (shard->stats.npurge_passes == 0) { + malloc_mutex_unlock(tsdn, &shard->mtx); + return BACKGROUND_THREAD_DEFERRED_MIN; + } + uint64_t since_last_purge_ms = shard->central->hooks.ms_since( + &shard->last_purge); + + if (since_last_purge_ms < shard->opts.min_purge_interval_ms) { + uint64_t until_purge_ns; + until_purge_ns = shard->opts.min_purge_interval_ms - + since_last_purge_ms; + until_purge_ns *= 1000 * 1000; + + if (until_purge_ns < time_ns) { + time_ns = until_purge_ns; + } + } else { + time_ns = BACKGROUND_THREAD_DEFERRED_MIN; + } + } + malloc_mutex_unlock(tsdn, &shard->mtx); + return time_ns; +} + +void +hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard) { + hpa_do_consistency_checks(shard); + + malloc_mutex_lock(tsdn, &shard->mtx); + edata_cache_fast_disable(tsdn, &shard->ecf); + malloc_mutex_unlock(tsdn, &shard->mtx); +} + +static void +hpa_shard_assert_stats_empty(psset_bin_stats_t *bin_stats) { + assert(bin_stats->npageslabs == 0); + assert(bin_stats->nactive == 0); +} + +static void +hpa_assert_empty(tsdn_t *tsdn, hpa_shard_t *shard, psset_t *psset) { + malloc_mutex_assert_owner(tsdn, &shard->mtx); + for (int huge = 0; huge <= 1; huge++) { + hpa_shard_assert_stats_empty(&psset->stats.full_slabs[huge]); + for (pszind_t i = 0; i < PSSET_NPSIZES; i++) { + hpa_shard_assert_stats_empty( + &psset->stats.nonfull_slabs[i][huge]); + } + } +} + +void +hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard) { + hpa_do_consistency_checks(shard); + /* + * By the time we're here, the arena code should have dalloc'd all the + * active extents, which means we should have eventually evicted + * everything from the psset, so it shouldn't be able to serve even a + * 1-page allocation. + */ + if (config_debug) { + malloc_mutex_lock(tsdn, &shard->mtx); + hpa_assert_empty(tsdn, shard, &shard->psset); + malloc_mutex_unlock(tsdn, &shard->mtx); + } + hpdata_t *ps; + while ((ps = psset_pick_alloc(&shard->psset, PAGE)) != NULL) { + /* There should be no allocations anywhere. 
*/ + assert(hpdata_empty(ps)); + psset_remove(&shard->psset, ps); + shard->central->hooks.unmap(hpdata_addr_get(ps), HUGEPAGE); + } +} + +void +hpa_shard_set_deferral_allowed(tsdn_t *tsdn, hpa_shard_t *shard, + bool deferral_allowed) { + hpa_do_consistency_checks(shard); + + malloc_mutex_lock(tsdn, &shard->mtx); + bool deferral_previously_allowed = shard->opts.deferral_allowed; + shard->opts.deferral_allowed = deferral_allowed; + if (deferral_previously_allowed && !deferral_allowed) { + hpa_shard_maybe_do_deferred_work(tsdn, shard, + /* forced */ true); + } + malloc_mutex_unlock(tsdn, &shard->mtx); +} + +void +hpa_shard_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard) { + hpa_do_consistency_checks(shard); + + malloc_mutex_lock(tsdn, &shard->mtx); + hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ true); + malloc_mutex_unlock(tsdn, &shard->mtx); +} + +void +hpa_shard_prefork3(tsdn_t *tsdn, hpa_shard_t *shard) { + hpa_do_consistency_checks(shard); + + malloc_mutex_prefork(tsdn, &shard->grow_mtx); +} + +void +hpa_shard_prefork4(tsdn_t *tsdn, hpa_shard_t *shard) { + hpa_do_consistency_checks(shard); + + malloc_mutex_prefork(tsdn, &shard->mtx); +} + +void +hpa_shard_postfork_parent(tsdn_t *tsdn, hpa_shard_t *shard) { + hpa_do_consistency_checks(shard); + + malloc_mutex_postfork_parent(tsdn, &shard->grow_mtx); + malloc_mutex_postfork_parent(tsdn, &shard->mtx); +} + +void +hpa_shard_postfork_child(tsdn_t *tsdn, hpa_shard_t *shard) { + hpa_do_consistency_checks(shard); + + malloc_mutex_postfork_child(tsdn, &shard->grow_mtx); + malloc_mutex_postfork_child(tsdn, &shard->mtx); +} diff --git a/contrib/jemalloc/src/hpa_hooks.c b/contrib/jemalloc/src/hpa_hooks.c new file mode 100644 index 00000000000000..ade581e8dc0311 --- /dev/null +++ b/contrib/jemalloc/src/hpa_hooks.c @@ -0,0 +1,63 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/hpa_hooks.h" + +static void *hpa_hooks_map(size_t size); +static void hpa_hooks_unmap(void *ptr, size_t size); +static void hpa_hooks_purge(void *ptr, size_t size); +static void hpa_hooks_hugify(void *ptr, size_t size); +static void hpa_hooks_dehugify(void *ptr, size_t size); +static void hpa_hooks_curtime(nstime_t *r_nstime, bool first_reading); +static uint64_t hpa_hooks_ms_since(nstime_t *past_nstime); + +hpa_hooks_t hpa_hooks_default = { + &hpa_hooks_map, + &hpa_hooks_unmap, + &hpa_hooks_purge, + &hpa_hooks_hugify, + &hpa_hooks_dehugify, + &hpa_hooks_curtime, + &hpa_hooks_ms_since +}; + +static void * +hpa_hooks_map(size_t size) { + bool commit = true; + return pages_map(NULL, size, HUGEPAGE, &commit); +} + +static void +hpa_hooks_unmap(void *ptr, size_t size) { + pages_unmap(ptr, size); +} + +static void +hpa_hooks_purge(void *ptr, size_t size) { + pages_purge_forced(ptr, size); +} + +static void +hpa_hooks_hugify(void *ptr, size_t size) { + bool err = pages_huge(ptr, size); + (void)err; +} + +static void +hpa_hooks_dehugify(void *ptr, size_t size) { + bool err = pages_nohuge(ptr, size); + (void)err; +} + +static void +hpa_hooks_curtime(nstime_t *r_nstime, bool first_reading) { + if (first_reading) { + nstime_init_zero(r_nstime); + } + nstime_update(r_nstime); +} + +static uint64_t +hpa_hooks_ms_since(nstime_t *past_nstime) { + return nstime_ns_since(past_nstime) / 1000 / 1000; +} diff --git a/contrib/jemalloc/src/hpdata.c b/contrib/jemalloc/src/hpdata.c new file mode 100644 index 00000000000000..e7d7294c782278 --- /dev/null +++ b/contrib/jemalloc/src/hpdata.c @@ 
-0,0 +1,325 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/hpdata.h" + +static int +hpdata_age_comp(const hpdata_t *a, const hpdata_t *b) { + uint64_t a_age = hpdata_age_get(a); + uint64_t b_age = hpdata_age_get(b); + /* + * hpdata ages are operation counts in the psset; no two should be the + * same. + */ + assert(a_age != b_age); + return (a_age > b_age) - (a_age < b_age); +} + +ph_gen(, hpdata_age_heap, hpdata_t, age_link, hpdata_age_comp) + +void +hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age) { + hpdata_addr_set(hpdata, addr); + hpdata_age_set(hpdata, age); + hpdata->h_huge = false; + hpdata->h_alloc_allowed = true; + hpdata->h_in_psset_alloc_container = false; + hpdata->h_purge_allowed = false; + hpdata->h_hugify_allowed = false; + hpdata->h_in_psset_hugify_container = false; + hpdata->h_mid_purge = false; + hpdata->h_mid_hugify = false; + hpdata->h_updating = false; + hpdata->h_in_psset = false; + hpdata_longest_free_range_set(hpdata, HUGEPAGE_PAGES); + hpdata->h_nactive = 0; + fb_init(hpdata->active_pages, HUGEPAGE_PAGES); + hpdata->h_ntouched = 0; + fb_init(hpdata->touched_pages, HUGEPAGE_PAGES); + + hpdata_assert_consistent(hpdata); +} + +void * +hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz) { + hpdata_assert_consistent(hpdata); + /* + * This is a metadata change; the hpdata should therefore either not be + * in the psset, or should have explicitly marked itself as being + * mid-update. + */ + assert(!hpdata->h_in_psset || hpdata->h_updating); + assert(hpdata->h_alloc_allowed); + assert((sz & PAGE_MASK) == 0); + size_t npages = sz >> LG_PAGE; + assert(npages <= hpdata_longest_free_range_get(hpdata)); + + size_t result; + + size_t start = 0; + /* + * These are dead stores, but the compiler will issue warnings on them + * since it can't tell statically that found is always true below. + */ + size_t begin = 0; + size_t len = 0; + + size_t largest_unchosen_range = 0; + while (true) { + bool found = fb_urange_iter(hpdata->active_pages, + HUGEPAGE_PAGES, start, &begin, &len); + /* + * A precondition to this function is that hpdata must be able + * to serve the allocation. + */ + assert(found); + assert(len <= hpdata_longest_free_range_get(hpdata)); + if (len >= npages) { + /* + * We use first-fit within the page slabs; this gives + * bounded worst-case fragmentation within a slab. It's + * not necessarily right; we could experiment with + * various other options. + */ + break; + } + if (len > largest_unchosen_range) { + largest_unchosen_range = len; + } + start = begin + len; + } + /* We found a range; remember it. */ + result = begin; + fb_set_range(hpdata->active_pages, HUGEPAGE_PAGES, begin, npages); + hpdata->h_nactive += npages; + + /* + * We might be about to dirty some memory for the first time; update our + * count if so. + */ + size_t new_dirty = fb_ucount(hpdata->touched_pages, HUGEPAGE_PAGES, + result, npages); + fb_set_range(hpdata->touched_pages, HUGEPAGE_PAGES, result, npages); + hpdata->h_ntouched += new_dirty; + + /* + * If we allocated out of a range that was the longest in the hpdata, it + * might be the only one of that size and we'll have to adjust the + * metadata. 
+ */ + if (len == hpdata_longest_free_range_get(hpdata)) { + start = begin + npages; + while (start < HUGEPAGE_PAGES) { + bool found = fb_urange_iter(hpdata->active_pages, + HUGEPAGE_PAGES, start, &begin, &len); + if (!found) { + break; + } + assert(len <= hpdata_longest_free_range_get(hpdata)); + if (len == hpdata_longest_free_range_get(hpdata)) { + largest_unchosen_range = len; + break; + } + if (len > largest_unchosen_range) { + largest_unchosen_range = len; + } + start = begin + len; + } + hpdata_longest_free_range_set(hpdata, largest_unchosen_range); + } + + hpdata_assert_consistent(hpdata); + return (void *)( + (uintptr_t)hpdata_addr_get(hpdata) + (result << LG_PAGE)); +} + +void +hpdata_unreserve(hpdata_t *hpdata, void *addr, size_t sz) { + hpdata_assert_consistent(hpdata); + /* See the comment in reserve. */ + assert(!hpdata->h_in_psset || hpdata->h_updating); + assert(((uintptr_t)addr & PAGE_MASK) == 0); + assert((sz & PAGE_MASK) == 0); + size_t begin = ((uintptr_t)addr - (uintptr_t)hpdata_addr_get(hpdata)) + >> LG_PAGE; + assert(begin < HUGEPAGE_PAGES); + size_t npages = sz >> LG_PAGE; + size_t old_longest_range = hpdata_longest_free_range_get(hpdata); + + fb_unset_range(hpdata->active_pages, HUGEPAGE_PAGES, begin, npages); + /* We might have just created a new, larger range. */ + size_t new_begin = (fb_fls(hpdata->active_pages, HUGEPAGE_PAGES, + begin) + 1); + size_t new_end = fb_ffs(hpdata->active_pages, HUGEPAGE_PAGES, + begin + npages - 1); + size_t new_range_len = new_end - new_begin; + + if (new_range_len > old_longest_range) { + hpdata_longest_free_range_set(hpdata, new_range_len); + } + + hpdata->h_nactive -= npages; + + hpdata_assert_consistent(hpdata); +} + +size_t +hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state) { + hpdata_assert_consistent(hpdata); + /* + * See the comment below; we might purge any inactive extent, so it's + * unsafe for any other thread to turn any inactive extent active while + * we're operating on it. + */ + assert(!hpdata_alloc_allowed_get(hpdata)); + + purge_state->npurged = 0; + purge_state->next_purge_search_begin = 0; + + /* + * Initialize to_purge. + * + * It's possible to end up in situations where two dirty extents are + * separated by a retained extent: + * - 1 page allocated. + * - 1 page allocated. + * - 1 pages allocated. + * + * If the middle page is freed and purged, and then the first and third + * pages are freed, and then another purge pass happens, the hpdata + * looks like this: + * - 1 page dirty. + * - 1 page retained. + * - 1 page dirty. + * + * But it's safe to do a single 3-page purge. + * + * We do this by first computing the dirty pages, and then filling in + * any gaps by extending each range in the dirty bitmap to extend until + * the next active page. This purges more pages, but the expensive part + * of purging is the TLB shootdowns, rather than the kernel state + * tracking; doing a little bit more of the latter is fine if it saves + * us from doing some of the former. + */ + + /* + * The dirty pages are those that are touched but not active. Note that + * in a normal-ish case, HUGEPAGE_PAGES is something like 512 and the + * fb_group_t is 64 bits, so this is 64 bytes, spread across 8 + * fb_group_ts. 
+ */ + fb_group_t dirty_pages[FB_NGROUPS(HUGEPAGE_PAGES)]; + fb_init(dirty_pages, HUGEPAGE_PAGES); + fb_bit_not(dirty_pages, hpdata->active_pages, HUGEPAGE_PAGES); + fb_bit_and(dirty_pages, dirty_pages, hpdata->touched_pages, + HUGEPAGE_PAGES); + + fb_init(purge_state->to_purge, HUGEPAGE_PAGES); + size_t next_bit = 0; + while (next_bit < HUGEPAGE_PAGES) { + size_t next_dirty = fb_ffs(dirty_pages, HUGEPAGE_PAGES, + next_bit); + /* Recall that fb_ffs returns nbits if no set bit is found. */ + if (next_dirty == HUGEPAGE_PAGES) { + break; + } + size_t next_active = fb_ffs(hpdata->active_pages, + HUGEPAGE_PAGES, next_dirty); + /* + * Don't purge past the end of the dirty extent, into retained + * pages. This helps the kernel a tiny bit, but honestly it's + * mostly helpful for testing (where we tend to write test cases + * that think in terms of the dirty ranges). + */ + ssize_t last_dirty = fb_fls(dirty_pages, HUGEPAGE_PAGES, + next_active - 1); + assert(last_dirty >= 0); + assert((size_t)last_dirty >= next_dirty); + assert((size_t)last_dirty - next_dirty + 1 <= HUGEPAGE_PAGES); + + fb_set_range(purge_state->to_purge, HUGEPAGE_PAGES, next_dirty, + last_dirty - next_dirty + 1); + next_bit = next_active + 1; + } + + /* We should purge, at least, everything dirty. */ + size_t ndirty = hpdata->h_ntouched - hpdata->h_nactive; + purge_state->ndirty_to_purge = ndirty; + assert(ndirty <= fb_scount( + purge_state->to_purge, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)); + assert(ndirty == fb_scount(dirty_pages, HUGEPAGE_PAGES, 0, + HUGEPAGE_PAGES)); + + hpdata_assert_consistent(hpdata); + + return ndirty; +} + +bool +hpdata_purge_next(hpdata_t *hpdata, hpdata_purge_state_t *purge_state, + void **r_purge_addr, size_t *r_purge_size) { + /* + * Note that we don't have a consistency check here; we're accessing + * hpdata without synchronization, and therefore have no right to expect + * a consistent state. + */ + assert(!hpdata_alloc_allowed_get(hpdata)); + + if (purge_state->next_purge_search_begin == HUGEPAGE_PAGES) { + return false; + } + size_t purge_begin; + size_t purge_len; + bool found_range = fb_srange_iter(purge_state->to_purge, HUGEPAGE_PAGES, + purge_state->next_purge_search_begin, &purge_begin, &purge_len); + if (!found_range) { + return false; + } + + *r_purge_addr = (void *)( + (uintptr_t)hpdata_addr_get(hpdata) + purge_begin * PAGE); + *r_purge_size = purge_len * PAGE; + + purge_state->next_purge_search_begin = purge_begin + purge_len; + purge_state->npurged += purge_len; + assert(purge_state->npurged <= HUGEPAGE_PAGES); + + return true; +} + +void +hpdata_purge_end(hpdata_t *hpdata, hpdata_purge_state_t *purge_state) { + assert(!hpdata_alloc_allowed_get(hpdata)); + hpdata_assert_consistent(hpdata); + /* See the comment in reserve. 
*/ + assert(!hpdata->h_in_psset || hpdata->h_updating); + + assert(purge_state->npurged == fb_scount(purge_state->to_purge, + HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)); + assert(purge_state->npurged >= purge_state->ndirty_to_purge); + + fb_bit_not(purge_state->to_purge, purge_state->to_purge, + HUGEPAGE_PAGES); + fb_bit_and(hpdata->touched_pages, hpdata->touched_pages, + purge_state->to_purge, HUGEPAGE_PAGES); + assert(hpdata->h_ntouched >= purge_state->ndirty_to_purge); + hpdata->h_ntouched -= purge_state->ndirty_to_purge; + + hpdata_assert_consistent(hpdata); +} + +void +hpdata_hugify(hpdata_t *hpdata) { + hpdata_assert_consistent(hpdata); + hpdata->h_huge = true; + fb_set_range(hpdata->touched_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES); + hpdata->h_ntouched = HUGEPAGE_PAGES; + hpdata_assert_consistent(hpdata); +} + +void +hpdata_dehugify(hpdata_t *hpdata) { + hpdata_assert_consistent(hpdata); + hpdata->h_huge = false; + hpdata_assert_consistent(hpdata); +} diff --git a/contrib/jemalloc/src/inspect.c b/contrib/jemalloc/src/inspect.c new file mode 100644 index 00000000000000..911b5d524ac3cd --- /dev/null +++ b/contrib/jemalloc/src/inspect.c @@ -0,0 +1,77 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +void +inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr, size_t *nfree, + size_t *nregs, size_t *size) { + assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL); + + const edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); + if (unlikely(edata == NULL)) { + *nfree = *nregs = *size = 0; + return; + } + + *size = edata_size_get(edata); + if (!edata_slab_get(edata)) { + *nfree = 0; + *nregs = 1; + } else { + *nfree = edata_nfree_get(edata); + *nregs = bin_infos[edata_szind_get(edata)].nregs; + assert(*nfree <= *nregs); + assert(*nfree * edata_usize_get(edata) <= *size); + } +} + +void +inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr, + size_t *nfree, size_t *nregs, size_t *size, size_t *bin_nfree, + size_t *bin_nregs, void **slabcur_addr) { + assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL + && bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL); + + const edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); + if (unlikely(edata == NULL)) { + *nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0; + *slabcur_addr = NULL; + return; + } + + *size = edata_size_get(edata); + if (!edata_slab_get(edata)) { + *nfree = *bin_nfree = *bin_nregs = 0; + *nregs = 1; + *slabcur_addr = NULL; + return; + } + + *nfree = edata_nfree_get(edata); + const szind_t szind = edata_szind_get(edata); + *nregs = bin_infos[szind].nregs; + assert(*nfree <= *nregs); + assert(*nfree * edata_usize_get(edata) <= *size); + + arena_t *arena = (arena_t *)atomic_load_p( + &arenas[edata_arena_ind_get(edata)], ATOMIC_RELAXED); + assert(arena != NULL); + const unsigned binshard = edata_binshard_get(edata); + bin_t *bin = arena_get_bin(arena, szind, binshard); + + malloc_mutex_lock(tsdn, &bin->lock); + if (config_stats) { + *bin_nregs = *nregs * bin->stats.curslabs; + assert(*bin_nregs >= bin->stats.curregs); + *bin_nfree = *bin_nregs - bin->stats.curregs; + } else { + *bin_nfree = *bin_nregs = 0; + } + edata_t *slab; + if (bin->slabcur != NULL) { + slab = bin->slabcur; + } else { + slab = edata_heap_first(&bin->slabs_nonfull); + } + *slabcur_addr = slab != NULL ? 
edata_addr_get(slab) : NULL; + malloc_mutex_unlock(tsdn, &bin->lock); +} diff --git a/contrib/jemalloc/src/jemalloc.c b/contrib/jemalloc/src/jemalloc.c index fefb719ac5c45c..e4b183d1a24d4a 100644 --- a/contrib/jemalloc/src/jemalloc.c +++ b/contrib/jemalloc/src/jemalloc.c @@ -4,20 +4,26 @@ #include "jemalloc/internal/assert.h" #include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/buf_writer.h" #include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/emap.h" #include "jemalloc/internal/extent_dss.h" #include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/fxp.h" +#include "jemalloc/internal/san.h" #include "jemalloc/internal/hook.h" #include "jemalloc/internal/jemalloc_internal_types.h" #include "jemalloc/internal/log.h" #include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/nstime.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/safety_check.h" #include "jemalloc/internal/sc.h" #include "jemalloc/internal/spin.h" #include "jemalloc/internal/sz.h" #include "jemalloc/internal/ticker.h" +#include "jemalloc/internal/thread_event.h" #include "jemalloc/internal/util.h" /******************************************************************************/ @@ -33,6 +39,29 @@ const char *je_malloc_conf JEMALLOC_ATTR(weak) #endif ; +/* + * The usual rule is that the closer to runtime you are, the higher priority + * your configuration settings are (so the jemalloc config options get lower + * priority than the per-binary setting, which gets lower priority than the /etc + * setting, which gets lower priority than the environment settings). + * + * But it's a fairly common use case in some testing environments for a user to + * be able to control the binary, but nothing else (e.g. a performancy canary + * uses the production OS and environment variables, but can run any binary in + * those circumstances). For these use cases, it's handy to have an in-binary + * mechanism for overriding environment variable settings, with the idea that if + * the results are positive they get promoted to the official settings, and + * moved from the binary to the environment variable. + * + * We don't actually want this to be widespread, so we'll give it a silly name + * and not mention it in headers or documentation. + */ +const char *je_malloc_conf_2_conf_harder +#ifndef _WIN32 + JEMALLOC_ATTR(weak) +#endif + ; + bool opt_abort = #ifdef JEMALLOC_DEBUG true @@ -70,16 +99,73 @@ bool opt_junk_free = false #endif ; +bool opt_trust_madvise = +#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS + false +#else + true +#endif + ; + +bool opt_cache_oblivious = +#ifdef JEMALLOC_CACHE_OBLIVIOUS + true +#else + false +#endif + ; + +zero_realloc_action_t opt_zero_realloc_action = +#ifdef JEMALLOC_ZERO_REALLOC_DEFAULT_FREE + zero_realloc_action_free +#else + zero_realloc_action_alloc +#endif + ; + +atomic_zu_t zero_realloc_count = ATOMIC_INIT(0); + +const char *zero_realloc_mode_names[] = { + "alloc", + "free", + "abort", +}; + +/* + * These are the documented values for junk fill debugging facilities -- see the + * man page. 
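As a usage sketch of the in-binary override described in the comment above: the binary under test can define the weak symbol itself, so its string outranks MALLOC_CONF and /etc/malloc.conf. The exact exported name depends on the configured public-symbol prefix (upstream documents it as malloc_conf_2_conf_harder), and the option values here are arbitrary examples:

```
/*
 * Defined in the binary under test; takes priority over the environment
 * per the ordering described above.
 */
const char *malloc_conf_2_conf_harder = "narenas:2,tcache:false";
```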
+ */ +static const uint8_t junk_alloc_byte = 0xa5; +static const uint8_t junk_free_byte = 0x5a; + +static void default_junk_alloc(void *ptr, size_t usize) { + memset(ptr, junk_alloc_byte, usize); +} + +static void default_junk_free(void *ptr, size_t usize) { + memset(ptr, junk_free_byte, usize); +} + +void (*junk_alloc_callback)(void *ptr, size_t size) = &default_junk_alloc; +void (*junk_free_callback)(void *ptr, size_t size) = &default_junk_free; bool opt_utrace = false; bool opt_xmalloc = false; +bool opt_experimental_infallible_new = false; bool opt_zero = false; unsigned opt_narenas = 0; +fxp_t opt_narenas_ratio = FXP_INIT_INT(4); unsigned ncpus; /* Protects arenas initialization. */ malloc_mutex_t arenas_lock; + +/* The global hpa, and whether it's on. */ +bool opt_hpa = false; +hpa_shard_opts_t opt_hpa_opts = HPA_SHARD_OPTS_DEFAULT; +sec_opts_t opt_hpa_sec_opts = SEC_OPTS_DEFAULT; + /* * Arenas that are used to service external requests. Not all elements of the * arenas array are necessarily used; arenas are created lazily as needed. @@ -98,13 +184,7 @@ static arena_t *a0; /* arenas[0]. */ unsigned narenas_auto; unsigned manual_arena_base; -typedef enum { - malloc_init_uninitialized = 3, - malloc_init_a0_initialized = 2, - malloc_init_recursible = 1, - malloc_init_initialized = 0 /* Common case --> jnz. */ -} malloc_init_t; -static malloc_init_t malloc_init_state = malloc_init_uninitialized; +malloc_init_t malloc_init_state = malloc_init_uninitialized; /* False should be the common case. Set to true to trigger initialization. */ bool malloc_slow = true; @@ -184,7 +264,7 @@ typedef struct { ut.p = (a); \ ut.s = (b); \ ut.r = (c); \ - utrace(&ut, sizeof(ut)); \ + UTRACE_CALL(&ut, sizeof(ut)); \ errno = utrace_serrno; \ } \ } while (0) @@ -209,11 +289,6 @@ static bool malloc_init_hard(void); * Begin miscellaneous support functions. */ -bool -malloc_initialized(void) { - return (malloc_init_state == malloc_init_initialized); -} - JEMALLOC_ALWAYS_INLINE bool malloc_init_a0(void) { if (unlikely(malloc_init_state == malloc_init_uninitialized)) { @@ -261,7 +336,7 @@ a0dalloc(void *ptr) { } /* - * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive + * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive * situations that cannot tolerate TLS variable access (TLS allocation and very * early internal data structure initialization). */ @@ -319,7 +394,7 @@ narenas_total_get(void) { /* Create a new arena and insert it into the arenas array at index ind. */ static arena_t * -arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { +arena_init_locked(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) { arena_t *arena; assert(ind <= narenas_total_get()); @@ -341,7 +416,7 @@ arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { } /* Actually initialize the arena. 
*/ - arena = arena_new(tsdn, ind, extent_hooks); + arena = arena_new(tsdn, ind, config); return arena; } @@ -365,11 +440,11 @@ arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) { } arena_t * -arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { +arena_init(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) { arena_t *arena; malloc_mutex_lock(tsdn, &arenas_lock); - arena = arena_init_locked(tsdn, ind, extent_hooks); + arena = arena_init_locked(tsdn, ind, config); malloc_mutex_unlock(tsdn, &arenas_lock); arena_new_create_background_thread(tsdn, ind); @@ -398,14 +473,19 @@ arena_bind(tsd_t *tsd, unsigned ind, bool internal) { } void -arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) { - arena_t *oldarena, *newarena; +arena_migrate(tsd_t *tsd, arena_t *oldarena, arena_t *newarena) { + assert(oldarena != NULL); + assert(newarena != NULL); - oldarena = arena_get(tsd_tsdn(tsd), oldind, false); - newarena = arena_get(tsd_tsdn(tsd), newind, false); arena_nthreads_dec(oldarena, false); arena_nthreads_inc(newarena, false); tsd_arena_set(tsd, newarena); + + if (arena_nthreads_get(oldarena, false) == 0) { + /* Purge if the old arena has no associated threads anymore. */ + arena_decay(tsd_tsdn(tsd), oldarena, + /* is_background_thread */ false, /* all */ true); + } } static void @@ -422,82 +502,6 @@ arena_unbind(tsd_t *tsd, unsigned ind, bool internal) { } } -arena_tdata_t * -arena_tdata_get_hard(tsd_t *tsd, unsigned ind) { - arena_tdata_t *tdata, *arenas_tdata_old; - arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); - unsigned narenas_tdata_old, i; - unsigned narenas_tdata = tsd_narenas_tdata_get(tsd); - unsigned narenas_actual = narenas_total_get(); - - /* - * Dissociate old tdata array (and set up for deallocation upon return) - * if it's too small. - */ - if (arenas_tdata != NULL && narenas_tdata < narenas_actual) { - arenas_tdata_old = arenas_tdata; - narenas_tdata_old = narenas_tdata; - arenas_tdata = NULL; - narenas_tdata = 0; - tsd_arenas_tdata_set(tsd, arenas_tdata); - tsd_narenas_tdata_set(tsd, narenas_tdata); - } else { - arenas_tdata_old = NULL; - narenas_tdata_old = 0; - } - - /* Allocate tdata array if it's missing. */ - if (arenas_tdata == NULL) { - bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd); - narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1; - - if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) { - *arenas_tdata_bypassp = true; - arenas_tdata = (arena_tdata_t *)a0malloc( - sizeof(arena_tdata_t) * narenas_tdata); - *arenas_tdata_bypassp = false; - } - if (arenas_tdata == NULL) { - tdata = NULL; - goto label_return; - } - assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp); - tsd_arenas_tdata_set(tsd, arenas_tdata); - tsd_narenas_tdata_set(tsd, narenas_tdata); - } - - /* - * Copy to tdata array. It's possible that the actual number of arenas - * has increased since narenas_total_get() was called above, but that - * causes no correctness issues unless two threads concurrently execute - * the arenas.create mallctl, which we trust mallctl synchronization to - * prevent. - */ - - /* Copy/initialize tickers. 
*/ - for (i = 0; i < narenas_actual; i++) { - if (i < narenas_tdata_old) { - ticker_copy(&arenas_tdata[i].decay_ticker, - &arenas_tdata_old[i].decay_ticker); - } else { - ticker_init(&arenas_tdata[i].decay_ticker, - DECAY_NTICKS_PER_UPDATE); - } - } - if (narenas_tdata > narenas_actual) { - memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t) - * (narenas_tdata - narenas_actual)); - } - - /* Read the refreshed tdata array. */ - tdata = &arenas_tdata[ind]; -label_return: - if (arenas_tdata_old != NULL) { - a0dalloc(arenas_tdata_old); - } - return tdata; -} - /* Slow path, called only by arena_choose(). */ arena_t * arena_choose_hard(tsd_t *tsd, bool internal) { @@ -580,8 +584,7 @@ arena_choose_hard(tsd_t *tsd, bool internal) { /* Initialize a new arena. */ choose[j] = first_null; arena = arena_init_locked(tsd_tsdn(tsd), - choose[j], - (extent_hooks_t *)&extent_hooks_default); + choose[j], &arena_config_default); if (arena == NULL) { malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); @@ -633,20 +636,6 @@ arena_cleanup(tsd_t *tsd) { } } -void -arenas_tdata_cleanup(tsd_t *tsd) { - arena_tdata_t *arenas_tdata; - - /* Prevent tsd->arenas_tdata from being (re)created. */ - *tsd_arenas_tdata_bypassp_get(tsd) = true; - - arenas_tdata = tsd_arenas_tdata_get(tsd); - if (arenas_tdata != NULL) { - tsd_arenas_tdata_set(tsd, NULL); - a0dalloc(arenas_tdata); - } -} - static void stats_print_atexit(void) { if (config_stats) { @@ -665,11 +654,13 @@ stats_print_atexit(void) { for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { arena_t *arena = arena_get(tsdn, i, false); if (arena != NULL) { - tcache_t *tcache; + tcache_slow_t *tcache_slow; malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); - ql_foreach(tcache, &arena->tcache_ql, link) { - tcache_stats_merge(tsdn, tcache, arena); + ql_foreach(tcache_slow, &arena->tcache_ql, + link) { + tcache_stats_merge(tsdn, + tcache_slow->tcache, arena); } malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); @@ -734,18 +725,28 @@ malloc_ncpus(void) { SYSTEM_INFO si; GetSystemInfo(&si); result = si.dwNumberOfProcessors; -#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT) +#elif defined(CPU_COUNT) /* * glibc >= 2.6 has the CPU_COUNT macro. * * glibc's sysconf() uses isspace(). glibc allocates for the first time * *before* setting up the isspace tables. Therefore we need a * different method to get the number of CPUs. + * + * The getaffinity approach is also preferred when only a subset of CPUs + * is available, to avoid using more arenas than necessary. */ { +# if defined(__FreeBSD__) || defined(__DragonFly__) + cpuset_t set; +# else cpu_set_t set; - +# endif +# if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) + sched_getaffinity(0, sizeof(set), &set); +# else pthread_getaffinity_np(pthread_self(), sizeof(set), &set); +# endif result = CPU_COUNT(&set); } #else @@ -754,9 +755,47 @@ malloc_ncpus(void) { return ((result == -1) ? 1 : (unsigned)result); } +/* + * Ensure that number of CPUs is determistinc, i.e. it is the same based on: + * - sched_getaffinity() + * - _SC_NPROCESSORS_ONLN + * - _SC_NPROCESSORS_CONF + * Since otherwise tricky things is possible with percpu arenas in use. 
+ */ +static bool +malloc_cpu_count_is_deterministic() +{ +#ifdef _WIN32 + return true; +#else + long cpu_onln = sysconf(_SC_NPROCESSORS_ONLN); + long cpu_conf = sysconf(_SC_NPROCESSORS_CONF); + if (cpu_onln != cpu_conf) { + return false; + } +# if defined(CPU_COUNT) +# if defined(__FreeBSD__) || defined(__DragonFly__) + cpuset_t set; +# else + cpu_set_t set; +# endif /* __FreeBSD__ */ +# if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) + sched_getaffinity(0, sizeof(set), &set); +# else /* !JEMALLOC_HAVE_SCHED_SETAFFINITY */ + pthread_getaffinity_np(pthread_self(), sizeof(set), &set); +# endif /* JEMALLOC_HAVE_SCHED_SETAFFINITY */ + long cpu_affinity = CPU_COUNT(&set); + if (cpu_affinity != cpu_conf) { + return false; + } +# endif /* CPU_COUNT */ + return true; +#endif +} + static void -init_opt_stats_print_opts(const char *v, size_t vlen) { - size_t opts_len = strlen(opt_stats_print_opts); +init_opt_stats_opts(const char *v, size_t vlen, char *dest) { + size_t opts_len = strlen(dest); assert(opts_len <= stats_print_tot_num_options); for (size_t i = 0; i < vlen; i++) { @@ -767,16 +806,16 @@ init_opt_stats_print_opts(const char *v, size_t vlen) { default: continue; } - if (strchr(opt_stats_print_opts, v[i]) != NULL) { + if (strchr(dest, v[i]) != NULL) { /* Ignore repeated. */ continue; } - opt_stats_print_opts[opts_len++] = v[i]; - opt_stats_print_opts[opts_len] = '\0'; + dest[opts_len++] = v[i]; + dest[opts_len] = '\0'; assert(opts_len <= stats_print_tot_num_options); } - assert(opts_len == strlen(opt_stats_print_opts)); + assert(opts_len == strlen(dest)); } /* Reads the next size pair in a multi-sized option. */ @@ -858,10 +897,12 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, if (opts != *opts_p) { malloc_write(": Conf string ends " "with key\n"); + had_conf_error = true; } return true; default: malloc_write(": Malformed conf string\n"); + had_conf_error = true; return true; } } @@ -880,6 +921,7 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, if (*opts == '\0') { malloc_write(": Conf string ends " "with comma\n"); + had_conf_error = true; } *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; accept = true; @@ -936,7 +978,7 @@ malloc_slow_flag_init(void) { } /* Number of sources for initializing malloc_conf */ -#define MALLOC_CONF_NSOURCES 4 +#define MALLOC_CONF_NSOURCES 5 static const char * obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) { @@ -1014,6 +1056,9 @@ obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) { ret = NULL; } break; + } case 4: { + ret = je_malloc_conf_2_conf_harder; + break; } default: not_reached(); ret = NULL; @@ -1030,7 +1075,9 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], "string pointed to by the global variable malloc_conf", "\"name\" of the file referenced by the symbolic link named " "/etc/malloc.conf", - "value of the environment variable MALLOC_CONF" + "value of the environment variable MALLOC_CONF", + "string pointed to by the global variable " + "malloc_conf_2_conf_harder", }; unsigned i; const char *opts, *k, *v; @@ -1098,39 +1145,50 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], #define CONF_CHECK_MIN(um, min) ((um) < (min)) #define CONF_DONT_CHECK_MAX(um, max) false #define CONF_CHECK_MAX(um, max) ((um) > (max)) -#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ + +#define CONF_VALUE_READ(max_t, result) \ + char *end; \ + set_errno(0); \ + result = (max_t)malloc_strtoumax(v, &end, 
0); +#define CONF_VALUE_READ_FAIL() \ + (get_errno() != 0 || (uintptr_t)end - (uintptr_t)v != vlen) + +#define CONF_HANDLE_T(t, max_t, o, n, min, max, check_min, check_max, clip) \ if (CONF_MATCH(n)) { \ - uintmax_t um; \ - char *end; \ - \ - set_errno(0); \ - um = malloc_strtoumax(v, &end, 0); \ - if (get_errno() != 0 || (uintptr_t)end -\ - (uintptr_t)v != vlen) { \ + max_t mv; \ + CONF_VALUE_READ(max_t, mv) \ + if (CONF_VALUE_READ_FAIL()) { \ CONF_ERROR("Invalid conf value",\ k, klen, v, vlen); \ } else if (clip) { \ - if (check_min(um, (t)(min))) { \ + if (check_min(mv, (t)(min))) { \ o = (t)(min); \ } else if ( \ - check_max(um, (t)(max))) { \ + check_max(mv, (t)(max))) { \ o = (t)(max); \ } else { \ - o = (t)um; \ + o = (t)mv; \ } \ } else { \ - if (check_min(um, (t)(min)) || \ - check_max(um, (t)(max))) { \ + if (check_min(mv, (t)(min)) || \ + check_max(mv, (t)(max))) { \ CONF_ERROR( \ "Out-of-range " \ "conf value", \ k, klen, v, vlen); \ } else { \ - o = (t)um; \ + o = (t)mv; \ } \ } \ CONF_CONTINUE; \ } +#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ + CONF_HANDLE_T(t, uintmax_t, o, n, min, max, check_min, \ + check_max, clip) +#define CONF_HANDLE_T_SIGNED(t, o, n, min, max, check_min, check_max, clip)\ + CONF_HANDLE_T(t, intmax_t, o, n, min, max, check_min, \ + check_max, clip) + #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ clip) \ CONF_HANDLE_T_U(unsigned, o, n, min, max, \ @@ -1138,27 +1196,15 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ CONF_HANDLE_T_U(size_t, o, n, min, max, \ check_min, check_max, clip) +#define CONF_HANDLE_INT64_T(o, n, min, max, check_min, check_max, clip) \ + CONF_HANDLE_T_SIGNED(int64_t, o, n, min, max, \ + check_min, check_max, clip) +#define CONF_HANDLE_UINT64_T(o, n, min, max, check_min, check_max, clip)\ + CONF_HANDLE_T_U(uint64_t, o, n, min, max, \ + check_min, check_max, clip) #define CONF_HANDLE_SSIZE_T(o, n, min, max) \ - if (CONF_MATCH(n)) { \ - long l; \ - char *end; \ - \ - set_errno(0); \ - l = strtol(v, &end, 0); \ - if (get_errno() != 0 || (uintptr_t)end -\ - (uintptr_t)v != vlen) { \ - CONF_ERROR("Invalid conf value",\ - k, klen, v, vlen); \ - } else if (l < (ssize_t)(min) || l > \ - (ssize_t)(max)) { \ - CONF_ERROR( \ - "Out-of-range conf value", \ - k, klen, v, vlen); \ - } else { \ - o = l; \ - } \ - CONF_CONTINUE; \ - } + CONF_HANDLE_T_SIGNED(ssize_t, o, n, min, max, \ + CONF_CHECK_MIN, CONF_CHECK_MAX, false) #define CONF_HANDLE_CHAR_P(o, n, d) \ if (CONF_MATCH(n)) { \ size_t cpylen = (vlen <= \ @@ -1178,13 +1224,14 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], CONF_HANDLE_BOOL(opt_abort, "abort") CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf") + CONF_HANDLE_BOOL(opt_trust_madvise, "trust_madvise") if (strncmp("metadata_thp", k, klen) == 0) { - int i; + int m; bool match = false; - for (i = 0; i < metadata_thp_mode_limit; i++) { - if (strncmp(metadata_thp_mode_names[i], + for (m = 0; m < metadata_thp_mode_limit; m++) { + if (strncmp(metadata_thp_mode_names[m], v, vlen) == 0) { - opt_metadata_thp = i; + opt_metadata_thp = m; match = true; break; } @@ -1197,18 +1244,18 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], } CONF_HANDLE_BOOL(opt_retain, "retain") if (strncmp("dss", k, klen) == 0) { - int i; + int m; bool match = false; - for (i = 0; i < dss_prec_limit; i++) { - if (strncmp(dss_prec_names[i], v, vlen) 
+ for (m = 0; m < dss_prec_limit; m++) { + if (strncmp(dss_prec_names[m], v, vlen) == 0) { - if (extent_dss_prec_set(i)) { + if (extent_dss_prec_set(m)) { CONF_ERROR( "Error setting dss", k, klen, v, vlen); } else { opt_dss = - dss_prec_names[i]; + dss_prec_names[m]; match = true; break; } @@ -1220,9 +1267,27 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], } CONF_CONTINUE; } - CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, - UINT_MAX, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, - false) + if (CONF_MATCH("narenas")) { + if (CONF_MATCH_VALUE("default")) { + opt_narenas = 0; + CONF_CONTINUE; + } else { + CONF_HANDLE_UNSIGNED(opt_narenas, + "narenas", 1, UINT_MAX, + CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, + /* clip */ false) + } + } + if (CONF_MATCH("narenas_ratio")) { + char *end; + bool err = fxp_parse(&opt_narenas_ratio, v, + &end); + if (err || (size_t)(end - v) != vlen) { + CONF_ERROR("Invalid conf value", + k, klen, v, vlen); + } + CONF_CONTINUE; + } if (CONF_MATCH("bin_shards")) { const char *bin_shards_segment_cur = v; size_t vlen_left = vlen; @@ -1245,6 +1310,9 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], } while (vlen_left > 0); CONF_CONTINUE; } + CONF_HANDLE_INT64_T(opt_mutex_max_spin, + "mutex_max_spin", -1, INT64_MAX, CONF_CHECK_MIN, + CONF_DONT_CHECK_MAX, false); CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms, "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : @@ -1255,7 +1323,16 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], SSIZE_MAX); CONF_HANDLE_BOOL(opt_stats_print, "stats_print") if (CONF_MATCH("stats_print_opts")) { - init_opt_stats_print_opts(v, vlen); + init_opt_stats_opts(v, vlen, + opt_stats_print_opts); + CONF_CONTINUE; + } + CONF_HANDLE_INT64_T(opt_stats_interval, + "stats_interval", -1, INT64_MAX, + CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, false) + if (CONF_MATCH("stats_interval_opts")) { + init_opt_stats_opts(v, vlen, + opt_stats_interval_opts); CONF_CONTINUE; } if (config_fill) { @@ -1291,9 +1368,61 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], if (config_xmalloc) { CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") } + if (config_enable_cxx) { + CONF_HANDLE_BOOL( + opt_experimental_infallible_new, + "experimental_infallible_new") + } + CONF_HANDLE_BOOL(opt_tcache, "tcache") - CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max", - -1, (sizeof(size_t) << 3) - 1) + CONF_HANDLE_SIZE_T(opt_tcache_max, "tcache_max", + 0, TCACHE_MAXCLASS_LIMIT, CONF_DONT_CHECK_MIN, + CONF_CHECK_MAX, /* clip */ true) + if (CONF_MATCH("lg_tcache_max")) { + size_t m; + CONF_VALUE_READ(size_t, m) + if (CONF_VALUE_READ_FAIL()) { + CONF_ERROR("Invalid conf value", + k, klen, v, vlen); + } else { + /* clip if necessary */ + if (m > TCACHE_LG_MAXCLASS_LIMIT) { + m = TCACHE_LG_MAXCLASS_LIMIT; + } + opt_tcache_max = (size_t)1 << m; + } + CONF_CONTINUE; + } + /* + * Anyone trying to set a value outside -16 to 16 is + * deeply confused. + */ + CONF_HANDLE_SSIZE_T(opt_lg_tcache_nslots_mul, + "lg_tcache_nslots_mul", -16, 16) + /* Ditto with values past 2048. 
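A brief example of the new options parsed above, set through the documented malloc_conf global; the particular values are arbitrary, and lg_tcache_max remains accepted (it is converted and clipped as shown):

```
/*
 * narenas_ratio is a fixed-point value handled by fxp_parse(); tcache_max
 * supersedes lg_tcache_max as the preferred spelling.
 */
const char *malloc_conf = "narenas_ratio:0.75,tcache_max:8192";
```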
*/ + CONF_HANDLE_UNSIGNED(opt_tcache_nslots_small_min, + "tcache_nslots_small_min", 1, 2048, + CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true) + CONF_HANDLE_UNSIGNED(opt_tcache_nslots_small_max, + "tcache_nslots_small_max", 1, 2048, + CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true) + CONF_HANDLE_UNSIGNED(opt_tcache_nslots_large, + "tcache_nslots_large", 1, 2048, + CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true) + CONF_HANDLE_SIZE_T(opt_tcache_gc_incr_bytes, + "tcache_gc_incr_bytes", 1024, SIZE_T_MAX, + CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, + /* clip */ true) + CONF_HANDLE_SIZE_T(opt_tcache_gc_delay_bytes, + "tcache_gc_delay_bytes", 0, SIZE_T_MAX, + CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, + /* clip */ false) + CONF_HANDLE_UNSIGNED(opt_lg_tcache_flush_small_div, + "lg_tcache_flush_small_div", 1, 16, + CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true) + CONF_HANDLE_UNSIGNED(opt_lg_tcache_flush_large_div, + "lg_tcache_flush_large_div", 1, 16, + CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true) /* * The runtime option of oversize_threshold remains @@ -1313,16 +1442,16 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], if (strncmp("percpu_arena", k, klen) == 0) { bool match = false; - for (int i = percpu_arena_mode_names_base; i < - percpu_arena_mode_names_limit; i++) { - if (strncmp(percpu_arena_mode_names[i], + for (int m = percpu_arena_mode_names_base; m < + percpu_arena_mode_names_limit; m++) { + if (strncmp(percpu_arena_mode_names[m], v, vlen) == 0) { if (!have_percpu_arena) { CONF_ERROR( "No getcpu support", k, klen, v, vlen); } - opt_percpu_arena = i; + opt_percpu_arena = m; match = true; break; } @@ -1340,7 +1469,83 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], opt_max_background_threads, CONF_CHECK_MIN, CONF_CHECK_MAX, true); + CONF_HANDLE_BOOL(opt_hpa, "hpa") + CONF_HANDLE_SIZE_T(opt_hpa_opts.slab_max_alloc, + "hpa_slab_max_alloc", PAGE, HUGEPAGE, + CONF_CHECK_MIN, CONF_CHECK_MAX, true); + + /* + * Accept either a ratio-based or an exact hugification + * threshold. 
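Per the "ratio-based or exact" note above, the hugification threshold can be spelled either way; a sketch with arbitrary values, assuming an HPA-capable build (hpa:true is rejected otherwise):

```
/* Exact threshold in bytes (clipped to [PAGE, HUGEPAGE]) ... */
static const char *conf_exact =
    "hpa:true,hpa_hugification_threshold:1048576";
/* ... or as a ratio of the hugepage size, parsed with fxp_parse(). */
static const char *conf_ratio =
    "hpa:true,hpa_hugification_threshold_ratio:0.95";
```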
+ */ + CONF_HANDLE_SIZE_T(opt_hpa_opts.hugification_threshold, + "hpa_hugification_threshold", PAGE, HUGEPAGE, + CONF_CHECK_MIN, CONF_CHECK_MAX, true); + if (CONF_MATCH("hpa_hugification_threshold_ratio")) { + fxp_t ratio; + char *end; + bool err = fxp_parse(&ratio, v, + &end); + if (err || (size_t)(end - v) != vlen + || ratio > FXP_INIT_INT(1)) { + CONF_ERROR("Invalid conf value", + k, klen, v, vlen); + } else { + opt_hpa_opts.hugification_threshold = + fxp_mul_frac(HUGEPAGE, ratio); + } + CONF_CONTINUE; + } + + CONF_HANDLE_UINT64_T( + opt_hpa_opts.hugify_delay_ms, "hpa_hugify_delay_ms", + 0, 0, CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, + false); + + CONF_HANDLE_UINT64_T( + opt_hpa_opts.min_purge_interval_ms, + "hpa_min_purge_interval_ms", 0, 0, + CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false); + + if (CONF_MATCH("hpa_dirty_mult")) { + if (CONF_MATCH_VALUE("-1")) { + opt_hpa_opts.dirty_mult = (fxp_t)-1; + CONF_CONTINUE; + } + fxp_t ratio; + char *end; + bool err = fxp_parse(&ratio, v, + &end); + if (err || (size_t)(end - v) != vlen) { + CONF_ERROR("Invalid conf value", + k, klen, v, vlen); + } else { + opt_hpa_opts.dirty_mult = ratio; + } + CONF_CONTINUE; + } + + CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.nshards, + "hpa_sec_nshards", 0, 0, CONF_CHECK_MIN, + CONF_DONT_CHECK_MAX, true); + CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.max_alloc, + "hpa_sec_max_alloc", PAGE, 0, CONF_CHECK_MIN, + CONF_DONT_CHECK_MAX, true); + CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.max_bytes, + "hpa_sec_max_bytes", PAGE, 0, CONF_CHECK_MIN, + CONF_DONT_CHECK_MAX, true); + CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.bytes_after_flush, + "hpa_sec_bytes_after_flush", PAGE, 0, + CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, true); + CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.batch_fill_extra, + "hpa_sec_batch_fill_extra", 0, HUGEPAGE_PAGES, + CONF_CHECK_MIN, CONF_CHECK_MAX, true); + if (CONF_MATCH("slab_sizes")) { + if (CONF_MATCH_VALUE("default")) { + sc_data_init(sc_data); + CONF_CONTINUE; + } bool err; const char *slab_size_segment_cur = v; size_t vlen_left = vlen; @@ -1382,7 +1587,44 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") CONF_HANDLE_BOOL(opt_prof_final, "prof_final") CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") + CONF_HANDLE_BOOL(opt_prof_leak_error, + "prof_leak_error") CONF_HANDLE_BOOL(opt_prof_log, "prof_log") + CONF_HANDLE_SSIZE_T(opt_prof_recent_alloc_max, + "prof_recent_alloc_max", -1, SSIZE_MAX) + CONF_HANDLE_BOOL(opt_prof_stats, "prof_stats") + CONF_HANDLE_BOOL(opt_prof_sys_thread_name, + "prof_sys_thread_name") + if (CONF_MATCH("prof_time_resolution")) { + if (CONF_MATCH_VALUE("default")) { + opt_prof_time_res = + prof_time_res_default; + } else if (CONF_MATCH_VALUE("high")) { + if (!config_high_res_timer) { + CONF_ERROR( + "No high resolution" + " timer support", + k, klen, v, vlen); + } else { + opt_prof_time_res = + prof_time_res_high; + } + } else { + CONF_ERROR("Invalid conf value", + k, klen, v, vlen); + } + CONF_CONTINUE; + } + /* + * Undocumented. When set to false, don't + * correct for an unbiasing bug in jeprof + * attribution. This can be handy if you want + * to get consistent numbers from your binary + * across different jemalloc versions, even if + * those numbers are incorrect. The default is + * true. 
+ */ + CONF_HANDLE_BOOL(opt_prof_unbias, "prof_unbias") } if (config_log) { if (CONF_MATCH("log")) { @@ -1396,15 +1638,15 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], } if (CONF_MATCH("thp")) { bool match = false; - for (int i = 0; i < thp_mode_names_limit; i++) { - if (strncmp(thp_mode_names[i],v, vlen) + for (int m = 0; m < thp_mode_names_limit; m++) { + if (strncmp(thp_mode_names[m],v, vlen) == 0) { - if (!have_madvise_huge) { + if (!have_madvise_huge && !have_memcntl) { CONF_ERROR( "No THP support", k, klen, v, vlen); } - opt_thp = i; + opt_thp = m; match = true; break; } @@ -1415,6 +1657,55 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], } CONF_CONTINUE; } + if (CONF_MATCH("zero_realloc")) { + if (CONF_MATCH_VALUE("alloc")) { + opt_zero_realloc_action + = zero_realloc_action_alloc; + } else if (CONF_MATCH_VALUE("free")) { + opt_zero_realloc_action + = zero_realloc_action_free; + } else if (CONF_MATCH_VALUE("abort")) { + opt_zero_realloc_action + = zero_realloc_action_abort; + } else { + CONF_ERROR("Invalid conf value", + k, klen, v, vlen); + } + CONF_CONTINUE; + } + if (config_uaf_detection && + CONF_MATCH("lg_san_uaf_align")) { + ssize_t a; + CONF_VALUE_READ(ssize_t, a) + if (CONF_VALUE_READ_FAIL() || a < -1) { + CONF_ERROR("Invalid conf value", + k, klen, v, vlen); + } + if (a == -1) { + opt_lg_san_uaf_align = -1; + CONF_CONTINUE; + } + + /* clip if necessary */ + ssize_t max_allowed = (sizeof(size_t) << 3) - 1; + ssize_t min_allowed = LG_PAGE; + if (a > max_allowed) { + a = max_allowed; + } else if (a < min_allowed) { + a = min_allowed; + } + + opt_lg_san_uaf_align = a; + CONF_CONTINUE; + } + + CONF_HANDLE_SIZE_T(opt_san_guard_small, + "san_guard_small", 0, SIZE_T_MAX, + CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false) + CONF_HANDLE_SIZE_T(opt_san_guard_large, + "san_guard_large", 0, SIZE_T_MAX, + CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false) + CONF_ERROR("Invalid conf pair", k, klen, v, vlen); #undef CONF_ERROR #undef CONF_CONTINUE @@ -1425,7 +1716,9 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], #undef CONF_CHECK_MIN #undef CONF_DONT_CHECK_MAX #undef CONF_CHECK_MAX +#undef CONF_HANDLE_T #undef CONF_HANDLE_T_U +#undef CONF_HANDLE_T_SIGNED #undef CONF_HANDLE_UNSIGNED #undef CONF_HANDLE_SIZE_T #undef CONF_HANDLE_SSIZE_T @@ -1440,15 +1733,33 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], atomic_store_b(&log_init_done, true, ATOMIC_RELEASE); } +static bool +malloc_conf_init_check_deps(void) { + if (opt_prof_leak_error && !opt_prof_final) { + malloc_printf(": prof_leak_error is set w/o " + "prof_final.\n"); + return true; + } + + return false; +} + static void malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { - const char *opts_cache[MALLOC_CONF_NSOURCES] = {NULL, NULL, NULL, NULL}; + const char *opts_cache[MALLOC_CONF_NSOURCES] = {NULL, NULL, NULL, NULL, + NULL}; char buf[PATH_MAX + 1]; /* The first call only set the confirm_conf option and opts_cache */ malloc_conf_init_helper(NULL, NULL, true, opts_cache, buf); malloc_conf_init_helper(sc_data, bin_shard_sizes, false, opts_cache, NULL); + if (malloc_conf_init_check_deps()) { + /* check_deps does warning msg only; abort below if needed. 
*/ + if (opt_abort_conf) { + malloc_abort_invalid_conf(); + } + } } #undef MALLOC_CONF_NSOURCES @@ -1492,8 +1803,8 @@ malloc_init_hard_a0_locked() { * Ordering here is somewhat tricky; we need sc_boot() first, since that * determines what the size classes will be, and then * malloc_conf_init(), since any slab size tweaking will need to be done - * before sz_boot and bin_boot, which assume that the values they read - * out of sc_data_global are final. + * before sz_boot and bin_info_boot, which assume that the values they + * read out of sc_data_global are final. */ sc_boot(&sc_data); unsigned bin_shard_sizes[SC_NBINS]; @@ -1507,8 +1818,9 @@ malloc_init_hard_a0_locked() { prof_boot0(); } malloc_conf_init(&sc_data, bin_shard_sizes); - sz_boot(&sc_data); - bin_boot(&sc_data, bin_shard_sizes); + san_init(opt_lg_san_uaf_align); + sz_boot(&sc_data, opt_cache_oblivious); + bin_info_boot(&sc_data, bin_shard_sizes); if (opt_stats_print) { /* Print statistics at exit. */ @@ -1519,12 +1831,20 @@ malloc_init_hard_a0_locked() { } } } + + if (stats_boot()) { + return true; + } if (pages_boot()) { return true; } if (base_boot(TSDN_NULL)) { return true; } + /* emap_global is static, hence zeroed. */ + if (emap_init(&arena_emap_global, b0get(), /* zeroed */ true)) { + return true; + } if (extent_boot()) { return true; } @@ -1534,8 +1854,20 @@ malloc_init_hard_a0_locked() { if (config_prof) { prof_boot1(); } - arena_boot(&sc_data); - if (tcache_boot(TSDN_NULL)) { + if (opt_hpa && !hpa_supported()) { + malloc_printf(": HPA not supported in the current " + "configuration; %s.", + opt_abort_conf ? "aborting" : "disabling"); + if (opt_abort_conf) { + malloc_abort_invalid_conf(); + } else { + opt_hpa = false; + } + } + if (arena_boot(&sc_data, b0get(), opt_hpa)) { + return true; + } + if (tcache_boot(TSDN_NULL, b0get())) { return true; } if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS, @@ -1554,11 +1886,29 @@ malloc_init_hard_a0_locked() { * Initialize one arena here. The rest are lazily created in * arena_choose_hard(). */ - if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default) - == NULL) { + if (arena_init(TSDN_NULL, 0, &arena_config_default) == NULL) { return true; } a0 = arena_get(TSDN_NULL, 0, false); + + if (opt_hpa && !hpa_supported()) { + malloc_printf(": HPA not supported in the current " + "configuration; %s.", + opt_abort_conf ? "aborting" : "disabling"); + if (opt_abort_conf) { + malloc_abort_invalid_conf(); + } else { + opt_hpa = false; + } + } else if (opt_hpa) { + hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts; + hpa_shard_opts.deferral_allowed = background_thread_enabled(); + if (pa_shard_enable_hpa(TSDN_NULL, &a0->pa_shard, + &hpa_shard_opts, &opt_hpa_sec_opts)) { + return true; + } + } + malloc_init_state = malloc_init_a0_initialized; return false; @@ -1580,6 +1930,29 @@ malloc_init_hard_recursible(void) { malloc_init_state = malloc_init_recursible; ncpus = malloc_ncpus(); + if (opt_percpu_arena != percpu_arena_disabled) { + bool cpu_count_is_deterministic = + malloc_cpu_count_is_deterministic(); + if (!cpu_count_is_deterministic) { + /* + * If # of CPU is not deterministic, and narenas not + * specified, disables per cpu arena since it may not + * detect CPU IDs properly. + */ + if (opt_narenas == 0) { + opt_percpu_arena = percpu_arena_disabled; + malloc_write(": Number of CPUs " + "detected is not deterministic. 
Per-CPU " + "arena disabled.\n"); + if (opt_abort_conf) { + malloc_abort_invalid_conf(); + } + if (opt_abort) { + abort(); + } + } + } + } #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ @@ -1610,7 +1983,13 @@ malloc_narenas_default(void) { * default. */ if (ncpus > 1) { - return ncpus << 2; + fxp_t fxp_ncpus = FXP_INIT_INT(ncpus); + fxp_t goal = fxp_mul(fxp_ncpus, opt_narenas_ratio); + uint32_t int_goal = fxp_round_nearest(goal); + if (int_goal == 0) { + return 1; + } + return int_goal; } else { return 1; } @@ -1769,10 +2148,11 @@ malloc_init_hard(void) { /* Set reentrancy level to 1 during init. */ pre_reentrancy(tsd, NULL); /* Initialize narenas before prof_boot2 (for allocation). */ - if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) { + if (malloc_init_narenas() + || background_thread_boot1(tsd_tsdn(tsd), b0get())) { UNLOCK_RETURN(tsd_tsdn(tsd), true, true) } - if (config_prof && prof_boot2(tsd)) { + if (config_prof && prof_boot2(tsd, b0get())) { UNLOCK_RETURN(tsd_tsdn(tsd), true, true) } @@ -1911,38 +2291,107 @@ dynamic_opts_init(dynamic_opts_t *dynamic_opts) { dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC; } -/* ind is ignored if dopts->alignment > 0. */ -JEMALLOC_ALWAYS_INLINE void * -imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, - size_t size, size_t usize, szind_t ind) { - tcache_t *tcache; - arena_t *arena; +/* + * ind parameter is optional and is only checked and filled if alignment == 0; + * return true if result is out of range. + */ +JEMALLOC_ALWAYS_INLINE bool +aligned_usize_get(size_t size, size_t alignment, size_t *usize, szind_t *ind, + bool bump_empty_aligned_alloc) { + assert(usize != NULL); + if (alignment == 0) { + if (ind != NULL) { + *ind = sz_size2index(size); + if (unlikely(*ind >= SC_NSIZES)) { + return true; + } + *usize = sz_index2size(*ind); + assert(*usize > 0 && *usize <= SC_LARGE_MAXCLASS); + return false; + } + *usize = sz_s2u(size); + } else { + if (bump_empty_aligned_alloc && unlikely(size == 0)) { + size = 1; + } + *usize = sz_sa2u(size, alignment); + } + if (unlikely(*usize == 0 || *usize > SC_LARGE_MAXCLASS)) { + return true; + } + return false; +} - /* Fill in the tcache. */ - if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) { - if (likely(!sopts->slow)) { +JEMALLOC_ALWAYS_INLINE bool +zero_get(bool guarantee, bool slow) { + if (config_fill && slow && unlikely(opt_zero)) { + return true; + } else { + return guarantee; + } +} + +JEMALLOC_ALWAYS_INLINE tcache_t * +tcache_get_from_ind(tsd_t *tsd, unsigned tcache_ind, bool slow, bool is_alloc) { + tcache_t *tcache; + if (tcache_ind == TCACHE_IND_AUTOMATIC) { + if (likely(!slow)) { /* Getting tcache ptr unconditionally. */ tcache = tsd_tcachep_get(tsd); assert(tcache == tcache_get(tsd)); - } else { + } else if (is_alloc || + likely(tsd_reentrancy_level_get(tsd) == 0)) { tcache = tcache_get(tsd); + } else { + tcache = NULL; } - } else if (dopts->tcache_ind == TCACHE_IND_NONE) { - tcache = NULL; } else { - tcache = tcaches_get(tsd, dopts->tcache_ind); + /* + * Should not specify tcache on deallocation path when being + * reentrant. + */ + assert(is_alloc || tsd_reentrancy_level_get(tsd) == 0 || + tsd_state_nocleanup(tsd)); + if (tcache_ind == TCACHE_IND_NONE) { + tcache = NULL; + } else { + tcache = tcaches_get(tsd, tcache_ind); + } } + return tcache; +} - /* Fill in the arena. 
*/ - if (dopts->arena_ind == ARENA_IND_AUTOMATIC) { +/* Return true if a manual arena is specified and arena_get() OOMs. */ +JEMALLOC_ALWAYS_INLINE bool +arena_get_from_ind(tsd_t *tsd, unsigned arena_ind, arena_t **arena_p) { + if (arena_ind == ARENA_IND_AUTOMATIC) { /* * In case of automatic arena management, we defer arena * computation until as late as we can, hoping to fill the * allocation out of the tcache. */ - arena = NULL; + *arena_p = NULL; } else { - arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true); + *arena_p = arena_get(tsd_tsdn(tsd), arena_ind, true); + if (unlikely(*arena_p == NULL) && arena_ind >= narenas_auto) { + return true; + } + } + return false; +} + +/* ind is ignored if dopts->alignment > 0. */ +JEMALLOC_ALWAYS_INLINE void * +imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, + size_t size, size_t usize, szind_t ind) { + /* Fill in the tcache. */ + tcache_t *tcache = tcache_get_from_ind(tsd, dopts->tcache_ind, + sopts->slow, /* is_alloc */ true); + + /* Fill in the arena. */ + arena_t *arena; + if (arena_get_from_ind(tsd, dopts->arena_ind, &arena)) { + return NULL; } if (unlikely(dopts->alignment != 0)) { @@ -1966,6 +2415,7 @@ imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, szind_t ind_large; size_t bumped_usize = usize; + dopts->alignment = prof_sample_align(dopts->alignment); if (usize <= SC_SMALL_MAXCLASS) { assert(((dopts->alignment == 0) ? sz_s2u(SC_LARGE_MINCLASS) : @@ -1982,6 +2432,7 @@ imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, } else { ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind); } + assert(prof_sample_aligned(ret)); return ret; } @@ -2035,16 +2486,14 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { /* Filled in by compute_size_with_overflow below. */ size_t size = 0; /* - * For unaligned allocations, we need only ind. For aligned - * allocations, or in case of stats or profiling we need usize. - * - * These are actually dead stores, in that their values are reset before - * any branch on their value is taken. Sometimes though, it's - * convenient to pass them as arguments before this point. To avoid - * undefined behavior then, we initialize them with dummy stores. + * The zero initialization for ind is actually dead store, in that its + * value is reset before any branch on its value is taken. Sometimes + * though, it's convenient to pass it as arguments before this point. + * To avoid undefined behavior then, we initialize it with dummy stores. */ szind_t ind = 0; - size_t usize = 0; + /* usize will always be properly initialized. */ + size_t usize; /* Reentrancy is only checked on slow path. */ int8_t reentrancy_level; @@ -2061,31 +2510,12 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { } /* This is the beginning of the "core" algorithm. 
*/ - - if (dopts->alignment == 0) { - ind = sz_size2index(size); - if (unlikely(ind >= SC_NSIZES)) { - goto label_oom; - } - if (config_stats || (config_prof && opt_prof) || sopts->usize) { - usize = sz_index2size(ind); - dopts->usize = usize; - assert(usize > 0 && usize - <= SC_LARGE_MAXCLASS); - } - } else { - if (sopts->bump_empty_aligned_alloc) { - if (unlikely(size == 0)) { - size = 1; - } - } - usize = sz_sa2u(size, dopts->alignment); - dopts->usize = usize; - if (unlikely(usize == 0 - || usize > SC_LARGE_MAXCLASS)) { - goto label_oom; - } + dopts->zero = zero_get(dopts->zero, sopts->slow); + if (aligned_usize_get(size, dopts->alignment, &usize, &ind, + sopts->bump_empty_aligned_alloc)) { + goto label_oom; } + dopts->usize = usize; /* Validate the user input. */ if (sopts->assert_nonempty_alloc) { assert (size != 0); @@ -2111,26 +2541,25 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { dopts->arena_ind = 0; } + /* + * If dopts->alignment > 0, then ind is still 0, but usize was computed + * in the previous if statement. Down the positive alignment path, + * imalloc_no_sample and imalloc_sample will ignore ind. + */ + /* If profiling is on, get our profiling context. */ if (config_prof && opt_prof) { - /* - * Note that if we're going down this path, usize must have been - * initialized in the previous if statement. - */ - prof_tctx_t *tctx = prof_alloc_prep( - tsd, usize, prof_active_get_unlocked(), true); + bool prof_active = prof_active_get_unlocked(); + bool sample_event = te_prof_sample_event_lookahead(tsd, usize); + prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active, + sample_event); - alloc_ctx_t alloc_ctx; + emap_alloc_ctx_t alloc_ctx; if (likely((uintptr_t)tctx == (uintptr_t)1U)) { - alloc_ctx.slab = (usize - <= SC_SMALL_MAXCLASS); + alloc_ctx.slab = (usize <= SC_SMALL_MAXCLASS); allocation = imalloc_no_sample( sopts, dopts, tsd, usize, usize, ind); } else if ((uintptr_t)tctx > (uintptr_t)1U) { - /* - * Note that ind might still be 0 here. This is fine; - * imalloc_sample ignores ind if dopts->alignment > 0. - */ allocation = imalloc_sample( sopts, dopts, tsd, usize, ind); alloc_ctx.slab = false; @@ -2139,17 +2568,12 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { } if (unlikely(allocation == NULL)) { - prof_alloc_rollback(tsd, tctx, true); + prof_alloc_rollback(tsd, tctx); goto label_oom; } - prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx); + prof_malloc(tsd, allocation, size, usize, &alloc_ctx, tctx); } else { - /* - * If dopts->alignment > 0, then ind is still 0, but usize was - * computed in the previous if statement. Down the positive - * alignment path, imalloc_no_sample ignores ind and size - * (relying only on usize). - */ + assert(!opt_prof); allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize, ind); if (unlikely(allocation == NULL)) { @@ -2161,12 +2585,17 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { * Allocation has been done at this point. We still have some * post-allocation work to do though. 
*/ + + thread_alloc_event(tsd, usize); + assert(dopts->alignment == 0 || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0)); - if (config_stats) { - assert(usize == isalloc(tsd_tsdn(tsd), allocation)); - *tsd_thread_allocatedp_get(tsd) += usize; + assert(usize == isalloc(tsd_tsdn(tsd), allocation)); + + if (config_fill && sopts->slow && !dopts->zero + && unlikely(opt_junk_alloc)) { + junk_alloc_callback(allocation, usize); } if (sopts->slow) { @@ -2277,7 +2706,11 @@ malloc_default(size_t size) { static_opts_t sopts; dynamic_opts_t dopts; - LOG("core.malloc.entry", "size: %zu", size); + /* + * This variant has logging hook on exit but not on entry. It's callled + * only by je_malloc, below, which emits the entry one for us (and, if + * it calls us, does so only via tail call). + */ static_opts_init(&sopts); dynamic_opts_init(&dopts); @@ -2310,86 +2743,11 @@ malloc_default(size_t size) { * Begin malloc(3)-compatible functions. */ -/* - * malloc() fastpath. - * - * Fastpath assumes size <= SC_LOOKUP_MAXCLASS, and that we hit - * tcache. If either of these is false, we tail-call to the slowpath, - * malloc_default(). Tail-calling is used to avoid any caller-saved - * registers. - * - * fastpath supports ticker and profiling, both of which will also - * tail-call to the slowpath if they fire. - */ JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) je_malloc(size_t size) { - LOG("core.malloc.entry", "size: %zu", size); - - if (tsd_get_allocates() && unlikely(!malloc_initialized())) { - return malloc_default(size); - } - - tsd_t *tsd = tsd_get(false); - if (unlikely(!tsd || !tsd_fast(tsd) || (size > SC_LOOKUP_MAXCLASS))) { - return malloc_default(size); - } - - tcache_t *tcache = tsd_tcachep_get(tsd); - - if (unlikely(ticker_trytick(&tcache->gc_ticker))) { - return malloc_default(size); - } - - szind_t ind = sz_size2index_lookup(size); - size_t usize; - if (config_stats || config_prof) { - usize = sz_index2size(ind); - } - /* Fast path relies on size being a bin. I.e. SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS */ - assert(ind < SC_NBINS); - assert(size <= SC_SMALL_MAXCLASS); - - if (config_prof) { - int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd); - bytes_until_sample -= usize; - tsd_bytes_until_sample_set(tsd, bytes_until_sample); - - if (unlikely(bytes_until_sample < 0)) { - /* - * Avoid a prof_active check on the fastpath. - * If prof_active is false, set bytes_until_sample to - * a large value. If prof_active is set to true, - * bytes_until_sample will be reset. 
- */ - if (!prof_active) { - tsd_bytes_until_sample_set(tsd, SSIZE_MAX); - } - return malloc_default(size); - } - } - - cache_bin_t *bin = tcache_small_bin_get(tcache, ind); - bool tcache_success; - void* ret = cache_bin_alloc_easy(bin, &tcache_success); - - if (tcache_success) { - if (config_stats) { - *tsd_thread_allocatedp_get(tsd) += usize; - bin->tstats.nrequests++; - } - if (config_prof) { - tcache->prof_accumbytes += usize; - } - - LOG("core.malloc.exit", "result: %p", ret); - - /* Fastpath success */ - return ret; - } - - return malloc_default(size); + return imalloc_fastpath(size, &malloc_default); } JEMALLOC_EXPORT int JEMALLOC_NOTHROW @@ -2506,56 +2864,6 @@ je_calloc(size_t num, size_t size) { return ret; } -static void * -irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, - prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) { - void *p; - - if (tctx == NULL) { - return NULL; - } - if (usize <= SC_SMALL_MAXCLASS) { - p = iralloc(tsd, old_ptr, old_usize, - SC_LARGE_MINCLASS, 0, false, hook_args); - if (p == NULL) { - return NULL; - } - arena_prof_promote(tsd_tsdn(tsd), p, usize); - } else { - p = iralloc(tsd, old_ptr, old_usize, usize, 0, false, - hook_args); - } - - return p; -} - -JEMALLOC_ALWAYS_INLINE void * -irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, - alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) { - void *p; - bool prof_active; - prof_tctx_t *old_tctx, *tctx; - - prof_active = prof_active_get_unlocked(); - old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); - tctx = prof_alloc_prep(tsd, usize, prof_active, true); - if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { - p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx, - hook_args); - } else { - p = iralloc(tsd, old_ptr, old_usize, usize, 0, false, - hook_args); - } - if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, true); - return NULL; - } - prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, - old_tctx); - - return p; -} - JEMALLOC_ALWAYS_INLINE void ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { if (!slow_path) { @@ -2569,30 +2877,50 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); - alloc_ctx_t alloc_ctx; - rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); - rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, - (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + emap_alloc_ctx_t alloc_ctx; + emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr, + &alloc_ctx); assert(alloc_ctx.szind != SC_NSIZES); - size_t usize; + size_t usize = sz_index2size(alloc_ctx.szind); if (config_prof && opt_prof) { - usize = sz_index2size(alloc_ctx.szind); prof_free(tsd, ptr, usize, &alloc_ctx); - } else if (config_stats) { - usize = sz_index2size(alloc_ctx.szind); - } - if (config_stats) { - *tsd_thread_deallocatedp_get(tsd) += usize; } if (likely(!slow_path)) { idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, false); } else { + if (config_fill && slow_path && opt_junk_free) { + junk_free_callback(ptr, usize); + } idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, true); } + thread_dalloc_event(tsd, usize); +} + +JEMALLOC_ALWAYS_INLINE bool +maybe_check_alloc_ctx(tsd_t *tsd, void *ptr, emap_alloc_ctx_t *alloc_ctx) { + if (config_opt_size_checks) { + emap_alloc_ctx_t dbg_ctx; + emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr, + &dbg_ctx); + if (alloc_ctx->szind != 
dbg_ctx.szind) { + safety_check_fail_sized_dealloc( + /* current_dealloc */ true, ptr, + /* true_size */ sz_size2index(dbg_ctx.szind), + /* input_size */ sz_size2index(alloc_ctx->szind)); + return true; + } + if (alloc_ctx->slab != dbg_ctx.slab) { + safety_check_fail( + "Internal heap corruption detected: " + "mismatch in slab bit"); + return true; + } + } + return false; } JEMALLOC_ALWAYS_INLINE void @@ -2608,147 +2936,63 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); - alloc_ctx_t alloc_ctx, *ctx; - if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) { - /* - * When cache_oblivious is disabled and ptr is not page aligned, - * the allocation was not sampled -- usize can be used to - * determine szind directly. - */ + emap_alloc_ctx_t alloc_ctx; + if (!config_prof) { alloc_ctx.szind = sz_size2index(usize); - alloc_ctx.slab = true; - ctx = &alloc_ctx; - if (config_debug) { - alloc_ctx_t dbg_ctx; - rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); - rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, - rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind, - &dbg_ctx.slab); - assert(dbg_ctx.szind == alloc_ctx.szind); - assert(dbg_ctx.slab == alloc_ctx.slab); - } - } else if (config_prof && opt_prof) { - rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); - rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, - (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); - assert(alloc_ctx.szind == sz_size2index(usize)); - ctx = &alloc_ctx; + alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS); } else { - ctx = NULL; + if (likely(!prof_sample_aligned(ptr))) { + /* + * When the ptr is not page aligned, it was not sampled. + * usize can be trusted to determine szind and slab. + */ + alloc_ctx.szind = sz_size2index(usize); + alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS); + } else if (opt_prof) { + emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, + ptr, &alloc_ctx); + + if (config_opt_safety_checks) { + /* Small alloc may have !slab (sampled). */ + if (unlikely(alloc_ctx.szind != + sz_size2index(usize))) { + safety_check_fail_sized_dealloc( + /* current_dealloc */ true, ptr, + /* true_size */ sz_index2size( + alloc_ctx.szind), + /* input_size */ usize); + } + } + } else { + alloc_ctx.szind = sz_size2index(usize); + alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS); + } + } + bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx); + if (fail) { + /* + * This is a heap corruption bug. In real life we'll crash; for + * the unit test we just want to avoid breaking anything too + * badly to get a test result out. Let's leak instead of trying + * to free. 
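A minimal illustration of the misuse the size check above is meant to catch; mallocx/sdallocx are the documented non-standard API (declared in <malloc_np.h> on FreeBSD), and whether the mismatch is reported depends on the build having the size checks compiled in:

```
#include <malloc_np.h>

int
main(void) {
	void *p = mallocx(100, 0);
	/*
	 * Wrong size on the sized-deallocation path: 100 and 300 map to
	 * different size classes, so the szind comparison above (when
	 * enabled) flags a sized-dealloc mismatch instead of letting the
	 * bogus size corrupt the heap.
	 */
	sdallocx(p, 300, 0);
	return 0;
}
```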
+ */ + return; } if (config_prof && opt_prof) { - prof_free(tsd, ptr, usize, ctx); - } - if (config_stats) { - *tsd_thread_deallocatedp_get(tsd) += usize; + prof_free(tsd, ptr, usize, &alloc_ctx); } - if (likely(!slow_path)) { - isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false); - } else { - isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true); - } -} - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ALLOC_SIZE(2) -je_realloc(void *ptr, size_t arg_size) { - void *ret; - tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - size_t old_usize = 0; - size_t size = arg_size; - - LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); - - if (unlikely(size == 0)) { - size = 1; - } - - if (likely(ptr != NULL)) { - assert(malloc_initialized() || IS_INITIALIZER); - tsd_t *tsd = tsd_fetch(); - - check_entry_exit_locking(tsd_tsdn(tsd)); - - - hook_ralloc_args_t hook_args = {true, {(uintptr_t)ptr, - (uintptr_t)arg_size, 0, 0}}; - - alloc_ctx_t alloc_ctx; - rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); - rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, - (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); - assert(alloc_ctx.szind != SC_NSIZES); - old_usize = sz_index2size(alloc_ctx.szind); - assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); - if (config_prof && opt_prof) { - usize = sz_s2u(size); - if (unlikely(usize == 0 - || usize > SC_LARGE_MAXCLASS)) { - ret = NULL; - } else { - ret = irealloc_prof(tsd, ptr, old_usize, usize, - &alloc_ctx, &hook_args); - } - } else { - if (config_stats) { - usize = sz_s2u(size); - } - ret = iralloc(tsd, ptr, old_usize, size, 0, false, - &hook_args); - } - tsdn = tsd_tsdn(tsd); + isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx, + false); } else { - /* realloc(NULL, size) is equivalent to malloc(size). */ - static_opts_t sopts; - dynamic_opts_t dopts; - - static_opts_init(&sopts); - dynamic_opts_init(&dopts); - - sopts.null_out_result_on_error = true; - sopts.set_errno_on_error = true; - sopts.oom_string = - ": Error in realloc(): out of memory\n"; - - dopts.result = &ret; - dopts.num_items = 1; - dopts.item_size = size; - - imalloc(&sopts, &dopts); - if (sopts.slow) { - uintptr_t args[3] = {(uintptr_t)ptr, arg_size}; - hook_invoke_alloc(hook_alloc_realloc, ret, - (uintptr_t)ret, args); - } - - return ret; - } - - if (unlikely(ret == NULL)) { - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write(": Error in realloc(): " - "out of memory\n"); - abort(); + if (config_fill && slow_path && opt_junk_free) { + junk_free_callback(ptr, usize); } - set_errno(ENOMEM); - } - if (config_stats && likely(ret != NULL)) { - tsd_t *tsd; - - assert(usize == isalloc(tsdn, ret)); - tsd = tsdn_tsd(tsdn); - *tsd_thread_allocatedp_get(tsd) += usize; - *tsd_thread_deallocatedp_get(tsd) += old_usize; + isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx, + true); } - UTRACE(ptr, size, ret); - check_entry_exit_locking(tsdn); - - LOG("core.realloc.exit", "result: %p", ret); - return ret; + thread_dalloc_event(tsd, usize); } JEMALLOC_NOINLINE @@ -2767,79 +3011,149 @@ free_default(void *ptr) { tsd_t *tsd = tsd_fetch_min(); check_entry_exit_locking(tsd_tsdn(tsd)); - tcache_t *tcache; if (likely(tsd_fast(tsd))) { - tsd_assert_fast(tsd); - /* Unconditionally get tcache ptr on fast path. 
*/ - tcache = tsd_tcachep_get(tsd); - ifree(tsd, ptr, tcache, false); + tcache_t *tcache = tcache_get_from_ind(tsd, + TCACHE_IND_AUTOMATIC, /* slow */ false, + /* is_alloc */ false); + ifree(tsd, ptr, tcache, /* slow */ false); } else { - if (likely(tsd_reentrancy_level_get(tsd) == 0)) { - tcache = tcache_get(tsd); - } else { - tcache = NULL; - } + tcache_t *tcache = tcache_get_from_ind(tsd, + TCACHE_IND_AUTOMATIC, /* slow */ true, + /* is_alloc */ false); uintptr_t args_raw[3] = {(uintptr_t)ptr}; hook_invoke_dalloc(hook_dalloc_free, ptr, args_raw); - ifree(tsd, ptr, tcache, true); + ifree(tsd, ptr, tcache, /* slow */ true); } + check_entry_exit_locking(tsd_tsdn(tsd)); } } +JEMALLOC_ALWAYS_INLINE bool +free_fastpath_nonfast_aligned(void *ptr, bool check_prof) { + /* + * free_fastpath do not handle two uncommon cases: 1) sampled profiled + * objects and 2) sampled junk & stash for use-after-free detection. + * Both have special alignments which are used to escape the fastpath. + * + * prof_sample is page-aligned, which covers the UAF check when both + * are enabled (the assertion below). Avoiding redundant checks since + * this is on the fastpath -- at most one runtime branch from this. + */ + if (config_debug && cache_bin_nonfast_aligned(ptr)) { + assert(prof_sample_aligned(ptr)); + } + + if (config_prof && check_prof) { + /* When prof is enabled, the prof_sample alignment is enough. */ + if (prof_sample_aligned(ptr)) { + return true; + } else { + return false; + } + } + + if (config_uaf_detection) { + if (cache_bin_nonfast_aligned(ptr)) { + return true; + } else { + return false; + } + } + + return false; +} + +/* Returns whether or not the free attempt was successful. */ JEMALLOC_ALWAYS_INLINE bool free_fastpath(void *ptr, size_t size, bool size_hint) { tsd_t *tsd = tsd_get(false); - if (unlikely(!tsd || !tsd_fast(tsd))) { + /* The branch gets optimized away unless tsd_get_allocates(). */ + if (unlikely(tsd == NULL)) { return false; } - - tcache_t *tcache = tsd_tcachep_get(tsd); - - alloc_ctx_t alloc_ctx; /* - * If !config_cache_oblivious, we can check PAGE alignment to - * detect sampled objects. Otherwise addresses are - * randomized, and we have to look it up in the rtree anyway. - * See also isfree(). + * The tsd_fast() / initialized checks are folded into the branch + * testing (deallocated_after >= threshold) later in this function. + * The threshold will be set to 0 when !tsd_fast. */ - if (!size_hint || config_cache_oblivious) { - rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); - bool res = rtree_szind_slab_read_fast(tsd_tsdn(tsd), &extents_rtree, - rtree_ctx, (uintptr_t)ptr, - &alloc_ctx.szind, &alloc_ctx.slab); + assert(tsd_fast(tsd) || + *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd) == 0); + + emap_alloc_ctx_t alloc_ctx; + if (!size_hint) { + bool err = emap_alloc_ctx_try_lookup_fast(tsd, + &arena_emap_global, ptr, &alloc_ctx); /* Note: profiled objects will have alloc_ctx.slab set */ - if (!res || !alloc_ctx.slab) { + if (unlikely(err || !alloc_ctx.slab || + free_fastpath_nonfast_aligned(ptr, + /* check_prof */ false))) { return false; } assert(alloc_ctx.szind != SC_NSIZES); } else { /* - * Check for both sizes that are too large, and for sampled objects. - * Sampled objects are always page-aligned. The sampled object check - * will also check for null ptr. + * Check for both sizes that are too large, and for sampled / + * special aligned objects. The alignment check will also check + * for null ptr. 
*/ - if (size > SC_LOOKUP_MAXCLASS || (((uintptr_t)ptr & PAGE_MASK) == 0)) { + if (unlikely(size > SC_LOOKUP_MAXCLASS || + free_fastpath_nonfast_aligned(ptr, + /* check_prof */ true))) { return false; } alloc_ctx.szind = sz_size2index_lookup(size); + /* Max lookup class must be small. */ + assert(alloc_ctx.szind < SC_NBINS); + /* This is a dead store, except when opt size checking is on. */ + alloc_ctx.slab = true; } + /* + * Currently the fastpath only handles small sizes. The branch on + * SC_LOOKUP_MAXCLASS makes sure of it. This lets us avoid checking + * tcache szind upper limit (i.e. tcache_maxclass) as well. + */ + assert(alloc_ctx.slab); - if (unlikely(ticker_trytick(&tcache->gc_ticker))) { + uint64_t deallocated, threshold; + te_free_fastpath_ctx(tsd, &deallocated, &threshold); + + size_t usize = sz_index2size(alloc_ctx.szind); + uint64_t deallocated_after = deallocated + usize; + /* + * Check for events and tsd non-nominal (fast_threshold will be set to + * 0) in a single branch. Note that this handles the uninitialized case + * as well (TSD init will be triggered on the non-fastpath). Therefore + * anything depends on a functional TSD (e.g. the alloc_ctx sanity check + * below) needs to be after this branch. + */ + if (unlikely(deallocated_after >= threshold)) { return false; } + assert(tsd_fast(tsd)); + bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx); + if (fail) { + /* See the comment in isfree. */ + return true; + } + + tcache_t *tcache = tcache_get_from_ind(tsd, TCACHE_IND_AUTOMATIC, + /* slow */ false, /* is_alloc */ false); + cache_bin_t *bin = &tcache->bins[alloc_ctx.szind]; + + /* + * If junking were enabled, this is where we would do it. It's not + * though, since we ensured above that we're on the fast path. Assert + * that to double-check. + */ + assert(!opt_junk_free); - cache_bin_t *bin = tcache_small_bin_get(tcache, alloc_ctx.szind); - cache_bin_info_t *bin_info = &tcache_bin_info[alloc_ctx.szind]; - if (!cache_bin_dalloc_easy(bin, bin_info, ptr)) { + if (!cache_bin_dalloc_easy(bin, ptr)) { return false; } - if (config_stats) { - size_t usize = sz_index2size(alloc_ctx.szind); - *tsd_thread_deallocatedp_get(tsd) += usize; - } + *tsd_thread_deallocatedp_get(tsd) = deallocated_after; return true; } @@ -2950,6 +3264,8 @@ je_valloc(size_t size) { * passed an extra argument for the caller return address, which will be * ignored. */ +#include // defines __GLIBC__ if we are compiling against glibc + JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; @@ -2958,7 +3274,7 @@ JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = je_memalign; # endif -# ifdef CPU_COUNT +# ifdef __GLIBC__ /* * To enable static linking with glibc, the libc specific malloc interface must * be implemented also, so none of glibc's malloc.o functions are added to the @@ -3001,6 +3317,26 @@ int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign); * Begin non-standard functions. 
*/ +JEMALLOC_ALWAYS_INLINE unsigned +mallocx_tcache_get(int flags) { + if (likely((flags & MALLOCX_TCACHE_MASK) == 0)) { + return TCACHE_IND_AUTOMATIC; + } else if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { + return TCACHE_IND_NONE; + } else { + return MALLOCX_TCACHE_GET(flags); + } +} + +JEMALLOC_ALWAYS_INLINE unsigned +mallocx_arena_get(int flags) { + if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { + return MALLOCX_ARENA_GET(flags); + } else { + return ARENA_IND_AUTOMATIC; + } +} + #ifdef JEMALLOC_EXPERIMENTAL_SMALLOCX_API #define JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) x ## y @@ -3045,25 +3381,10 @@ JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT) dopts.num_items = 1; dopts.item_size = size; if (unlikely(flags != 0)) { - if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) { - dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); - } - + dopts.alignment = MALLOCX_ALIGN_GET(flags); dopts.zero = MALLOCX_ZERO_GET(flags); - - if ((flags & MALLOCX_TCACHE_MASK) != 0) { - if ((flags & MALLOCX_TCACHE_MASK) - == MALLOCX_TCACHE_NONE) { - dopts.tcache_ind = TCACHE_IND_NONE; - } else { - dopts.tcache_ind = MALLOCX_TCACHE_GET(flags); - } - } else { - dopts.tcache_ind = TCACHE_IND_AUTOMATIC; - } - - if ((flags & MALLOCX_ARENA_MASK) != 0) - dopts.arena_ind = MALLOCX_ARENA_GET(flags); + dopts.tcache_ind = mallocx_tcache_get(flags); + dopts.arena_ind = mallocx_arena_get(flags); } imalloc(&sopts, &dopts); @@ -3098,25 +3419,10 @@ je_mallocx(size_t size, int flags) { dopts.num_items = 1; dopts.item_size = size; if (unlikely(flags != 0)) { - if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) { - dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); - } - + dopts.alignment = MALLOCX_ALIGN_GET(flags); dopts.zero = MALLOCX_ZERO_GET(flags); - - if ((flags & MALLOCX_TCACHE_MASK) != 0) { - if ((flags & MALLOCX_TCACHE_MASK) - == MALLOCX_TCACHE_NONE) { - dopts.tcache_ind = TCACHE_IND_NONE; - } else { - dopts.tcache_ind = MALLOCX_TCACHE_GET(flags); - } - } else { - dopts.tcache_ind = TCACHE_IND_AUTOMATIC; - } - - if ((flags & MALLOCX_ARENA_MASK) != 0) - dopts.arena_ind = MALLOCX_ARENA_GET(flags); + dopts.tcache_ind = mallocx_tcache_get(flags); + dopts.arena_ind = mallocx_arena_get(flags); } imalloc(&sopts, &dopts); @@ -3139,6 +3445,8 @@ irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, if (tctx == NULL) { return NULL; } + + alignment = prof_sample_align(alignment); if (usize <= SC_SMALL_MAXCLASS) { p = iralloct(tsdn, old_ptr, old_usize, SC_LARGE_MINCLASS, alignment, zero, tcache, @@ -3151,66 +3459,48 @@ irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero, tcache, arena, hook_args); } + assert(prof_sample_aligned(p)); return p; } JEMALLOC_ALWAYS_INLINE void * irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, - size_t alignment, size_t *usize, bool zero, tcache_t *tcache, - arena_t *arena, alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) { + size_t alignment, size_t usize, bool zero, tcache_t *tcache, + arena_t *arena, emap_alloc_ctx_t *alloc_ctx, + hook_ralloc_args_t *hook_args) { + prof_info_t old_prof_info; + prof_info_get_and_reset_recent(tsd, old_ptr, alloc_ctx, &old_prof_info); + bool prof_active = prof_active_get_unlocked(); + bool sample_event = te_prof_sample_event_lookahead(tsd, usize); + prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active, sample_event); void *p; - bool prof_active; - prof_tctx_t *old_tctx, *tctx; - - prof_active = prof_active_get_unlocked(); - 
old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); - tctx = prof_alloc_prep(tsd, *usize, prof_active, false); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize, - *usize, alignment, zero, tcache, arena, tctx, hook_args); + usize, alignment, zero, tcache, arena, tctx, hook_args); } else { p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment, zero, tcache, arena, hook_args); } if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, false); + prof_alloc_rollback(tsd, tctx); return NULL; } - - if (p == old_ptr && alignment != 0) { - /* - * The allocation did not move, so it is possible that the size - * class is smaller than would guarantee the requested - * alignment, and that the alignment constraint was - * serendipitously satisfied. Additionally, old_usize may not - * be the same as the current usize because of in-place large - * reallocation. Therefore, query the actual value of usize. - */ - *usize = isalloc(tsd_tsdn(tsd), p); - } - prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr, - old_usize, old_tctx); + assert(usize == isalloc(tsd_tsdn(tsd), p)); + prof_realloc(tsd, p, size, usize, tctx, prof_active, old_ptr, + old_usize, &old_prof_info, sample_event); return p; } -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ALLOC_SIZE(2) -je_rallocx(void *ptr, size_t size, int flags) { +static void * +do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) { void *p; tsd_t *tsd; size_t usize; size_t old_usize; size_t alignment = MALLOCX_ALIGN_GET(flags); - bool zero = flags & MALLOCX_ZERO; arena_t *arena; - tcache_t *tcache; - - LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, - size, flags); - assert(ptr != NULL); assert(size != 0); @@ -3218,44 +3508,31 @@ je_rallocx(void *ptr, size_t size, int flags) { tsd = tsd_fetch(); check_entry_exit_locking(tsd_tsdn(tsd)); - if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { - unsigned arena_ind = MALLOCX_ARENA_GET(flags); - arena = arena_get(tsd_tsdn(tsd), arena_ind, true); - if (unlikely(arena == NULL)) { - goto label_oom; - } - } else { - arena = NULL; - } + bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true); - if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { - if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { - tcache = NULL; - } else { - tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); - } - } else { - tcache = tcache_get(tsd); + unsigned arena_ind = mallocx_arena_get(flags); + if (arena_get_from_ind(tsd, arena_ind, &arena)) { + goto label_oom; } - alloc_ctx_t alloc_ctx; - rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); - rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, - (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + unsigned tcache_ind = mallocx_tcache_get(flags); + tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind, + /* slow */ true, /* is_alloc */ true); + + emap_alloc_ctx_t alloc_ctx; + emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr, + &alloc_ctx); assert(alloc_ctx.szind != SC_NSIZES); old_usize = sz_index2size(alloc_ctx.szind); assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); + if (aligned_usize_get(size, alignment, &usize, NULL, false)) { + goto label_oom; + } - hook_ralloc_args_t hook_args = {false, {(uintptr_t)ptr, size, flags, - 0}}; + hook_ralloc_args_t hook_args = {is_realloc, {(uintptr_t)ptr, size, + flags, 0}}; if (config_prof && opt_prof) { - usize = (alignment == 0) ? 
- sz_s2u(size) : sz_sa2u(size, alignment); - if (unlikely(usize == 0 - || usize > SC_LARGE_MAXCLASS)) { - goto label_oom; - } - p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, + p = irallocx_prof(tsd, ptr, old_usize, size, alignment, usize, zero, tcache, arena, &alloc_ctx, &hook_args); if (unlikely(p == NULL)) { goto label_oom; @@ -3266,20 +3543,22 @@ je_rallocx(void *ptr, size_t size, int flags) { if (unlikely(p == NULL)) { goto label_oom; } - if (config_stats) { - usize = isalloc(tsd_tsdn(tsd), p); - } + assert(usize == isalloc(tsd_tsdn(tsd), p)); } assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); + thread_alloc_event(tsd, usize); + thread_dalloc_event(tsd, old_usize); - if (config_stats) { - *tsd_thread_allocatedp_get(tsd) += usize; - *tsd_thread_deallocatedp_get(tsd) += old_usize; - } UTRACE(ptr, size, p); check_entry_exit_locking(tsd_tsdn(tsd)); - LOG("core.rallocx.exit", "result: %p", p); + if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize + && !zero) { + size_t excess_len = usize - old_usize; + void *excess_start = (void *)((uintptr_t)p + old_usize); + junk_alloc_callback(excess_start, excess_len); + } + return p; label_oom: if (config_xmalloc && unlikely(opt_xmalloc)) { @@ -3289,10 +3568,103 @@ je_rallocx(void *ptr, size_t size, int flags) { UTRACE(ptr, size, 0); check_entry_exit_locking(tsd_tsdn(tsd)); - LOG("core.rallocx.exit", "result: %p", NULL); return NULL; } +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ALLOC_SIZE(2) +je_rallocx(void *ptr, size_t size, int flags) { + LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, + size, flags); + void *ret = do_rallocx(ptr, size, flags, false); + LOG("core.rallocx.exit", "result: %p", ret); + return ret; +} + +static void * +do_realloc_nonnull_zero(void *ptr) { + if (config_stats) { + atomic_fetch_add_zu(&zero_realloc_count, 1, ATOMIC_RELAXED); + } + if (opt_zero_realloc_action == zero_realloc_action_alloc) { + /* + * The user might have gotten an alloc setting while expecting a + * free setting. If that's the case, we at least try to + * reduce the harm, and turn off the tcache while allocating, so + * that we'll get a true first fit. + */ + return do_rallocx(ptr, 1, MALLOCX_TCACHE_NONE, true); + } else if (opt_zero_realloc_action == zero_realloc_action_free) { + UTRACE(ptr, 0, 0); + tsd_t *tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + + tcache_t *tcache = tcache_get_from_ind(tsd, + TCACHE_IND_AUTOMATIC, /* slow */ true, + /* is_alloc */ false); + uintptr_t args[3] = {(uintptr_t)ptr, 0}; + hook_invoke_dalloc(hook_dalloc_realloc, ptr, args); + ifree(tsd, ptr, tcache, true); + + check_entry_exit_locking(tsd_tsdn(tsd)); + return NULL; + } else { + safety_check_fail("Called realloc(non-null-ptr, 0) with " + "zero_realloc:abort set\n"); + /* In real code, this will never run; the safety check failure + * will call abort. In the unit test, we just want to bail out + * without corrupting internal state that the test needs to + * finish. 
+ */ + return NULL; + } +} + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ALLOC_SIZE(2) +je_realloc(void *ptr, size_t size) { + LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); + + if (likely(ptr != NULL && size != 0)) { + void *ret = do_rallocx(ptr, size, 0, true); + LOG("core.realloc.exit", "result: %p", ret); + return ret; + } else if (ptr != NULL && size == 0) { + void *ret = do_realloc_nonnull_zero(ptr); + LOG("core.realloc.exit", "result: %p", ret); + return ret; + } else { + /* realloc(NULL, size) is equivalent to malloc(size). */ + void *ret; + + static_opts_t sopts; + dynamic_opts_t dopts; + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.null_out_result_on_error = true; + sopts.set_errno_on_error = true; + sopts.oom_string = + ": Error in realloc(): out of memory\n"; + + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + + imalloc(&sopts, &dopts); + if (sopts.slow) { + uintptr_t args[3] = {(uintptr_t)ptr, size}; + hook_invoke_alloc(hook_alloc_realloc, ret, + (uintptr_t)ret, args); + } + LOG("core.realloc.exit", "result: %p", ret); + return ret; + } +} + JEMALLOC_ALWAYS_INLINE size_t ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero) { @@ -3309,51 +3681,46 @@ ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, static size_t ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) { - size_t usize; - - if (tctx == NULL) { + /* Sampled allocation needs to be page aligned. */ + if (tctx == NULL || !prof_sample_aligned(ptr)) { return old_usize; } - usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, - zero); - return usize; + return ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, + zero); } JEMALLOC_ALWAYS_INLINE size_t ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, - size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) { - size_t usize_max, usize; - bool prof_active; - prof_tctx_t *old_tctx, *tctx; + size_t extra, size_t alignment, bool zero, emap_alloc_ctx_t *alloc_ctx) { + /* + * old_prof_info is only used for asserting that the profiling info + * isn't changed by the ixalloc() call. + */ + prof_info_t old_prof_info; + prof_info_get(tsd, ptr, alloc_ctx, &old_prof_info); - prof_active = prof_active_get_unlocked(); - old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx); /* * usize isn't knowable before ixalloc() returns when extra is non-zero. * Therefore, compute its maximum possible value and use that in * prof_alloc_prep() to decide whether to capture a backtrace. * prof_realloc() will use the actual usize to decide whether to sample. */ - if (alignment == 0) { - usize_max = sz_s2u(size+extra); - assert(usize_max > 0 - && usize_max <= SC_LARGE_MAXCLASS); - } else { - usize_max = sz_sa2u(size+extra, alignment); - if (unlikely(usize_max == 0 - || usize_max > SC_LARGE_MAXCLASS)) { - /* - * usize_max is out of range, and chances are that - * allocation will fail, but use the maximum possible - * value and carry on with prof_alloc_prep(), just in - * case allocation succeeds. 
- */ - usize_max = SC_LARGE_MAXCLASS; - } + size_t usize_max; + if (aligned_usize_get(size + extra, alignment, &usize_max, NULL, + false)) { + /* + * usize_max is out of range, and chances are that allocation + * will fail, but use the maximum possible value and carry on + * with prof_alloc_prep(), just in case allocation succeeds. + */ + usize_max = SC_LARGE_MAXCLASS; } - tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); + bool prof_active = prof_active_get_unlocked(); + bool sample_event = te_prof_sample_event_lookahead(tsd, usize_max); + prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active, sample_event); + size_t usize; if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, size, extra, alignment, zero, tctx); @@ -3361,13 +3728,28 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, extra, alignment, zero); } + + /* + * At this point we can still safely get the original profiling + * information associated with the ptr, because (a) the edata_t object + * associated with the ptr still lives and (b) the profiling info + * fields are not touched. "(a)" is asserted in the outer je_xallocx() + * function, and "(b)" is indirectly verified below by checking that + * the alloc_tctx field is unchanged. + */ + prof_info_t prof_info; if (usize == old_usize) { - prof_alloc_rollback(tsd, tctx, false); - return usize; + prof_info_get(tsd, ptr, alloc_ctx, &prof_info); + prof_alloc_rollback(tsd, tctx); + } else { + prof_info_get_and_reset_recent(tsd, ptr, alloc_ctx, &prof_info); + assert(usize <= usize_max); + sample_event = te_prof_sample_event_lookahead(tsd, usize); + prof_realloc(tsd, ptr, size, usize, tctx, prof_active, ptr, + old_usize, &prof_info, sample_event); } - prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize, - old_tctx); + assert(old_prof_info.alloc_tctx == prof_info.alloc_tctx); return usize; } @@ -3376,7 +3758,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) { tsd_t *tsd; size_t usize, old_usize; size_t alignment = MALLOCX_ALIGN_GET(flags); - bool zero = flags & MALLOCX_ZERO; + bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true); LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, " "flags: %d", ptr, size, extra, flags); @@ -3388,10 +3770,17 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) { tsd = tsd_fetch(); check_entry_exit_locking(tsd_tsdn(tsd)); - alloc_ctx_t alloc_ctx; - rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); - rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, - (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + /* + * old_edata is only for verifying that xallocx() keeps the edata_t + * object associated with the ptr (though the content of the edata_t + * object can be changed). + */ + edata_t *old_edata = emap_edata_lookup(tsd_tsdn(tsd), + &arena_emap_global, ptr); + + emap_alloc_ctx_t alloc_ctx; + emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr, + &alloc_ctx); assert(alloc_ctx.szind != SC_NSIZES); old_usize = sz_index2size(alloc_ctx.szind); assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); @@ -3419,13 +3808,25 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) { usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, extra, alignment, zero); } + + /* + * xallocx() should keep using the same edata_t object (though its + * content can be changed). 
+ */ + assert(emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr) + == old_edata); + if (unlikely(usize == old_usize)) { goto label_not_resized; } + thread_alloc_event(tsd, usize); + thread_dalloc_event(tsd, old_usize); - if (config_stats) { - *tsd_thread_allocatedp_get(tsd) += usize; - *tsd_thread_deallocatedp_get(tsd) += old_usize; + if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize && + !zero) { + size_t excess_len = usize - old_usize; + void *excess_start = (void *)((uintptr_t)ptr + old_usize); + junk_alloc_callback(excess_start, excess_len); } label_not_resized: if (unlikely(!tsd_fast(tsd))) { @@ -3475,31 +3876,13 @@ je_dallocx(void *ptr, int flags) { assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); - tsd_t *tsd = tsd_fetch(); + tsd_t *tsd = tsd_fetch_min(); bool fast = tsd_fast(tsd); check_entry_exit_locking(tsd_tsdn(tsd)); - tcache_t *tcache; - if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { - /* Not allowed to be reentrant and specify a custom tcache. */ - assert(tsd_reentrancy_level_get(tsd) == 0); - if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { - tcache = NULL; - } else { - tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); - } - } else { - if (likely(fast)) { - tcache = tsd_tcachep_get(tsd); - assert(tcache == tcache_get(tsd)); - } else { - if (likely(tsd_reentrancy_level_get(tsd) == 0)) { - tcache = tcache_get(tsd); - } else { - tcache = NULL; - } - } - } + unsigned tcache_ind = mallocx_tcache_get(flags); + tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind, !fast, + /* is_alloc */ false); UTRACE(ptr, 0, 0); if (likely(fast)) { @@ -3518,13 +3901,9 @@ je_dallocx(void *ptr, int flags) { JEMALLOC_ALWAYS_INLINE size_t inallocx(tsdn_t *tsdn, size_t size, int flags) { check_entry_exit_locking(tsdn); - size_t usize; - if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) { - usize = sz_s2u(size); - } else { - usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); - } + /* In case of out of range, let the user see it rather than fail. */ + aligned_usize_get(size, MALLOCX_ALIGN_GET(flags), &usize, NULL, false); check_entry_exit_locking(tsdn); return usize; } @@ -3534,33 +3913,14 @@ sdallocx_default(void *ptr, size_t size, int flags) { assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); - tsd_t *tsd = tsd_fetch(); + tsd_t *tsd = tsd_fetch_min(); bool fast = tsd_fast(tsd); size_t usize = inallocx(tsd_tsdn(tsd), size, flags); - assert(usize == isalloc(tsd_tsdn(tsd), ptr)); check_entry_exit_locking(tsd_tsdn(tsd)); - tcache_t *tcache; - if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { - /* Not allowed to be reentrant and specify a custom tcache. 
*/ - assert(tsd_reentrancy_level_get(tsd) == 0); - if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { - tcache = NULL; - } else { - tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); - } - } else { - if (likely(fast)) { - tcache = tsd_tcachep_get(tsd); - assert(tcache == tcache_get(tsd)); - } else { - if (likely(tsd_reentrancy_level_get(tsd) == 0)) { - tcache = tcache_get(tsd); - } else { - tcache = NULL; - } - } - } + unsigned tcache_ind = mallocx_tcache_get(flags); + tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind, !fast, + /* is_alloc */ false); UTRACE(ptr, 0, 0); if (likely(fast)) { @@ -3572,7 +3932,6 @@ sdallocx_default(void *ptr, size_t size, int flags) { isfree(tsd, ptr, usize, tcache, true); } check_entry_exit_locking(tsd_tsdn(tsd)); - } JEMALLOC_EXPORT void JEMALLOC_NOTHROW @@ -3580,7 +3939,7 @@ je_sdallocx(void *ptr, size_t size, int flags) { LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, size, flags); - if (flags !=0 || !free_fastpath(ptr, size, true)) { + if (flags != 0 || !free_fastpath(ptr, size, true)) { sdallocx_default(ptr, size, flags); } @@ -3689,6 +4048,7 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, return ret; } +#define STATS_PRINT_BUFSIZE 65536 JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, const char *opts) { @@ -3698,23 +4058,30 @@ je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, tsdn = tsdn_fetch(); check_entry_exit_locking(tsdn); - stats_print(write_cb, cbopaque, opts); + + if (config_debug) { + stats_print(write_cb, cbopaque, opts); + } else { + buf_writer_t buf_writer; + buf_writer_init(tsdn, &buf_writer, write_cb, cbopaque, NULL, + STATS_PRINT_BUFSIZE); + stats_print(buf_writer_cb, &buf_writer, opts); + buf_writer_terminate(tsdn, &buf_writer); + } + check_entry_exit_locking(tsdn); LOG("core.malloc_stats_print.exit", ""); } +#undef STATS_PRINT_BUFSIZE -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW -je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { - size_t ret; - tsdn_t *tsdn; - - LOG("core.malloc_usable_size.entry", "ptr: %p", ptr); - +JEMALLOC_ALWAYS_INLINE size_t +je_malloc_usable_size_impl(JEMALLOC_USABLE_SIZE_CONST void *ptr) { assert(malloc_initialized() || IS_INITIALIZER); - tsdn = tsdn_fetch(); + tsdn_t *tsdn = tsdn_fetch(); check_entry_exit_locking(tsdn); + size_t ret; if (unlikely(ptr == NULL)) { ret = 0; } else { @@ -3725,12 +4092,211 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { ret = isalloc(tsdn, ptr); } } - check_entry_exit_locking(tsdn); + + return ret; +} + +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { + LOG("core.malloc_usable_size.entry", "ptr: %p", ptr); + + size_t ret = je_malloc_usable_size_impl(ptr); + LOG("core.malloc_usable_size.exit", "result: %zu", ret); return ret; } +#ifdef JEMALLOC_HAVE_MALLOC_SIZE +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +je_malloc_size(const void *ptr) { + LOG("core.malloc_size.entry", "ptr: %p", ptr); + + size_t ret = je_malloc_usable_size_impl(ptr); + + LOG("core.malloc_size.exit", "result: %zu", ret); + return ret; +} +#endif + +static void +batch_alloc_prof_sample_assert(tsd_t *tsd, size_t batch, size_t usize) { + assert(config_prof && opt_prof); + bool prof_sample_event = te_prof_sample_event_lookahead(tsd, + batch * usize); + assert(!prof_sample_event); + size_t surplus; + prof_sample_event = te_prof_sample_event_lookahead_surplus(tsd, + (batch + 1) * 
usize, &surplus); + assert(prof_sample_event); + assert(surplus < usize); +} + +size_t +batch_alloc(void **ptrs, size_t num, size_t size, int flags) { + LOG("core.batch_alloc.entry", + "ptrs: %p, num: %zu, size: %zu, flags: %d", ptrs, num, size, flags); + + tsd_t *tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + + size_t filled = 0; + + if (unlikely(tsd == NULL || tsd_reentrancy_level_get(tsd) > 0)) { + goto label_done; + } + + size_t alignment = MALLOCX_ALIGN_GET(flags); + size_t usize; + if (aligned_usize_get(size, alignment, &usize, NULL, false)) { + goto label_done; + } + szind_t ind = sz_size2index(usize); + bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true); + + /* + * The cache bin and arena will be lazily initialized; it's hard to + * know in advance whether each of them needs to be initialized. + */ + cache_bin_t *bin = NULL; + arena_t *arena = NULL; + + size_t nregs = 0; + if (likely(ind < SC_NBINS)) { + nregs = bin_infos[ind].nregs; + assert(nregs > 0); + } + + while (filled < num) { + size_t batch = num - filled; + size_t surplus = SIZE_MAX; /* Dead store. */ + bool prof_sample_event = config_prof && opt_prof + && prof_active_get_unlocked() + && te_prof_sample_event_lookahead_surplus(tsd, + batch * usize, &surplus); + + if (prof_sample_event) { + /* + * Adjust so that the batch does not trigger prof + * sampling. + */ + batch -= surplus / usize + 1; + batch_alloc_prof_sample_assert(tsd, batch, usize); + } + + size_t progress = 0; + + if (likely(ind < SC_NBINS) && batch >= nregs) { + if (arena == NULL) { + unsigned arena_ind = mallocx_arena_get(flags); + if (arena_get_from_ind(tsd, arena_ind, + &arena)) { + goto label_done; + } + if (arena == NULL) { + arena = arena_choose(tsd, NULL); + } + if (unlikely(arena == NULL)) { + goto label_done; + } + } + size_t arena_batch = batch - batch % nregs; + size_t n = arena_fill_small_fresh(tsd_tsdn(tsd), arena, + ind, ptrs + filled, arena_batch, zero); + progress += n; + filled += n; + } + + if (likely(ind < nhbins) && progress < batch) { + if (bin == NULL) { + unsigned tcache_ind = mallocx_tcache_get(flags); + tcache_t *tcache = tcache_get_from_ind(tsd, + tcache_ind, /* slow */ true, + /* is_alloc */ true); + if (tcache != NULL) { + bin = &tcache->bins[ind]; + } + } + /* + * If we don't have a tcache bin, we don't want to + * immediately give up, because there's the possibility + * that the user explicitly requested to bypass the + * tcache, or that the user explicitly turned off the + * tcache; in such cases, we go through the slow path, + * i.e. the mallocx() call at the end of the while loop. + */ + if (bin != NULL) { + size_t bin_batch = batch - progress; + /* + * n can be less than bin_batch, meaning that + * the cache bin does not have enough memory. + * In such cases, we rely on the slow path, + * i.e. the mallocx() call at the end of the + * while loop, to fill in the cache, and in the + * next iteration of the while loop, the tcache + * will contain a lot of memory, and we can + * harvest them here. Compared to the + * alternative approach where we directly go to + * the arena bins here, the overhead of our + * current approach should usually be minimal, + * since we never try to fetch more memory than + * what a slab contains via the tcache. An + * additional benefit is that the tcache will + * not be empty for the next allocation request. 
+ */ + size_t n = cache_bin_alloc_batch(bin, bin_batch, + ptrs + filled); + if (config_stats) { + bin->tstats.nrequests += n; + } + if (zero) { + for (size_t i = 0; i < n; ++i) { + memset(ptrs[filled + i], 0, + usize); + } + } + if (config_prof && opt_prof + && unlikely(ind >= SC_NBINS)) { + for (size_t i = 0; i < n; ++i) { + prof_tctx_reset_sampled(tsd, + ptrs[filled + i]); + } + } + progress += n; + filled += n; + } + } + + /* + * For thread events other than prof sampling, trigger them as + * if there's a single allocation of size (n * usize). This is + * fine because: + * (a) these events do not alter the allocation itself, and + * (b) it's possible that some event would have been triggered + * multiple times, instead of only once, if the allocations + * were handled individually, but it would do no harm (or + * even be beneficial) to coalesce the triggerings. + */ + thread_alloc_event(tsd, progress * usize); + + if (progress < batch || prof_sample_event) { + void *p = je_mallocx(size, flags); + if (p == NULL) { /* OOM */ + break; + } + if (progress == batch) { + assert(prof_sampled(tsd, p)); + } + ptrs[filled++] = p; + } + } + +label_done: + check_entry_exit_locking(tsd_tsdn(tsd)); + LOG("core.batch_alloc.exit", "result: %zu", filled); + return filled; +} + /* * End non-standard functions. */ @@ -3894,7 +4460,7 @@ _malloc_prefork(void) background_thread_prefork1(tsd_tsdn(tsd)); } /* Break arena prefork into stages to preserve lock order. */ - for (i = 0; i < 8; i++) { + for (i = 0; i < 9; i++) { for (j = 0; j < narenas; j++) { if ((arena = arena_get(tsd_tsdn(tsd), j, false)) != NULL) { @@ -3923,12 +4489,17 @@ _malloc_prefork(void) case 7: arena_prefork7(tsd_tsdn(tsd), arena); break; + case 8: + arena_prefork8(tsd_tsdn(tsd), arena); + break; default: not_reached(); } } } + } prof_prefork1(tsd_tsdn(tsd)); + stats_prefork(tsd_tsdn(tsd)); tsd_prefork(tsd); } @@ -3956,6 +4527,7 @@ _malloc_postfork(void) witness_postfork_parent(tsd_witness_tsdp_get(tsd)); /* Release all mutexes, now that fork() has completed. */ + stats_postfork_parent(tsd_tsdn(tsd)); for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { arena_t *arena; @@ -3985,6 +4557,7 @@ jemalloc_postfork_child(void) { witness_postfork_child(tsd_witness_tsdp_get(tsd)); /* Release all mutexes, now that fork() has completed. 
*/ + stats_postfork_child(tsd_tsdn(tsd)); for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { arena_t *arena; diff --git a/contrib/jemalloc/src/large.c b/contrib/jemalloc/src/large.c index 8e7a781d330092..5fc4bf584c451d 100644 --- a/contrib/jemalloc/src/large.c +++ b/contrib/jemalloc/src/large.c @@ -1,11 +1,11 @@ -#define JEMALLOC_LARGE_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" +#include "jemalloc/internal/emap.h" #include "jemalloc/internal/extent_mmap.h" #include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/prof_recent.h" #include "jemalloc/internal/util.h" /******************************************************************************/ @@ -21,8 +21,7 @@ void * large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero) { size_t ausize; - extent_t *extent; - bool is_zeroed; + edata_t *edata; UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false); assert(!tsdn_null(tsdn) || arena != NULL); @@ -32,163 +31,80 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, return NULL; } - if (config_fill && unlikely(opt_zero)) { - zero = true; - } - /* - * Copy zero into is_zeroed and pass the copy when allocating the - * extent, so that it is possible to make correct junk/zero fill - * decisions below, even if is_zeroed ends up true when zero is false. - */ - is_zeroed = zero; if (likely(!tsdn_null(tsdn))) { arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize); } - if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn, - arena, usize, alignment, &is_zeroed)) == NULL) { + if (unlikely(arena == NULL) || (edata = arena_extent_alloc_large(tsdn, + arena, usize, alignment, zero)) == NULL) { return NULL; } /* See comments in arena_bin_slabs_full_insert(). */ if (!arena_is_auto(arena)) { - /* Insert extent into large. */ + /* Insert edata into large. */ malloc_mutex_lock(tsdn, &arena->large_mtx); - extent_list_append(&arena->large, extent); + edata_list_active_append(&arena->large, edata); malloc_mutex_unlock(tsdn, &arena->large_mtx); } - if (config_prof && arena_prof_accum(tsdn, arena, usize)) { - prof_idump(tsdn); - } - - if (zero) { - assert(is_zeroed); - } else if (config_fill && unlikely(opt_junk_alloc)) { - memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK, - extent_usize_get(extent)); - } arena_decay_tick(tsdn, arena); - return extent_addr_get(extent); + return edata_addr_get(edata); } -static void -large_dalloc_junk_impl(void *ptr, size_t size) { - memset(ptr, JEMALLOC_FREE_JUNK, size); -} -large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl; - -static void -large_dalloc_maybe_junk_impl(void *ptr, size_t size) { - if (config_fill && have_dss && unlikely(opt_junk_free)) { - /* - * Only bother junk filling if the extent isn't about to be - * unmapped. 
- */ - if (opt_retain || (have_dss && extent_in_dss(ptr))) { - large_dalloc_junk(ptr, size); - } - } -} -large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk = - large_dalloc_maybe_junk_impl; - static bool -large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) { - arena_t *arena = extent_arena_get(extent); - size_t oldusize = extent_usize_get(extent); - extent_hooks_t *extent_hooks = extent_hooks_get(arena); - size_t diff = extent_size_get(extent) - (usize + sz_large_pad); +large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) { + arena_t *arena = arena_get_from_edata(edata); + ehooks_t *ehooks = arena_get_ehooks(arena); + size_t old_size = edata_size_get(edata); + size_t old_usize = edata_usize_get(edata); - assert(oldusize > usize); + assert(old_usize > usize); - if (extent_hooks->split == NULL) { + if (ehooks_split_will_fail(ehooks)) { return true; } - /* Split excess pages. */ - if (diff != 0) { - extent_t *trail = extent_split_wrapper(tsdn, arena, - &extent_hooks, extent, usize + sz_large_pad, - sz_size2index(usize), false, diff, SC_NSIZES, false); - if (trail == NULL) { - return true; - } - - if (config_fill && unlikely(opt_junk_free)) { - large_dalloc_maybe_junk(extent_addr_get(trail), - extent_size_get(trail)); - } - - arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail); + bool deferred_work_generated = false; + bool err = pa_shrink(tsdn, &arena->pa_shard, edata, old_size, + usize + sz_large_pad, sz_size2index(usize), + &deferred_work_generated); + if (err) { + return true; } - - arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize); + if (deferred_work_generated) { + arena_handle_deferred_work(tsdn, arena); + } + arena_extent_ralloc_large_shrink(tsdn, arena, edata, old_usize); return false; } static bool -large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize, +large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize, bool zero) { - arena_t *arena = extent_arena_get(extent); - size_t oldusize = extent_usize_get(extent); - extent_hooks_t *extent_hooks = extent_hooks_get(arena); - size_t trailsize = usize - oldusize; + arena_t *arena = arena_get_from_edata(edata); - if (extent_hooks->merge == NULL) { - return true; - } + size_t old_size = edata_size_get(edata); + size_t old_usize = edata_usize_get(edata); + size_t new_size = usize + sz_large_pad; - if (config_fill && unlikely(opt_zero)) { - zero = true; - } - /* - * Copy zero into is_zeroed_trail and pass the copy when allocating the - * extent, so that it is possible to make correct junk/zero fill - * decisions below, even if is_zeroed_trail ends up true when zero is - * false. 
- */ - bool is_zeroed_trail = zero; - bool commit = true; - extent_t *trail; - bool new_mapping; - if ((trail = extents_alloc(tsdn, arena, &extent_hooks, - &arena->extents_dirty, extent_past_get(extent), trailsize, 0, - CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL - || (trail = extents_alloc(tsdn, arena, &extent_hooks, - &arena->extents_muzzy, extent_past_get(extent), trailsize, 0, - CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL) { - if (config_stats) { - new_mapping = false; - } - } else { - if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks, - extent_past_get(extent), trailsize, 0, CACHELINE, false, - SC_NSIZES, &is_zeroed_trail, &commit)) == NULL) { - return true; - } - if (config_stats) { - new_mapping = true; - } - } + szind_t szind = sz_size2index(usize); - if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) { - extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail); - return true; + bool deferred_work_generated = false; + bool err = pa_expand(tsdn, &arena->pa_shard, edata, old_size, new_size, + szind, zero, &deferred_work_generated); + + if (deferred_work_generated) { + arena_handle_deferred_work(tsdn, arena); } - rtree_ctx_t rtree_ctx_fallback; - rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); - szind_t szind = sz_size2index(usize); - extent_szind_set(extent, szind); - rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, - (uintptr_t)extent_addr_get(extent), szind, false); - if (config_stats && new_mapping) { - arena_stats_mapped_add(tsdn, &arena->stats, trailsize); + if (err) { + return true; } if (zero) { - if (config_cache_oblivious) { + if (opt_cache_oblivious) { + assert(sz_large_pad == PAGE); /* * Zero the trailing bytes of the original allocation's * last page, since they are in an indeterminate state. @@ -197,28 +113,23 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize, * of CACHELINE in [0 .. PAGE). */ void *zbase = (void *) - ((uintptr_t)extent_addr_get(extent) + oldusize); + ((uintptr_t)edata_addr_get(edata) + old_usize); void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + PAGE)); size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase; assert(nzero > 0); memset(zbase, 0, nzero); } - assert(is_zeroed_trail); - } else if (config_fill && unlikely(opt_junk_alloc)) { - memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize), - JEMALLOC_ALLOC_JUNK, usize - oldusize); } - - arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize); + arena_extent_ralloc_large_expand(tsdn, arena, edata, old_usize); return false; } bool -large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, +large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min, size_t usize_max, bool zero) { - size_t oldusize = extent_usize_get(extent); + size_t oldusize = edata_usize_get(edata); /* The following should have been caught by callers. */ assert(usize_min > 0 && usize_max <= SC_LARGE_MAXCLASS); @@ -228,16 +139,15 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, if (usize_max > oldusize) { /* Attempt to expand the allocation in-place. */ - if (!large_ralloc_no_move_expand(tsdn, extent, usize_max, + if (!large_ralloc_no_move_expand(tsdn, edata, usize_max, zero)) { - arena_decay_tick(tsdn, extent_arena_get(extent)); + arena_decay_tick(tsdn, arena_get_from_edata(edata)); return false; } /* Try again, this time with usize_min. 
*/ if (usize_min < usize_max && usize_min > oldusize && - large_ralloc_no_move_expand(tsdn, extent, usize_min, - zero)) { - arena_decay_tick(tsdn, extent_arena_get(extent)); + large_ralloc_no_move_expand(tsdn, edata, usize_min, zero)) { + arena_decay_tick(tsdn, arena_get_from_edata(edata)); return false; } } @@ -247,14 +157,14 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, * the new size. */ if (oldusize >= usize_min && oldusize <= usize_max) { - arena_decay_tick(tsdn, extent_arena_get(extent)); + arena_decay_tick(tsdn, arena_get_from_edata(edata)); return false; } /* Attempt to shrink the allocation in-place. */ if (oldusize > usize_max) { - if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) { - arena_decay_tick(tsdn, extent_arena_get(extent)); + if (!large_ralloc_no_move_shrink(tsdn, edata, usize_max)) { + arena_decay_tick(tsdn, arena_get_from_edata(edata)); return false; } } @@ -274,9 +184,9 @@ void * large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize, size_t alignment, bool zero, tcache_t *tcache, hook_ralloc_args_t *hook_args) { - extent_t *extent = iealloc(tsdn, ptr); + edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); - size_t oldusize = extent_usize_get(extent); + size_t oldusize = edata_usize_get(edata); /* The following should have been caught by callers. */ assert(usize > 0 && usize <= SC_LARGE_MAXCLASS); /* Both allocation sizes must be large to avoid a move. */ @@ -284,11 +194,11 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize, && usize >= SC_LARGE_MINCLASS); /* Try to avoid moving the allocation. */ - if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) { + if (!large_ralloc_no_move(tsdn, edata, usize, usize, zero)) { hook_invoke_expand(hook_args->is_realloc ? hook_expand_realloc : hook_expand_rallocx, ptr, oldusize, usize, (uintptr_t)ptr, hook_args->args); - return extent_addr_get(extent); + return edata_addr_get(edata); } /* @@ -309,87 +219,104 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize, ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args); size_t copysize = (usize < oldusize) ? usize : oldusize; - memcpy(ret, extent_addr_get(extent), copysize); - isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true); + memcpy(ret, edata_addr_get(edata), copysize); + isdalloct(tsdn, edata_addr_get(edata), oldusize, tcache, NULL, true); return ret; } /* - * junked_locked indicates whether the extent's data have been junk-filled, and - * whether the arena's large_mtx is currently held. + * locked indicates whether the arena's large_mtx is currently held. */ static void -large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent, - bool junked_locked) { - if (!junked_locked) { +large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata, + bool locked) { + if (!locked) { /* See comments in arena_bin_slabs_full_insert(). */ if (!arena_is_auto(arena)) { malloc_mutex_lock(tsdn, &arena->large_mtx); - extent_list_remove(&arena->large, extent); + edata_list_active_remove(&arena->large, edata); malloc_mutex_unlock(tsdn, &arena->large_mtx); } - large_dalloc_maybe_junk(extent_addr_get(extent), - extent_usize_get(extent)); } else { /* Only hold the large_mtx if necessary. 
*/ if (!arena_is_auto(arena)) { malloc_mutex_assert_owner(tsdn, &arena->large_mtx); - extent_list_remove(&arena->large, extent); + edata_list_active_remove(&arena->large, edata); } } - arena_extent_dalloc_large_prep(tsdn, arena, extent); + arena_extent_dalloc_large_prep(tsdn, arena, edata); } static void -large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { - extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; - arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent); +large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata) { + bool deferred_work_generated = false; + pa_dalloc(tsdn, &arena->pa_shard, edata, &deferred_work_generated); + if (deferred_work_generated) { + arena_handle_deferred_work(tsdn, arena); + } } void -large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) { - large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true); +large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata) { + large_dalloc_prep_impl(tsdn, arena_get_from_edata(edata), edata, true); } void -large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) { - large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent); +large_dalloc_finish(tsdn_t *tsdn, edata_t *edata) { + large_dalloc_finish_impl(tsdn, arena_get_from_edata(edata), edata); } void -large_dalloc(tsdn_t *tsdn, extent_t *extent) { - arena_t *arena = extent_arena_get(extent); - large_dalloc_prep_impl(tsdn, arena, extent, false); - large_dalloc_finish_impl(tsdn, arena, extent); +large_dalloc(tsdn_t *tsdn, edata_t *edata) { + arena_t *arena = arena_get_from_edata(edata); + large_dalloc_prep_impl(tsdn, arena, edata, false); + large_dalloc_finish_impl(tsdn, arena, edata); arena_decay_tick(tsdn, arena); } size_t -large_salloc(tsdn_t *tsdn, const extent_t *extent) { - return extent_usize_get(extent); -} - -prof_tctx_t * -large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) { - return extent_prof_tctx_get(extent); +large_salloc(tsdn_t *tsdn, const edata_t *edata) { + return edata_usize_get(edata); } void -large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) { - extent_prof_tctx_set(extent, tctx); +large_prof_info_get(tsd_t *tsd, edata_t *edata, prof_info_t *prof_info, + bool reset_recent) { + assert(prof_info != NULL); + + prof_tctx_t *alloc_tctx = edata_prof_tctx_get(edata); + prof_info->alloc_tctx = alloc_tctx; + + if ((uintptr_t)alloc_tctx > (uintptr_t)1U) { + nstime_copy(&prof_info->alloc_time, + edata_prof_alloc_time_get(edata)); + prof_info->alloc_size = edata_prof_alloc_size_get(edata); + if (reset_recent) { + /* + * Reset the pointer on the recent allocation record, + * so that this allocation is recorded as released. 
+ */ + prof_recent_alloc_reset(tsd, edata); + } + } } -void -large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) { - large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U); +static void +large_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) { + edata_prof_tctx_set(edata, tctx); } -nstime_t -large_prof_alloc_time_get(const extent_t *extent) { - return extent_prof_alloc_time_get(extent); +void +large_prof_tctx_reset(edata_t *edata) { + large_prof_tctx_set(edata, (prof_tctx_t *)(uintptr_t)1U); } void -large_prof_alloc_time_set(extent_t *extent, nstime_t t) { - extent_prof_alloc_time_set(extent, t); +large_prof_info_set(edata_t *edata, prof_tctx_t *tctx, size_t size) { + nstime_t t; + nstime_prof_init_update(&t); + edata_prof_alloc_time_set(edata, &t); + edata_prof_alloc_size_set(edata, size); + edata_prof_recent_alloc_init(edata); + large_prof_tctx_set(edata, tctx); } diff --git a/contrib/jemalloc/src/malloc_io.c b/contrib/jemalloc/src/malloc_io.c index cda589c467c7d2..93a30497fc958e 100644 --- a/contrib/jemalloc/src/malloc_io.c +++ b/contrib/jemalloc/src/malloc_io.c @@ -1,4 +1,3 @@ -#define JEMALLOC_MALLOC_IO_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" @@ -53,7 +52,6 @@ /******************************************************************************/ /* Function prototypes for non-inline static functions. */ -static void wrtmessage(void *cbopaque, const char *s); #define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1) static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p); @@ -68,7 +66,7 @@ static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, /******************************************************************************/ /* malloc_message() setup. */ -static void +void wrtmessage(void *cbopaque, const char *s) { malloc_write_fd(STDERR_FILENO, s, strlen(s)); } @@ -149,10 +147,10 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) { break; case '-': neg = true; - /* Fall through. */ + JEMALLOC_FALLTHROUGH; case '+': p++; - /* Fall through. */ + JEMALLOC_FALLTHROUGH; default: goto label_prefix; } @@ -303,7 +301,7 @@ d2s(intmax_t x, char sign, char *s, size_t *slen_p) { if (!neg) { break; } - /* Fall through. */ + JEMALLOC_FALLTHROUGH; case ' ': case '+': s--; @@ -337,6 +335,7 @@ x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) { return s; } +JEMALLOC_COLD size_t malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { size_t i; @@ -362,7 +361,11 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { if (!left_justify && pad_len != 0) { \ size_t j; \ for (j = 0; j < pad_len; j++) { \ - APPEND_C(' '); \ + if (pad_zero) { \ + APPEND_C('0'); \ + } else { \ + APPEND_C(' '); \ + } \ } \ } \ /* Value. */ \ @@ -434,6 +437,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { unsigned char len = '?'; char *s; size_t slen; + bool first_width_digit = true; + bool pad_zero = false; f++; /* Flags. 
*/ @@ -470,7 +475,12 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { width = -width; } break; - case '0': case '1': case '2': case '3': case '4': + case '0': + if (first_width_digit) { + pad_zero = true; + } + JEMALLOC_FALLTHROUGH; + case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { uintmax_t uwidth; set_errno(0); @@ -478,6 +488,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { assert(uwidth != UINTMAX_MAX || get_errno() != ERANGE); width = (int)uwidth; + first_width_digit = false; break; } default: break; @@ -535,6 +546,18 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { intmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[D2S_BUFSIZE]; + /* + * Outputting negative, zero-padded numbers + * would require a nontrivial rework of the + * interaction between the width and padding + * (since 0 padding goes between the '-' and the + * number, while ' ' padding goes either before + * the - or after the number. Since we + * currently don't ever need 0-padded negative + * numbers, just don't bother supporting it. + */ + assert(!pad_zero); + GET_ARG_NUMERIC(val, len); s = d2s(val, (plus_plus ? '+' : (plus_space ? ' ' : '-')), buf, &slen); @@ -634,8 +657,8 @@ malloc_snprintf(char *str, size_t size, const char *format, ...) { } void -malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, va_list ap) { +malloc_vcprintf(write_cb_t *write_cb, void *cbopaque, const char *format, + va_list ap) { char buf[MALLOC_PRINTF_BUFSIZE]; if (write_cb == NULL) { @@ -658,8 +681,7 @@ malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, */ JEMALLOC_FORMAT_PRINTF(3, 4) void -malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, ...) { +malloc_cprintf(write_cb_t *write_cb, void *cbopaque, const char *format, ...) { va_list ap; va_start(ap, format); diff --git a/contrib/jemalloc/src/mutex.c b/contrib/jemalloc/src/mutex.c index 88a7730c64c140..1eedd6262830e1 100644 --- a/contrib/jemalloc/src/mutex.c +++ b/contrib/jemalloc/src/mutex.c @@ -1,4 +1,3 @@ -#define JEMALLOC_MUTEX_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" @@ -10,6 +9,12 @@ #define _CRT_SPINCOUNT 4000 #endif +/* + * Based on benchmark results, a fixed spin with this amount of retries works + * well for our critical sections. + */ +int64_t opt_mutex_max_spin = 600; + /******************************************************************************/ /* Data. */ @@ -57,13 +62,13 @@ _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, void malloc_mutex_lock_slow(malloc_mutex_t *mutex) { mutex_prof_data_t *data = &mutex->prof_data; - nstime_t before = NSTIME_ZERO_INITIALIZER; + nstime_t before; if (ncpus == 1) { goto label_spin_done; } - int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN; + int cnt = 0; do { spin_cpu_spinwait(); if (!atomic_load_b(&mutex->locked, ATOMIC_RELAXED) @@ -71,7 +76,7 @@ malloc_mutex_lock_slow(malloc_mutex_t *mutex) { data->n_spin_acquired++; return; } - } while (cnt++ < max_cnt); + } while (cnt++ < opt_mutex_max_spin || opt_mutex_max_spin == -1); if (!config_stats) { /* Only spin is useful when stats is off. */ @@ -79,7 +84,7 @@ malloc_mutex_lock_slow(malloc_mutex_t *mutex) { return; } label_spin_done: - nstime_update(&before); + nstime_init_update(&before); /* Copy before to after to avoid clock skews. 
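In mutex.c, the compile-time `MALLOC_MUTEX_MAX_SPIN` becomes the runtime-tunable `opt_mutex_max_spin`, with `-1` meaning spin indefinitely before blocking. A hedged sketch of the spin-then-block acquisition loop using plain pthreads (the real code uses a CPU pause hint and its own mutex type rather than `sched_yield`):

```
#include <pthread.h>
#include <sched.h>
#include <stdint.h>

/* Tunable retry count; -1 means spin without bound (illustrative default). */
static int64_t opt_max_spin = 600;

/* Busy-wait briefly for the lock, then fall back to a blocking acquire. */
static void
lock_slow(pthread_mutex_t *mtx) {
	int64_t cnt = 0;
	do {
		sched_yield();	/* stand-in for a CPU spin-wait hint */
		if (pthread_mutex_trylock(mtx) == 0) {
			return;	/* acquired during the spin phase */
		}
	} while (cnt++ < opt_max_spin || opt_max_spin == -1);
	/* Spinning did not help; block until the holder releases. */
	pthread_mutex_lock(mtx);
}

int
main(void) {
	pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
	lock_slow(&mtx);
	pthread_mutex_unlock(&mtx);
	return 0;
}
```

Bounding the spin keeps the worst case close to a plain blocking lock while still avoiding the syscall when the holder releases quickly.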
*/ nstime_t after; nstime_copy(&after, &before); @@ -115,8 +120,8 @@ malloc_mutex_lock_slow(malloc_mutex_t *mutex) { static void mutex_prof_data_init(mutex_prof_data_t *data) { memset(data, 0, sizeof(mutex_prof_data_t)); - nstime_init(&data->max_wait_time, 0); - nstime_init(&data->tot_wait_time, 0); + nstime_init_zero(&data->max_wait_time); + nstime_init_zero(&data->tot_wait_time); data->prev_owner = NULL; } diff --git a/contrib/jemalloc/src/mutex_pool.c b/contrib/jemalloc/src/mutex_pool.c deleted file mode 100644 index f24d10e44a8072..00000000000000 --- a/contrib/jemalloc/src/mutex_pool.c +++ /dev/null @@ -1,18 +0,0 @@ -#define JEMALLOC_MUTEX_POOL_C_ - -#include "jemalloc/internal/jemalloc_preamble.h" -#include "jemalloc/internal/jemalloc_internal_includes.h" - -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/mutex_pool.h" - -bool -mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank) { - for (int i = 0; i < MUTEX_POOL_SIZE; ++i) { - if (malloc_mutex_init(&pool->mutexes[i], name, rank, - malloc_mutex_address_ordered)) { - return true; - } - } - return false; -} diff --git a/contrib/jemalloc/src/nstime.c b/contrib/jemalloc/src/nstime.c index 71db353965ff9b..a1a53777febbe5 100644 --- a/contrib/jemalloc/src/nstime.c +++ b/contrib/jemalloc/src/nstime.c @@ -8,96 +8,169 @@ #define BILLION UINT64_C(1000000000) #define MILLION UINT64_C(1000000) +static void +nstime_set_initialized(nstime_t *time) { +#ifdef JEMALLOC_DEBUG + time->magic = NSTIME_MAGIC; +#endif +} + +static void +nstime_assert_initialized(const nstime_t *time) { +#ifdef JEMALLOC_DEBUG + /* + * Some parts (e.g. stats) rely on memset to zero initialize. Treat + * these as valid initialization. + */ + assert(time->magic == NSTIME_MAGIC || + (time->magic == 0 && time->ns == 0)); +#endif +} + +static void +nstime_pair_assert_initialized(const nstime_t *t1, const nstime_t *t2) { + nstime_assert_initialized(t1); + nstime_assert_initialized(t2); +} + +static void +nstime_initialize_operand(nstime_t *time) { + /* + * Operations like nstime_add may have the initial operand being zero + * initialized (covered by the assert below). Full-initialize needed + * before changing it to non-zero. + */ + nstime_assert_initialized(time); + nstime_set_initialized(time); +} + void nstime_init(nstime_t *time, uint64_t ns) { + nstime_set_initialized(time); time->ns = ns; } void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) { + nstime_set_initialized(time); time->ns = sec * BILLION + nsec; } uint64_t nstime_ns(const nstime_t *time) { + nstime_assert_initialized(time); return time->ns; } uint64_t nstime_msec(const nstime_t *time) { + nstime_assert_initialized(time); return time->ns / MILLION; } uint64_t nstime_sec(const nstime_t *time) { + nstime_assert_initialized(time); return time->ns / BILLION; } uint64_t nstime_nsec(const nstime_t *time) { + nstime_assert_initialized(time); return time->ns % BILLION; } void nstime_copy(nstime_t *time, const nstime_t *source) { + /* Source is required to be initialized. 
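The new nstime code tags timestamps with a magic value in debug builds so that arithmetic on an uninitialized `nstime_t` trips an assert, while still accepting memset-zeroed structs (stats code relies on zero initialization). An illustrative sketch with made-up names and a made-up magic constant:

```
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define TIME_MAGIC 0xb8a9985cU	/* arbitrary sentinel, not jemalloc's value */

typedef struct {
	uint64_t ns;
#ifndef NDEBUG
	uint32_t magic;	/* present only in debug builds */
#endif
} my_time_t;

static void
my_time_init(my_time_t *t, uint64_t ns) {
#ifndef NDEBUG
	t->magic = TIME_MAGIC;
#endif
	t->ns = ns;
}

static void
my_time_assert_initialized(const my_time_t *t) {
#ifndef NDEBUG
	/* Accept memset-to-zero objects as valid, like the jemalloc check. */
	assert(t->magic == TIME_MAGIC || (t->magic == 0 && t->ns == 0));
#else
	(void)t;
#endif
}

static uint64_t
my_time_ns(const my_time_t *t) {
	my_time_assert_initialized(t);
	return t->ns;
}

int
main(void) {
	my_time_t t;
	memset(&t, 0, sizeof(t));
	my_time_assert_initialized(&t);	/* zeroed memory is accepted */
	my_time_init(&t, 123);
	return my_time_ns(&t) == 123 ? 0 : 1;
}
```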
*/ + nstime_assert_initialized(source); *time = *source; + nstime_assert_initialized(time); } int nstime_compare(const nstime_t *a, const nstime_t *b) { + nstime_pair_assert_initialized(a, b); return (a->ns > b->ns) - (a->ns < b->ns); } void nstime_add(nstime_t *time, const nstime_t *addend) { + nstime_pair_assert_initialized(time, addend); assert(UINT64_MAX - time->ns >= addend->ns); + nstime_initialize_operand(time); time->ns += addend->ns; } void nstime_iadd(nstime_t *time, uint64_t addend) { + nstime_assert_initialized(time); assert(UINT64_MAX - time->ns >= addend); + nstime_initialize_operand(time); time->ns += addend; } void nstime_subtract(nstime_t *time, const nstime_t *subtrahend) { + nstime_pair_assert_initialized(time, subtrahend); assert(nstime_compare(time, subtrahend) >= 0); + /* No initialize operand -- subtraction must be initialized. */ time->ns -= subtrahend->ns; } void nstime_isubtract(nstime_t *time, uint64_t subtrahend) { + nstime_assert_initialized(time); assert(time->ns >= subtrahend); + /* No initialize operand -- subtraction must be initialized. */ time->ns -= subtrahend; } void nstime_imultiply(nstime_t *time, uint64_t multiplier) { + nstime_assert_initialized(time); assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) << 2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns)); + nstime_initialize_operand(time); time->ns *= multiplier; } void nstime_idivide(nstime_t *time, uint64_t divisor) { + nstime_assert_initialized(time); assert(divisor != 0); + nstime_initialize_operand(time); time->ns /= divisor; } uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor) { + nstime_pair_assert_initialized(time, divisor); assert(divisor->ns != 0); + /* No initialize operand -- *time itself remains unchanged. */ return time->ns / divisor->ns; } +/* Returns time since *past, w/o updating *past. */ +uint64_t +nstime_ns_since(const nstime_t *past) { + nstime_assert_initialized(past); + + nstime_t now; + nstime_copy(&now, past); + nstime_update(&now); + + assert(nstime_compare(&now, past) >= 0); + return now.ns - past->ns; +} + #ifdef _WIN32 # define NSTIME_MONOTONIC true static void @@ -152,7 +225,42 @@ nstime_monotonic_impl(void) { } nstime_monotonic_t *JET_MUTABLE nstime_monotonic = nstime_monotonic_impl; -static bool +prof_time_res_t opt_prof_time_res = + prof_time_res_default; + +const char *prof_time_res_mode_names[] = { + "default", + "high", +}; + + +static void +nstime_get_realtime(nstime_t *time) { +#if defined(JEMALLOC_HAVE_CLOCK_REALTIME) && !defined(_WIN32) + struct timespec ts; + + clock_gettime(CLOCK_REALTIME, &ts); + nstime_init2(time, ts.tv_sec, ts.tv_nsec); +#else + unreachable(); +#endif +} + +static void +nstime_prof_update_impl(nstime_t *time) { + nstime_t old_time; + + nstime_copy(&old_time, time); + + if (opt_prof_time_res == prof_time_res_high) { + nstime_get_realtime(time); + } else { + nstime_get(time); + } +} +nstime_prof_update_t *JET_MUTABLE nstime_prof_update = nstime_prof_update_impl; + +static void nstime_update_impl(nstime_t *time) { nstime_t old_time; @@ -162,9 +270,20 @@ nstime_update_impl(nstime_t *time) { /* Handle non-monotonic clocks. 
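`nstime_prof_update` selects its clock from `opt_prof_time_res`: the `"high"` setting reads `CLOCK_REALTIME`, otherwise the regular (cheaper) time source is used. A small sketch of that selection; the monotonic clock below is only a stand-in for the default path, and the option name is illustrative:

```
#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef enum { time_res_default, time_res_high } time_res_t;

/* Whether to pay for wall-clock profiling timestamps. */
static time_res_t opt_time_res = time_res_default;

static uint64_t
timespec_to_ns(struct timespec ts) {
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

static uint64_t
prof_timestamp_ns(void) {
	struct timespec ts;
	if (opt_time_res == time_res_high) {
		clock_gettime(CLOCK_REALTIME, &ts);
	} else {
		clock_gettime(CLOCK_MONOTONIC, &ts);
	}
	return timespec_to_ns(&ts);
}

int
main(void) {
	printf("%llu\n", (unsigned long long)prof_timestamp_ns());
	return 0;
}
```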
*/ if (unlikely(nstime_compare(&old_time, time) > 0)) { nstime_copy(time, &old_time); - return true; } - - return false; } nstime_update_t *JET_MUTABLE nstime_update = nstime_update_impl; + +void +nstime_init_update(nstime_t *time) { + nstime_init_zero(time); + nstime_update(time); +} + +void +nstime_prof_init_update(nstime_t *time) { + nstime_init_zero(time); + nstime_prof_update(time); +} + + diff --git a/contrib/jemalloc/src/pa.c b/contrib/jemalloc/src/pa.c new file mode 100644 index 00000000000000..eb7e4620ea53e4 --- /dev/null +++ b/contrib/jemalloc/src/pa.c @@ -0,0 +1,277 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/san.h" +#include "jemalloc/internal/hpa.h" + +static void +pa_nactive_add(pa_shard_t *shard, size_t add_pages) { + atomic_fetch_add_zu(&shard->nactive, add_pages, ATOMIC_RELAXED); +} + +static void +pa_nactive_sub(pa_shard_t *shard, size_t sub_pages) { + assert(atomic_load_zu(&shard->nactive, ATOMIC_RELAXED) >= sub_pages); + atomic_fetch_sub_zu(&shard->nactive, sub_pages, ATOMIC_RELAXED); +} + +bool +pa_central_init(pa_central_t *central, base_t *base, bool hpa, + hpa_hooks_t *hpa_hooks) { + bool err; + if (hpa) { + err = hpa_central_init(¢ral->hpa, base, hpa_hooks); + if (err) { + return true; + } + } + return false; +} + +bool +pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central, + emap_t *emap, base_t *base, unsigned ind, pa_shard_stats_t *stats, + malloc_mutex_t *stats_mtx, nstime_t *cur_time, + size_t pac_oversize_threshold, ssize_t dirty_decay_ms, + ssize_t muzzy_decay_ms) { + /* This will change eventually, but for now it should hold. */ + assert(base_ind_get(base) == ind); + if (edata_cache_init(&shard->edata_cache, base)) { + return true; + } + + if (pac_init(tsdn, &shard->pac, base, emap, &shard->edata_cache, + cur_time, pac_oversize_threshold, dirty_decay_ms, muzzy_decay_ms, + &stats->pac_stats, stats_mtx)) { + return true; + } + + shard->ind = ind; + + shard->ever_used_hpa = false; + atomic_store_b(&shard->use_hpa, false, ATOMIC_RELAXED); + + atomic_store_zu(&shard->nactive, 0, ATOMIC_RELAXED); + + shard->stats_mtx = stats_mtx; + shard->stats = stats; + memset(shard->stats, 0, sizeof(*shard->stats)); + + shard->central = central; + shard->emap = emap; + shard->base = base; + + return false; +} + +bool +pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard, + const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts) { + if (hpa_shard_init(&shard->hpa_shard, &shard->central->hpa, shard->emap, + shard->base, &shard->edata_cache, shard->ind, hpa_opts)) { + return true; + } + if (sec_init(tsdn, &shard->hpa_sec, shard->base, &shard->hpa_shard.pai, + hpa_sec_opts)) { + return true; + } + shard->ever_used_hpa = true; + atomic_store_b(&shard->use_hpa, true, ATOMIC_RELAXED); + + return false; +} + +void +pa_shard_disable_hpa(tsdn_t *tsdn, pa_shard_t *shard) { + atomic_store_b(&shard->use_hpa, false, ATOMIC_RELAXED); + if (shard->ever_used_hpa) { + sec_disable(tsdn, &shard->hpa_sec); + hpa_shard_disable(tsdn, &shard->hpa_shard); + } +} + +void +pa_shard_reset(tsdn_t *tsdn, pa_shard_t *shard) { + atomic_store_zu(&shard->nactive, 0, ATOMIC_RELAXED); + if (shard->ever_used_hpa) { + sec_flush(tsdn, &shard->hpa_sec); + } +} + +static bool +pa_shard_uses_hpa(pa_shard_t *shard) { + return atomic_load_b(&shard->use_hpa, ATOMIC_RELAXED); +} + +void +pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard) { + pac_destroy(tsdn, &shard->pac); + if (shard->ever_used_hpa) 
{ + sec_flush(tsdn, &shard->hpa_sec); + hpa_shard_disable(tsdn, &shard->hpa_shard); + } +} + +static pai_t * +pa_get_pai(pa_shard_t *shard, edata_t *edata) { + return (edata_pai_get(edata) == EXTENT_PAI_PAC + ? &shard->pac.pai : &shard->hpa_sec.pai); +} + +edata_t * +pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment, + bool slab, szind_t szind, bool zero, bool guarded, + bool *deferred_work_generated) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + assert(!guarded || alignment <= PAGE); + + edata_t *edata = NULL; + if (!guarded && pa_shard_uses_hpa(shard)) { + edata = pai_alloc(tsdn, &shard->hpa_sec.pai, size, alignment, + zero, /* guarded */ false, slab, deferred_work_generated); + } + /* + * Fall back to the PAC if the HPA is off or couldn't serve the given + * allocation request. + */ + if (edata == NULL) { + edata = pai_alloc(tsdn, &shard->pac.pai, size, alignment, zero, + guarded, slab, deferred_work_generated); + } + if (edata != NULL) { + assert(edata_size_get(edata) == size); + pa_nactive_add(shard, size >> LG_PAGE); + emap_remap(tsdn, shard->emap, edata, szind, slab); + edata_szind_set(edata, szind); + edata_slab_set(edata, slab); + if (slab && (size > 2 * PAGE)) { + emap_register_interior(tsdn, shard->emap, edata, szind); + } + assert(edata_arena_ind_get(edata) == shard->ind); + } + return edata; +} + +bool +pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size, + size_t new_size, szind_t szind, bool zero, bool *deferred_work_generated) { + assert(new_size > old_size); + assert(edata_size_get(edata) == old_size); + assert((new_size & PAGE_MASK) == 0); + if (edata_guarded_get(edata)) { + return true; + } + size_t expand_amount = new_size - old_size; + + pai_t *pai = pa_get_pai(shard, edata); + + bool error = pai_expand(tsdn, pai, edata, old_size, new_size, zero, + deferred_work_generated); + if (error) { + return true; + } + + pa_nactive_add(shard, expand_amount >> LG_PAGE); + edata_szind_set(edata, szind); + emap_remap(tsdn, shard->emap, edata, szind, /* slab */ false); + return false; +} + +bool +pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size, + size_t new_size, szind_t szind, bool *deferred_work_generated) { + assert(new_size < old_size); + assert(edata_size_get(edata) == old_size); + assert((new_size & PAGE_MASK) == 0); + if (edata_guarded_get(edata)) { + return true; + } + size_t shrink_amount = old_size - new_size; + + pai_t *pai = pa_get_pai(shard, edata); + bool error = pai_shrink(tsdn, pai, edata, old_size, new_size, + deferred_work_generated); + if (error) { + return true; + } + pa_nactive_sub(shard, shrink_amount >> LG_PAGE); + + edata_szind_set(edata, szind); + emap_remap(tsdn, shard->emap, edata, szind, /* slab */ false); + return false; +} + +void +pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, + bool *deferred_work_generated) { + emap_remap(tsdn, shard->emap, edata, SC_NSIZES, /* slab */ false); + if (edata_slab_get(edata)) { + emap_deregister_interior(tsdn, shard->emap, edata); + /* + * The slab state of the extent isn't cleared. It may be used + * by the pai implementation, e.g. to make caching decisions. 
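`pa_alloc` tries the HPA (hugepage-backed) path first when it is enabled and the request is not guarded, then falls back to the PAC. A simplified sketch of that two-tier fallback with invented names; the real tiers share the abstract `pai_t` interface rather than calling `malloc`:

```
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

typedef struct {
	bool use_fast_tier;
} shard_t;

/* Hypothetical tiers: the fast tier may decline, the general tier must not. */
static void *
fast_tier_alloc(size_t size) {
	(void)size;
	return NULL;	/* e.g. out of hugepage-backed space */
}

static void *
general_tier_alloc(size_t size) {
	return malloc(size);
}

static void *
shard_alloc(shard_t *shard, size_t size) {
	void *p = NULL;
	if (shard->use_fast_tier) {
		p = fast_tier_alloc(size);
	}
	if (p == NULL) {
		/* Fast tier off or unable to serve the request: fall back. */
		p = general_tier_alloc(size);
	}
	return p;
}

int
main(void) {
	shard_t s = { .use_fast_tier = true };
	void *p = shard_alloc(&s, 64);
	free(p);
	return p == NULL;
}
```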
+ */ + } + edata_addr_set(edata, edata_base_get(edata)); + edata_szind_set(edata, SC_NSIZES); + pa_nactive_sub(shard, edata_size_get(edata) >> LG_PAGE); + pai_t *pai = pa_get_pai(shard, edata); + pai_dalloc(tsdn, pai, edata, deferred_work_generated); +} + +bool +pa_shard_retain_grow_limit_get_set(tsdn_t *tsdn, pa_shard_t *shard, + size_t *old_limit, size_t *new_limit) { + return pac_retain_grow_limit_get_set(tsdn, &shard->pac, old_limit, + new_limit); +} + +bool +pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state, + ssize_t decay_ms, pac_purge_eagerness_t eagerness) { + return pac_decay_ms_set(tsdn, &shard->pac, state, decay_ms, eagerness); +} + +ssize_t +pa_decay_ms_get(pa_shard_t *shard, extent_state_t state) { + return pac_decay_ms_get(&shard->pac, state); +} + +void +pa_shard_set_deferral_allowed(tsdn_t *tsdn, pa_shard_t *shard, + bool deferral_allowed) { + if (pa_shard_uses_hpa(shard)) { + hpa_shard_set_deferral_allowed(tsdn, &shard->hpa_shard, + deferral_allowed); + } +} + +void +pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard) { + if (pa_shard_uses_hpa(shard)) { + hpa_shard_do_deferred_work(tsdn, &shard->hpa_shard); + } +} + +/* + * Get time until next deferred work ought to happen. If there are multiple + * things that have been deferred, this function calculates the time until + * the soonest of those things. + */ +uint64_t +pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard) { + uint64_t time = pai_time_until_deferred_work(tsdn, &shard->pac.pai); + if (time == BACKGROUND_THREAD_DEFERRED_MIN) { + return time; + } + + if (pa_shard_uses_hpa(shard)) { + uint64_t hpa = + pai_time_until_deferred_work(tsdn, &shard->hpa_shard.pai); + if (hpa < time) { + time = hpa; + } + } + return time; +} diff --git a/contrib/jemalloc/src/pa_extra.c b/contrib/jemalloc/src/pa_extra.c new file mode 100644 index 00000000000000..0f488be69c5dde --- /dev/null +++ b/contrib/jemalloc/src/pa_extra.c @@ -0,0 +1,191 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +/* + * This file is logically part of the PA module. While pa.c contains the core + * allocator functionality, this file contains boring integration functionality; + * things like the pre- and post- fork handlers, and stats merging for CTL + * refreshes. 
+ */ + +void +pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard) { + malloc_mutex_prefork(tsdn, &shard->pac.decay_dirty.mtx); + malloc_mutex_prefork(tsdn, &shard->pac.decay_muzzy.mtx); +} + +void +pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard) { + if (shard->ever_used_hpa) { + sec_prefork2(tsdn, &shard->hpa_sec); + } +} + +void +pa_shard_prefork3(tsdn_t *tsdn, pa_shard_t *shard) { + malloc_mutex_prefork(tsdn, &shard->pac.grow_mtx); + if (shard->ever_used_hpa) { + hpa_shard_prefork3(tsdn, &shard->hpa_shard); + } +} + +void +pa_shard_prefork4(tsdn_t *tsdn, pa_shard_t *shard) { + ecache_prefork(tsdn, &shard->pac.ecache_dirty); + ecache_prefork(tsdn, &shard->pac.ecache_muzzy); + ecache_prefork(tsdn, &shard->pac.ecache_retained); + if (shard->ever_used_hpa) { + hpa_shard_prefork4(tsdn, &shard->hpa_shard); + } +} + +void +pa_shard_prefork5(tsdn_t *tsdn, pa_shard_t *shard) { + edata_cache_prefork(tsdn, &shard->edata_cache); +} + +void +pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard) { + edata_cache_postfork_parent(tsdn, &shard->edata_cache); + ecache_postfork_parent(tsdn, &shard->pac.ecache_dirty); + ecache_postfork_parent(tsdn, &shard->pac.ecache_muzzy); + ecache_postfork_parent(tsdn, &shard->pac.ecache_retained); + malloc_mutex_postfork_parent(tsdn, &shard->pac.grow_mtx); + malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_dirty.mtx); + malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_muzzy.mtx); + if (shard->ever_used_hpa) { + sec_postfork_parent(tsdn, &shard->hpa_sec); + hpa_shard_postfork_parent(tsdn, &shard->hpa_shard); + } +} + +void +pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard) { + edata_cache_postfork_child(tsdn, &shard->edata_cache); + ecache_postfork_child(tsdn, &shard->pac.ecache_dirty); + ecache_postfork_child(tsdn, &shard->pac.ecache_muzzy); + ecache_postfork_child(tsdn, &shard->pac.ecache_retained); + malloc_mutex_postfork_child(tsdn, &shard->pac.grow_mtx); + malloc_mutex_postfork_child(tsdn, &shard->pac.decay_dirty.mtx); + malloc_mutex_postfork_child(tsdn, &shard->pac.decay_muzzy.mtx); + if (shard->ever_used_hpa) { + sec_postfork_child(tsdn, &shard->hpa_sec); + hpa_shard_postfork_child(tsdn, &shard->hpa_shard); + } +} + +void +pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive, size_t *ndirty, + size_t *nmuzzy) { + *nactive += atomic_load_zu(&shard->nactive, ATOMIC_RELAXED); + *ndirty += ecache_npages_get(&shard->pac.ecache_dirty); + *nmuzzy += ecache_npages_get(&shard->pac.ecache_muzzy); +} + +void +pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard, + pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out, + hpa_shard_stats_t *hpa_stats_out, sec_stats_t *sec_stats_out, + size_t *resident) { + cassert(config_stats); + + pa_shard_stats_out->pac_stats.retained += + ecache_npages_get(&shard->pac.ecache_retained) << LG_PAGE; + pa_shard_stats_out->edata_avail += atomic_load_zu( + &shard->edata_cache.count, ATOMIC_RELAXED); + + size_t resident_pgs = 0; + resident_pgs += atomic_load_zu(&shard->nactive, ATOMIC_RELAXED); + resident_pgs += ecache_npages_get(&shard->pac.ecache_dirty); + *resident += (resident_pgs << LG_PAGE); + + /* Dirty decay stats */ + locked_inc_u64_unsynchronized( + &pa_shard_stats_out->pac_stats.decay_dirty.npurge, + locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx), + &shard->pac.stats->decay_dirty.npurge)); + locked_inc_u64_unsynchronized( + &pa_shard_stats_out->pac_stats.decay_dirty.nmadvise, + locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx), + &shard->pac.stats->decay_dirty.nmadvise)); + 
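The prefork/postfork handlers in pa_extra.c follow the usual fork-safety discipline: take every shard lock in rank order before `fork()`, release in the parent, and release (or reinitialize) in the child, so the child never inherits a mutex held by a thread that no longer exists. A generic sketch using `pthread_atfork`; jemalloc wires these handlers up through its own arena prefork machinery instead:

```
#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void
prefork(void) {
	/* Always in the same order, matching normal lock ordering. */
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);
}

static void
postfork_parent(void) {
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

static void
postfork_child(void) {
	/* In the child it is also common to reinitialize the mutexes. */
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

int
main(void) {
	return pthread_atfork(prefork, postfork_parent, postfork_child);
}
```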
locked_inc_u64_unsynchronized( + &pa_shard_stats_out->pac_stats.decay_dirty.purged, + locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx), + &shard->pac.stats->decay_dirty.purged)); + + /* Muzzy decay stats */ + locked_inc_u64_unsynchronized( + &pa_shard_stats_out->pac_stats.decay_muzzy.npurge, + locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx), + &shard->pac.stats->decay_muzzy.npurge)); + locked_inc_u64_unsynchronized( + &pa_shard_stats_out->pac_stats.decay_muzzy.nmadvise, + locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx), + &shard->pac.stats->decay_muzzy.nmadvise)); + locked_inc_u64_unsynchronized( + &pa_shard_stats_out->pac_stats.decay_muzzy.purged, + locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx), + &shard->pac.stats->decay_muzzy.purged)); + + atomic_load_add_store_zu(&pa_shard_stats_out->pac_stats.abandoned_vm, + atomic_load_zu(&shard->pac.stats->abandoned_vm, ATOMIC_RELAXED)); + + for (pszind_t i = 0; i < SC_NPSIZES; i++) { + size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes, + retained_bytes; + dirty = ecache_nextents_get(&shard->pac.ecache_dirty, i); + muzzy = ecache_nextents_get(&shard->pac.ecache_muzzy, i); + retained = ecache_nextents_get(&shard->pac.ecache_retained, i); + dirty_bytes = ecache_nbytes_get(&shard->pac.ecache_dirty, i); + muzzy_bytes = ecache_nbytes_get(&shard->pac.ecache_muzzy, i); + retained_bytes = ecache_nbytes_get(&shard->pac.ecache_retained, + i); + + estats_out[i].ndirty = dirty; + estats_out[i].nmuzzy = muzzy; + estats_out[i].nretained = retained; + estats_out[i].dirty_bytes = dirty_bytes; + estats_out[i].muzzy_bytes = muzzy_bytes; + estats_out[i].retained_bytes = retained_bytes; + } + + if (shard->ever_used_hpa) { + hpa_shard_stats_merge(tsdn, &shard->hpa_shard, hpa_stats_out); + sec_stats_merge(tsdn, &shard->hpa_sec, sec_stats_out); + } +} + +static void +pa_shard_mtx_stats_read_single(tsdn_t *tsdn, mutex_prof_data_t *mutex_prof_data, + malloc_mutex_t *mtx, int ind) { + malloc_mutex_lock(tsdn, mtx); + malloc_mutex_prof_read(tsdn, &mutex_prof_data[ind], mtx); + malloc_mutex_unlock(tsdn, mtx); +} + +void +pa_shard_mtx_stats_read(tsdn_t *tsdn, pa_shard_t *shard, + mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]) { + pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data, + &shard->edata_cache.mtx, arena_prof_mutex_extent_avail); + pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data, + &shard->pac.ecache_dirty.mtx, arena_prof_mutex_extents_dirty); + pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data, + &shard->pac.ecache_muzzy.mtx, arena_prof_mutex_extents_muzzy); + pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data, + &shard->pac.ecache_retained.mtx, arena_prof_mutex_extents_retained); + pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data, + &shard->pac.decay_dirty.mtx, arena_prof_mutex_decay_dirty); + pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data, + &shard->pac.decay_muzzy.mtx, arena_prof_mutex_decay_muzzy); + + if (shard->ever_used_hpa) { + pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data, + &shard->hpa_shard.mtx, arena_prof_mutex_hpa_shard); + pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data, + &shard->hpa_shard.grow_mtx, + arena_prof_mutex_hpa_shard_grow); + sec_mutex_stats_read(tsdn, &shard->hpa_sec, + &mutex_prof_data[arena_prof_mutex_hpa_sec]); + } +} diff --git a/contrib/jemalloc/src/pac.c b/contrib/jemalloc/src/pac.c new file mode 100644 index 00000000000000..53e3d823758e8b --- /dev/null +++ b/contrib/jemalloc/src/pac.c @@ -0,0 +1,587 @@ +#include 
"jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/pac.h" +#include "jemalloc/internal/san.h" + +static edata_t *pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, + size_t alignment, bool zero, bool guarded, bool frequent_reuse, + bool *deferred_work_generated); +static bool pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, + size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated); +static bool pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, + size_t old_size, size_t new_size, bool *deferred_work_generated); +static void pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, + bool *deferred_work_generated); +static uint64_t pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self); + +static inline void +pac_decay_data_get(pac_t *pac, extent_state_t state, + decay_t **r_decay, pac_decay_stats_t **r_decay_stats, ecache_t **r_ecache) { + switch(state) { + case extent_state_dirty: + *r_decay = &pac->decay_dirty; + *r_decay_stats = &pac->stats->decay_dirty; + *r_ecache = &pac->ecache_dirty; + return; + case extent_state_muzzy: + *r_decay = &pac->decay_muzzy; + *r_decay_stats = &pac->stats->decay_muzzy; + *r_ecache = &pac->ecache_muzzy; + return; + default: + unreachable(); + } +} + +bool +pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap, + edata_cache_t *edata_cache, nstime_t *cur_time, + size_t pac_oversize_threshold, ssize_t dirty_decay_ms, + ssize_t muzzy_decay_ms, pac_stats_t *pac_stats, malloc_mutex_t *stats_mtx) { + unsigned ind = base_ind_get(base); + /* + * Delay coalescing for dirty extents despite the disruptive effect on + * memory layout for best-fit extent allocation, since cached extents + * are likely to be reused soon after deallocation, and the cost of + * merging/splitting extents is non-trivial. + */ + if (ecache_init(tsdn, &pac->ecache_dirty, extent_state_dirty, ind, + /* delay_coalesce */ true)) { + return true; + } + /* + * Coalesce muzzy extents immediately, because operations on them are in + * the critical path much less often than for dirty extents. + */ + if (ecache_init(tsdn, &pac->ecache_muzzy, extent_state_muzzy, ind, + /* delay_coalesce */ false)) { + return true; + } + /* + * Coalesce retained extents immediately, in part because they will + * never be evicted (and therefore there's no opportunity for delayed + * coalescing), but also because operations on retained extents are not + * in the critical path. 
+ */ + if (ecache_init(tsdn, &pac->ecache_retained, extent_state_retained, + ind, /* delay_coalesce */ false)) { + return true; + } + exp_grow_init(&pac->exp_grow); + if (malloc_mutex_init(&pac->grow_mtx, "extent_grow", + WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) { + return true; + } + atomic_store_zu(&pac->oversize_threshold, pac_oversize_threshold, + ATOMIC_RELAXED); + if (decay_init(&pac->decay_dirty, cur_time, dirty_decay_ms)) { + return true; + } + if (decay_init(&pac->decay_muzzy, cur_time, muzzy_decay_ms)) { + return true; + } + if (san_bump_alloc_init(&pac->sba)) { + return true; + } + + pac->base = base; + pac->emap = emap; + pac->edata_cache = edata_cache; + pac->stats = pac_stats; + pac->stats_mtx = stats_mtx; + atomic_store_zu(&pac->extent_sn_next, 0, ATOMIC_RELAXED); + + pac->pai.alloc = &pac_alloc_impl; + pac->pai.alloc_batch = &pai_alloc_batch_default; + pac->pai.expand = &pac_expand_impl; + pac->pai.shrink = &pac_shrink_impl; + pac->pai.dalloc = &pac_dalloc_impl; + pac->pai.dalloc_batch = &pai_dalloc_batch_default; + pac->pai.time_until_deferred_work = &pac_time_until_deferred_work; + + return false; +} + +static inline bool +pac_may_have_muzzy(pac_t *pac) { + return pac_decay_ms_get(pac, extent_state_muzzy) != 0; +} + +static edata_t * +pac_alloc_real(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size, + size_t alignment, bool zero, bool guarded) { + assert(!guarded || alignment <= PAGE); + + edata_t *edata = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_dirty, + NULL, size, alignment, zero, guarded); + + if (edata == NULL && pac_may_have_muzzy(pac)) { + edata = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_muzzy, + NULL, size, alignment, zero, guarded); + } + if (edata == NULL) { + edata = ecache_alloc_grow(tsdn, pac, ehooks, + &pac->ecache_retained, NULL, size, alignment, zero, + guarded); + if (config_stats && edata != NULL) { + atomic_fetch_add_zu(&pac->stats->pac_mapped, size, + ATOMIC_RELAXED); + } + } + + return edata; +} + +static edata_t * +pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size, + size_t alignment, bool zero, bool frequent_reuse) { + assert(alignment <= PAGE); + + edata_t *edata; + if (san_bump_enabled() && frequent_reuse) { + edata = san_bump_alloc(tsdn, &pac->sba, pac, ehooks, size, + zero); + } else { + size_t size_with_guards = san_two_side_guarded_sz(size); + /* Alloc a non-guarded extent first.*/ + edata = pac_alloc_real(tsdn, pac, ehooks, size_with_guards, + /* alignment */ PAGE, zero, /* guarded */ false); + if (edata != NULL) { + /* Add guards around it. */ + assert(edata_size_get(edata) == size_with_guards); + san_guard_pages_two_sided(tsdn, ehooks, edata, + pac->emap, true); + } + } + assert(edata == NULL || (edata_guarded_get(edata) && + edata_size_get(edata) == size)); + + return edata; +} + +static edata_t * +pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, + bool zero, bool guarded, bool frequent_reuse, + bool *deferred_work_generated) { + pac_t *pac = (pac_t *)self; + ehooks_t *ehooks = pac_ehooks_get(pac); + + edata_t *edata = NULL; + /* + * The condition is an optimization - not frequently reused guarded + * allocations are never put in the ecache. pac_alloc_real also + * doesn't grow retained for guarded allocations. So pac_alloc_real + * for such allocations would always return NULL. 
+ * */ + if (!guarded || frequent_reuse) { + edata = pac_alloc_real(tsdn, pac, ehooks, size, alignment, + zero, guarded); + } + if (edata == NULL && guarded) { + /* No cached guarded extents; creating a new one. */ + edata = pac_alloc_new_guarded(tsdn, pac, ehooks, size, + alignment, zero, frequent_reuse); + } + + return edata; +} + +static bool +pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, + size_t new_size, bool zero, bool *deferred_work_generated) { + pac_t *pac = (pac_t *)self; + ehooks_t *ehooks = pac_ehooks_get(pac); + + size_t mapped_add = 0; + size_t expand_amount = new_size - old_size; + + if (ehooks_merge_will_fail(ehooks)) { + return true; + } + edata_t *trail = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_dirty, + edata, expand_amount, PAGE, zero, /* guarded*/ false); + if (trail == NULL) { + trail = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_muzzy, + edata, expand_amount, PAGE, zero, /* guarded*/ false); + } + if (trail == NULL) { + trail = ecache_alloc_grow(tsdn, pac, ehooks, + &pac->ecache_retained, edata, expand_amount, PAGE, zero, + /* guarded */ false); + mapped_add = expand_amount; + } + if (trail == NULL) { + return true; + } + if (extent_merge_wrapper(tsdn, pac, ehooks, edata, trail)) { + extent_dalloc_wrapper(tsdn, pac, ehooks, trail); + return true; + } + if (config_stats && mapped_add > 0) { + atomic_fetch_add_zu(&pac->stats->pac_mapped, mapped_add, + ATOMIC_RELAXED); + } + return false; +} + +static bool +pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, + size_t new_size, bool *deferred_work_generated) { + pac_t *pac = (pac_t *)self; + ehooks_t *ehooks = pac_ehooks_get(pac); + + size_t shrink_amount = old_size - new_size; + + if (ehooks_split_will_fail(ehooks)) { + return true; + } + + edata_t *trail = extent_split_wrapper(tsdn, pac, ehooks, edata, + new_size, shrink_amount, /* holding_core_locks */ false); + if (trail == NULL) { + return true; + } + ecache_dalloc(tsdn, pac, ehooks, &pac->ecache_dirty, trail); + *deferred_work_generated = true; + return false; +} + +static void +pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, + bool *deferred_work_generated) { + pac_t *pac = (pac_t *)self; + ehooks_t *ehooks = pac_ehooks_get(pac); + + if (edata_guarded_get(edata)) { + /* + * Because cached guarded extents do exact fit only, large + * guarded extents are restored on dalloc eagerly (otherwise + * they will not be reused efficiently). Slab sizes have a + * limited number of size classes, and tend to cycle faster. + * + * In the case where coalesce is restrained (VirtualFree on + * Windows), guarded extents are also not cached -- otherwise + * during arena destroy / reset, the retained extents would not + * be whole regions (i.e. they are split between regular and + * guarded). + */ + if (!edata_slab_get(edata) || !maps_coalesce) { + assert(edata_size_get(edata) >= SC_LARGE_MINCLASS || + !maps_coalesce); + san_unguard_pages_two_sided(tsdn, ehooks, edata, + pac->emap); + } + } + + ecache_dalloc(tsdn, pac, ehooks, &pac->ecache_dirty, edata); + /* Purging of deallocated pages is deferred */ + *deferred_work_generated = true; +} + +static inline uint64_t +pac_ns_until_purge(tsdn_t *tsdn, decay_t *decay, size_t npages) { + if (malloc_mutex_trylock(tsdn, &decay->mtx)) { + /* Use minimal interval if decay is contended. 
*/ + return BACKGROUND_THREAD_DEFERRED_MIN; + } + uint64_t result = decay_ns_until_purge(decay, npages, + ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD); + + malloc_mutex_unlock(tsdn, &decay->mtx); + return result; +} + +static uint64_t +pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) { + uint64_t time; + pac_t *pac = (pac_t *)self; + + time = pac_ns_until_purge(tsdn, + &pac->decay_dirty, + ecache_npages_get(&pac->ecache_dirty)); + if (time == BACKGROUND_THREAD_DEFERRED_MIN) { + return time; + } + + uint64_t muzzy = pac_ns_until_purge(tsdn, + &pac->decay_muzzy, + ecache_npages_get(&pac->ecache_muzzy)); + if (muzzy < time) { + time = muzzy; + } + return time; +} + +bool +pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit, + size_t *new_limit) { + pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0); + if (new_limit != NULL) { + size_t limit = *new_limit; + /* Grow no more than the new limit. */ + if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) { + return true; + } + } + + malloc_mutex_lock(tsdn, &pac->grow_mtx); + if (old_limit != NULL) { + *old_limit = sz_pind2sz(pac->exp_grow.limit); + } + if (new_limit != NULL) { + pac->exp_grow.limit = new_ind; + } + malloc_mutex_unlock(tsdn, &pac->grow_mtx); + + return false; +} + +static size_t +pac_stash_decayed(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, + size_t npages_limit, size_t npages_decay_max, + edata_list_inactive_t *result) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + ehooks_t *ehooks = pac_ehooks_get(pac); + + /* Stash extents according to npages_limit. */ + size_t nstashed = 0; + while (nstashed < npages_decay_max) { + edata_t *edata = ecache_evict(tsdn, pac, ehooks, ecache, + npages_limit); + if (edata == NULL) { + break; + } + edata_list_inactive_append(result, edata); + nstashed += edata_size_get(edata) >> LG_PAGE; + } + return nstashed; +} + +static size_t +pac_decay_stashed(tsdn_t *tsdn, pac_t *pac, decay_t *decay, + pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay, + edata_list_inactive_t *decay_extents) { + bool err; + + size_t nmadvise = 0; + size_t nunmapped = 0; + size_t npurged = 0; + + ehooks_t *ehooks = pac_ehooks_get(pac); + + bool try_muzzy = !fully_decay + && pac_decay_ms_get(pac, extent_state_muzzy) != 0; + + for (edata_t *edata = edata_list_inactive_first(decay_extents); edata != + NULL; edata = edata_list_inactive_first(decay_extents)) { + edata_list_inactive_remove(decay_extents, edata); + + size_t size = edata_size_get(edata); + size_t npages = size >> LG_PAGE; + + nmadvise++; + npurged += npages; + + switch (ecache->state) { + case extent_state_active: + not_reached(); + case extent_state_dirty: + if (try_muzzy) { + err = extent_purge_lazy_wrapper(tsdn, ehooks, + edata, /* offset */ 0, size); + if (!err) { + ecache_dalloc(tsdn, pac, ehooks, + &pac->ecache_muzzy, edata); + break; + } + } + JEMALLOC_FALLTHROUGH; + case extent_state_muzzy: + extent_dalloc_wrapper(tsdn, pac, ehooks, edata); + nunmapped += npages; + break; + case extent_state_retained: + default: + not_reached(); + } + } + + if (config_stats) { + LOCKEDINT_MTX_LOCK(tsdn, *pac->stats_mtx); + locked_inc_u64(tsdn, LOCKEDINT_MTX(*pac->stats_mtx), + &decay_stats->npurge, 1); + locked_inc_u64(tsdn, LOCKEDINT_MTX(*pac->stats_mtx), + &decay_stats->nmadvise, nmadvise); + locked_inc_u64(tsdn, LOCKEDINT_MTX(*pac->stats_mtx), + &decay_stats->purged, npurged); + LOCKEDINT_MTX_UNLOCK(tsdn, *pac->stats_mtx); + atomic_fetch_sub_zu(&pac->stats->pac_mapped, + nunmapped << LG_PAGE, 
ATOMIC_RELAXED); + } + + return npurged; +} + +/* + * npages_limit: Decay at most npages_decay_max pages without violating the + * invariant: (ecache_npages_get(ecache) >= npages_limit). We need an upper + * bound on number of pages in order to prevent unbounded growth (namely in + * stashed), otherwise unbounded new pages could be added to extents during the + * current decay run, so that the purging thread never finishes. + */ +static void +pac_decay_to_limit(tsdn_t *tsdn, pac_t *pac, decay_t *decay, + pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay, + size_t npages_limit, size_t npages_decay_max) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 1); + + if (decay->purging || npages_decay_max == 0) { + return; + } + decay->purging = true; + malloc_mutex_unlock(tsdn, &decay->mtx); + + edata_list_inactive_t decay_extents; + edata_list_inactive_init(&decay_extents); + size_t npurge = pac_stash_decayed(tsdn, pac, ecache, npages_limit, + npages_decay_max, &decay_extents); + if (npurge != 0) { + size_t npurged = pac_decay_stashed(tsdn, pac, decay, + decay_stats, ecache, fully_decay, &decay_extents); + assert(npurged == npurge); + } + + malloc_mutex_lock(tsdn, &decay->mtx); + decay->purging = false; +} + +void +pac_decay_all(tsdn_t *tsdn, pac_t *pac, decay_t *decay, + pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay) { + malloc_mutex_assert_owner(tsdn, &decay->mtx); + pac_decay_to_limit(tsdn, pac, decay, decay_stats, ecache, fully_decay, + /* npages_limit */ 0, ecache_npages_get(ecache)); +} + +static void +pac_decay_try_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay, + pac_decay_stats_t *decay_stats, ecache_t *ecache, + size_t current_npages, size_t npages_limit) { + if (current_npages > npages_limit) { + pac_decay_to_limit(tsdn, pac, decay, decay_stats, ecache, + /* fully_decay */ false, npages_limit, + current_npages - npages_limit); + } +} + +bool +pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay, + pac_decay_stats_t *decay_stats, ecache_t *ecache, + pac_purge_eagerness_t eagerness) { + malloc_mutex_assert_owner(tsdn, &decay->mtx); + + /* Purge all or nothing if the option is disabled. */ + ssize_t decay_ms = decay_ms_read(decay); + if (decay_ms <= 0) { + if (decay_ms == 0) { + pac_decay_to_limit(tsdn, pac, decay, decay_stats, + ecache, /* fully_decay */ false, + /* npages_limit */ 0, ecache_npages_get(ecache)); + } + return false; + } + + /* + * If the deadline has been reached, advance to the current epoch and + * purge to the new limit if necessary. Note that dirty pages created + * during the current epoch are not subject to purge until a future + * epoch, so as a result purging only happens during epoch advances, or + * being triggered by background threads (scheduled event). 
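Decay purging in pac.c is split into two phases: `pac_stash_decayed` evicts extents beyond the page limit onto a private list, and `pac_decay_stashed` then purges them with the decay mutex dropped. A toy sketch of that stash-then-purge structure, using a bare linked list and page counters instead of real extents and `madvise`:

```
#include <stddef.h>
#include <stdio.h>

typedef struct extent {
	struct extent *next;
	size_t npages;
} extent_t;

typedef struct {
	extent_t *head;
	size_t npages;
} cache_t;

/* Pop one extent while the cache is over its page limit. */
static extent_t *
cache_evict(cache_t *cache, size_t npages_limit) {
	if (cache->npages <= npages_limit || cache->head == NULL) {
		return NULL;
	}
	extent_t *e = cache->head;
	cache->head = e->next;
	cache->npages -= e->npages;
	return e;
}

static size_t
purge_to_limit(cache_t *cache, size_t npages_limit) {
	/* Phase 1: stash extents beyond the limit. */
	extent_t *stash = NULL;
	extent_t *e;
	while ((e = cache_evict(cache, npages_limit)) != NULL) {
		e->next = stash;
		stash = e;
	}
	/* Phase 2: purge the stashed extents (real code would madvise). */
	size_t purged = 0;
	for (e = stash; e != NULL; e = e->next) {
		purged += e->npages;
	}
	return purged;
}

int
main(void) {
	extent_t e1 = { NULL, 4 }, e2 = { &e1, 8 };
	cache_t cache = { &e2, 12 };
	printf("purged %zu pages\n", purge_to_limit(&cache, 4));
	return 0;
}
```

Separating eviction from purging bounds the amount of work done per pass and keeps the slow purge calls out of the cache's critical section.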
+ */ + nstime_t time; + nstime_init_update(&time); + size_t npages_current = ecache_npages_get(ecache); + bool epoch_advanced = decay_maybe_advance_epoch(decay, &time, + npages_current); + if (eagerness == PAC_PURGE_ALWAYS + || (epoch_advanced && eagerness == PAC_PURGE_ON_EPOCH_ADVANCE)) { + size_t npages_limit = decay_npages_limit_get(decay); + pac_decay_try_purge(tsdn, pac, decay, decay_stats, ecache, + npages_current, npages_limit); + } + + return epoch_advanced; +} + +bool +pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state, + ssize_t decay_ms, pac_purge_eagerness_t eagerness) { + decay_t *decay; + pac_decay_stats_t *decay_stats; + ecache_t *ecache; + pac_decay_data_get(pac, state, &decay, &decay_stats, &ecache); + + if (!decay_ms_valid(decay_ms)) { + return true; + } + + malloc_mutex_lock(tsdn, &decay->mtx); + /* + * Restart decay backlog from scratch, which may cause many dirty pages + * to be immediately purged. It would conceptually be possible to map + * the old backlog onto the new backlog, but there is no justification + * for such complexity since decay_ms changes are intended to be + * infrequent, either between the {-1, 0, >0} states, or a one-time + * arbitrary change during initial arena configuration. + */ + nstime_t cur_time; + nstime_init_update(&cur_time); + decay_reinit(decay, &cur_time, decay_ms); + pac_maybe_decay_purge(tsdn, pac, decay, decay_stats, ecache, eagerness); + malloc_mutex_unlock(tsdn, &decay->mtx); + + return false; +} + +ssize_t +pac_decay_ms_get(pac_t *pac, extent_state_t state) { + decay_t *decay; + pac_decay_stats_t *decay_stats; + ecache_t *ecache; + pac_decay_data_get(pac, state, &decay, &decay_stats, &ecache); + return decay_ms_read(decay); +} + +void +pac_reset(tsdn_t *tsdn, pac_t *pac) { + /* + * No-op for now; purging is still done at the arena-level. It should + * get moved in here, though. + */ + (void)tsdn; + (void)pac; +} + +void +pac_destroy(tsdn_t *tsdn, pac_t *pac) { + assert(ecache_npages_get(&pac->ecache_dirty) == 0); + assert(ecache_npages_get(&pac->ecache_muzzy) == 0); + /* + * Iterate over the retained extents and destroy them. This gives the + * extent allocator underlying the extent hooks an opportunity to unmap + * all retained memory without having to keep its own metadata + * structures. In practice, virtual memory for dss-allocated extents is + * leaked here, so best practice is to avoid dss for arenas to be + * destroyed, or provide custom extent hooks that track retained + * dss-based extents for later reuse. + */ + ehooks_t *ehooks = pac_ehooks_get(pac); + edata_t *edata; + while ((edata = ecache_evict(tsdn, pac, ehooks, + &pac->ecache_retained, 0)) != NULL) { + extent_destroy_wrapper(tsdn, pac, ehooks, edata); + } +} diff --git a/contrib/jemalloc/src/pages.c b/contrib/jemalloc/src/pages.c index 6405bb528984d5..9ccff11328ebd3 100644 --- a/contrib/jemalloc/src/pages.c +++ b/contrib/jemalloc/src/pages.c @@ -1,4 +1,3 @@ -#define JEMALLOC_PAGES_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/pages.h" @@ -16,6 +15,14 @@ #include #endif #endif +#ifdef __NetBSD__ +#include /* ilog2 */ +#endif +#ifdef JEMALLOC_HAVE_VM_MAKE_TAG +#define PAGES_FD_TAG VM_MAKE_TAG(101U) +#else +#define PAGES_FD_TAG -1 +#endif /******************************************************************************/ /* Data. */ @@ -42,6 +49,57 @@ thp_mode_t init_system_thp_mode; /* Runtime support for lazy purge. Irrelevant when !pages_can_purge_lazy. 
*/ static bool pages_can_purge_lazy_runtime = true; +#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS +static int madvise_dont_need_zeros_is_faulty = -1; +/** + * Check that MADV_DONTNEED will actually zero pages on subsequent access. + * + * Since qemu does not support this, yet [1], and you can get very tricky + * assert if you will run program with jemalloc in use under qemu: + * + * : ../contrib/jemalloc/src/extent.c:1195: Failed assertion: "p[i] == 0" + * + * [1]: https://patchwork.kernel.org/patch/10576637/ + */ +static int madvise_MADV_DONTNEED_zeroes_pages() +{ + int works = -1; + size_t size = PAGE; + + void * addr = mmap(NULL, size, PROT_READ|PROT_WRITE, + MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + + if (addr == MAP_FAILED) { + malloc_write(": Cannot allocate memory for " + "MADV_DONTNEED check\n"); + if (opt_abort) { + abort(); + } + } + + memset(addr, 'A', size); + if (madvise(addr, size, MADV_DONTNEED) == 0) { + works = memchr(addr, 'A', size) == NULL; + } else { + /* + * If madvise() does not support MADV_DONTNEED, then we can + * call it anyway, and use it's return code. + */ + works = 1; + } + + if (munmap(addr, size) != 0) { + malloc_write(": Cannot deallocate memory for " + "MADV_DONTNEED check\n"); + if (opt_abort) { + abort(); + } + } + + return works; +} +#endif + /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to @@ -76,9 +134,21 @@ os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) { * of existing mappings, and we only want to create new mappings. */ { +#ifdef __NetBSD__ + /* + * On NetBSD PAGE for a platform is defined to the + * maximum page size of all machine architectures + * for that platform, so that we can use the same + * binaries across all machine architectures. + */ + if (alignment > os_page || PAGE > os_page) { + unsigned int a = ilog2(MAX(alignment, PAGE)); + mmap_flags |= MAP_ALIGNED(a); + } +#endif int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; - ret = mmap(addr, size, prot, mmap_flags, -1, 0); + ret = mmap(addr, size, prot, mmap_flags, PAGES_FD_TAG, 0); } assert(ret != NULL); @@ -199,8 +269,8 @@ pages_map(void *addr, size_t size, size_t alignment, bool *commit) { flags |= MAP_FIXED | MAP_EXCL; } else { unsigned alignment_bits = ffs_zu(alignment); - assert(alignment_bits > 1); - flags |= MAP_ALIGNED(alignment_bits - 1); + assert(alignment_bits > 0); + flags |= MAP_ALIGNED(alignment_bits); } void *ret = mmap(addr, size, prot, flags, -1, 0); @@ -248,14 +318,10 @@ pages_unmap(void *addr, size_t size) { } static bool -pages_commit_impl(void *addr, size_t size, bool commit) { +os_pages_commit(void *addr, size_t size, bool commit) { assert(PAGE_ADDR2BASE(addr) == addr); assert(PAGE_CEILING(size) == size); - if (os_overcommits) { - return true; - } - #ifdef _WIN32 return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT))); @@ -263,7 +329,7 @@ pages_commit_impl(void *addr, size_t size, bool commit) { { int prot = commit ? 
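The `madvise_MADV_DONTNEED_zeroes_pages` probe can be reproduced as a standalone program: map a page, dirty it, apply `MADV_DONTNEED`, and check whether the old contents survive (they do under qemu user emulation, which is exactly what the check guards against):

```
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static int
madvise_dontneed_zeroes_pages(void) {
	size_t size = (size_t)sysconf(_SC_PAGESIZE);
	void *addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		return -1;
	}
	memset(addr, 'A', size);
	int works;
	if (madvise(addr, size, MADV_DONTNEED) == 0) {
		/* If any 'A' survives, the pages were not dropped. */
		works = memchr(addr, 'A', size) == NULL;
	} else {
		works = 1;	/* call failed: mirror the original's behavior */
	}
	munmap(addr, size);
	return works;
}

int
main(void) {
	printf("MADV_DONTNEED zeroes pages: %d\n",
	    madvise_dontneed_zeroes_pages());
	return 0;
}
```

When the probe reports a faulty MADV_DONTNEED, the pages_boot hunk further down disables trusting it and warns that memset will be used instead.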
PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED, - -1, 0); + PAGES_FD_TAG, 0); if (result == MAP_FAILED) { return true; } @@ -280,6 +346,15 @@ pages_commit_impl(void *addr, size_t size, bool commit) { #endif } +static bool +pages_commit_impl(void *addr, size_t size, bool commit) { + if (os_overcommits) { + return true; + } + + return os_pages_commit(addr, size, commit); +} + bool pages_commit(void *addr, size_t size) { return pages_commit_impl(addr, size, true); @@ -290,6 +365,66 @@ pages_decommit(void *addr, size_t size) { return pages_commit_impl(addr, size, false); } +void +pages_mark_guards(void *head, void *tail) { + assert(head != NULL || tail != NULL); + assert(head == NULL || tail == NULL || + (uintptr_t)head < (uintptr_t)tail); +#ifdef JEMALLOC_HAVE_MPROTECT + if (head != NULL) { + mprotect(head, PAGE, PROT_NONE); + } + if (tail != NULL) { + mprotect(tail, PAGE, PROT_NONE); + } +#else + /* Decommit sets to PROT_NONE / MEM_DECOMMIT. */ + if (head != NULL) { + os_pages_commit(head, PAGE, false); + } + if (tail != NULL) { + os_pages_commit(tail, PAGE, false); + } +#endif +} + +void +pages_unmark_guards(void *head, void *tail) { + assert(head != NULL || tail != NULL); + assert(head == NULL || tail == NULL || + (uintptr_t)head < (uintptr_t)tail); +#ifdef JEMALLOC_HAVE_MPROTECT + bool head_and_tail = (head != NULL) && (tail != NULL); + size_t range = head_and_tail ? + (uintptr_t)tail - (uintptr_t)head + PAGE : + SIZE_T_MAX; + /* + * The amount of work that the kernel does in mprotect depends on the + * range argument. SC_LARGE_MINCLASS is an arbitrary threshold chosen + * to prevent kernel from doing too much work that would outweigh the + * savings of performing one less system call. + */ + bool ranged_mprotect = head_and_tail && range <= SC_LARGE_MINCLASS; + if (ranged_mprotect) { + mprotect(head, range, PROT_READ | PROT_WRITE); + } else { + if (head != NULL) { + mprotect(head, PAGE, PROT_READ | PROT_WRITE); + } + if (tail != NULL) { + mprotect(tail, PAGE, PROT_READ | PROT_WRITE); + } + } +#else + if (head != NULL) { + os_pages_commit(head, PAGE, true); + } + if (tail != NULL) { + os_pages_commit(tail, PAGE, true); + } +#endif +} + bool pages_purge_lazy(void *addr, size_t size) { assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr); @@ -320,6 +455,9 @@ pages_purge_lazy(void *addr, size_t size) { #elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ !defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS) return (madvise(addr, size, MADV_DONTNEED) != 0); +#elif defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED) && \ + !defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS) + return (posix_madvise(addr, size, POSIX_MADV_DONTNEED) != 0); #else not_reached(); #endif @@ -336,7 +474,12 @@ pages_purge_forced(void *addr, size_t size) { #if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS) - return (madvise(addr, size, MADV_DONTNEED) != 0); + return (unlikely(madvise_dont_need_zeros_is_faulty) || + madvise(addr, size, MADV_DONTNEED) != 0); +#elif defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED) && \ + defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS) + return (unlikely(madvise_dont_need_zeros_is_faulty) || + posix_madvise(addr, size, POSIX_MADV_DONTNEED) != 0); #elif defined(JEMALLOC_MAPS_COALESCE) /* Try to overlay a new demand-zeroed mapping. 
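`pages_mark_guards`/`pages_unmark_guards` put `PROT_NONE` pages on either side of an allocation (or decommit them where `mprotect` is unavailable), so stray accesses past the ends fault immediately. A standalone sketch of two-sided guard pages:

```
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void) {
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	size_t usable = 4 * page;
	char *base = mmap(NULL, usable + 2 * page, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED) {
		return 1;
	}
	/* Mark the guards: first and last page become inaccessible. */
	mprotect(base, page, PROT_NONE);			/* head guard */
	mprotect(base + page + usable, page, PROT_NONE);	/* tail guard */

	char *payload = base + page;
	memset(payload, 0, usable);	/* safe: inside the guarded region */

	/* Unmark before returning the pages for reuse. */
	mprotect(base, page, PROT_READ | PROT_WRITE);
	mprotect(base + page + usable, page, PROT_READ | PROT_WRITE);
	munmap(base, usable + 2 * page);
	return 0;
}
```

The batched-`mprotect` optimization in `pages_unmark_guards` (one call covering head, payload, and tail when the range is small) is omitted here for clarity.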
*/ return pages_commit(addr, size); @@ -351,8 +494,13 @@ pages_huge_impl(void *addr, size_t size, bool aligned) { assert(HUGEPAGE_ADDR2BASE(addr) == addr); assert(HUGEPAGE_CEILING(size) == size); } -#ifdef JEMALLOC_HAVE_MADVISE_HUGE +#if defined(JEMALLOC_HAVE_MADVISE_HUGE) return (madvise(addr, size, MADV_HUGEPAGE) != 0); +#elif defined(JEMALLOC_HAVE_MEMCNTL) + struct memcntl_mha m = {0}; + m.mha_cmd = MHA_MAPSIZE_VA; + m.mha_pagesize = HUGEPAGE; + return (memcntl(addr, size, MC_HAT_ADVISE, (caddr_t)&m, 0, 0) == 0); #else return true; #endif @@ -396,8 +544,10 @@ bool pages_dontdump(void *addr, size_t size) { assert(PAGE_ADDR2BASE(addr) == addr); assert(PAGE_CEILING(size) == size); -#ifdef JEMALLOC_MADVISE_DONTDUMP +#if defined(JEMALLOC_MADVISE_DONTDUMP) return madvise(addr, size, MADV_DONTDUMP) != 0; +#elif defined(JEMALLOC_MADVISE_NOCORE) + return madvise(addr, size, MADV_NOCORE) != 0; #else return false; #endif @@ -407,8 +557,10 @@ bool pages_dodump(void *addr, size_t size) { assert(PAGE_ADDR2BASE(addr) == addr); assert(PAGE_CEILING(size) == size); -#ifdef JEMALLOC_MADVISE_DONTDUMP +#if defined(JEMALLOC_MADVISE_DONTDUMP) return madvise(addr, size, MADV_DODUMP) != 0; +#elif defined(JEMALLOC_MADVISE_NOCORE) + return madvise(addr, size, MADV_CORE) != 0; #else return false; #endif @@ -561,14 +713,14 @@ pages_set_thp_state (void *ptr, size_t size) { static void init_thp_state(void) { - if (!have_madvise_huge) { + if (!have_madvise_huge && !have_memcntl) { if (metadata_thp_enabled() && opt_abort) { malloc_write(": no MADV_HUGEPAGE support\n"); abort(); } goto label_error; } - +#if defined(JEMALLOC_HAVE_MADVISE_HUGE) static const char sys_state_madvise[] = "always [madvise] never\n"; static const char sys_state_always[] = "[always] madvise never\n"; static const char sys_state_never[] = "always madvise [never]\n"; @@ -577,6 +729,9 @@ init_thp_state(void) { #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open) int fd = (int)syscall(SYS_open, "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY); +#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat) + int fd = (int)syscall(SYS_openat, + AT_FDCWD, "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY); #else int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY); #endif @@ -592,7 +747,7 @@ init_thp_state(void) { #endif if (nread < 0) { - goto label_error; + goto label_error; } if (strncmp(buf, sys_state_madvise, (size_t)nread) == 0) { @@ -605,6 +760,10 @@ init_thp_state(void) { goto label_error; } return; +#elif defined(JEMALLOC_HAVE_MEMCNTL) + init_system_thp_mode = thp_mode_default; + return; +#endif label_error: opt_thp = init_system_thp_mode = thp_mode_not_supported; } @@ -620,6 +779,20 @@ pages_boot(void) { return true; } +#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS + if (!opt_trust_madvise) { + madvise_dont_need_zeros_is_faulty = !madvise_MADV_DONTNEED_zeroes_pages(); + if (madvise_dont_need_zeros_is_faulty) { + malloc_write(": MADV_DONTNEED does not work (memset will be used instead)\n"); + malloc_write(": (This is the expected behaviour if you are running under QEMU)\n"); + } + } else { + /* In case opt_trust_madvise is disable, + * do not do runtime check */ + madvise_dont_need_zeros_is_faulty = 0; + } +#endif + #ifndef _WIN32 mmap_flags = MAP_PRIVATE | MAP_ANON; #endif @@ -633,6 +806,8 @@ pages_boot(void) { mmap_flags |= MAP_NORESERVE; } # endif +#elif defined(__NetBSD__) + os_overcommits = true; #else os_overcommits = false; #endif diff --git a/contrib/jemalloc/src/pai.c b/contrib/jemalloc/src/pai.c new file 
mode 100644 index 00000000000000..45c87729278d32 --- /dev/null +++ b/contrib/jemalloc/src/pai.c @@ -0,0 +1,31 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +size_t +pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs, + edata_list_active_t *results, bool *deferred_work_generated) { + for (size_t i = 0; i < nallocs; i++) { + bool deferred_by_alloc = false; + edata_t *edata = pai_alloc(tsdn, self, size, PAGE, + /* zero */ false, /* guarded */ false, + /* frequent_reuse */ false, &deferred_by_alloc); + *deferred_work_generated |= deferred_by_alloc; + if (edata == NULL) { + return i; + } + edata_list_active_append(results, edata); + } + return nallocs; +} + +void +pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self, + edata_list_active_t *list, bool *deferred_work_generated) { + edata_t *edata; + while ((edata = edata_list_active_first(list)) != NULL) { + bool deferred_by_dalloc = false; + edata_list_active_remove(list, edata); + pai_dalloc(tsdn, self, edata, &deferred_by_dalloc); + *deferred_work_generated |= deferred_by_dalloc; + } +} diff --git a/contrib/jemalloc/src/peak_event.c b/contrib/jemalloc/src/peak_event.c new file mode 100644 index 00000000000000..4093fbcc691ee2 --- /dev/null +++ b/contrib/jemalloc/src/peak_event.c @@ -0,0 +1,82 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/peak_event.h" + +#include "jemalloc/internal/activity_callback.h" +#include "jemalloc/internal/peak.h" + +/* + * Update every 64K by default. We're not exposing this as a configuration + * option for now; we don't want to bind ourselves too tightly to any particular + * performance requirements for small values, or guarantee that we'll even be + * able to provide fine-grained accuracy. + */ +#define PEAK_EVENT_WAIT (64 * 1024) + +/* Update the peak with current tsd state. */ +void +peak_event_update(tsd_t *tsd) { + uint64_t alloc = tsd_thread_allocated_get(tsd); + uint64_t dalloc = tsd_thread_deallocated_get(tsd); + peak_t *peak = tsd_peakp_get(tsd); + peak_update(peak, alloc, dalloc); +} + +static void +peak_event_activity_callback(tsd_t *tsd) { + activity_callback_thunk_t *thunk = tsd_activity_callback_thunkp_get( + tsd); + uint64_t alloc = tsd_thread_allocated_get(tsd); + uint64_t dalloc = tsd_thread_deallocated_get(tsd); + if (thunk->callback != NULL) { + thunk->callback(thunk->uctx, alloc, dalloc); + } +} + +/* Set current state to zero. 
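peak_event.c tracks a per-thread high-water mark from the thread's cumulative allocated/deallocated byte counters, refreshed every 64 KiB of activity. A sketch under the assumption that the peak is simply the largest observed `allocated - deallocated`; the `peak_update` below is an invented illustration, not jemalloc's:

```
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint64_t max_net;	/* highest net usage seen so far */
} peak_t;

static void
peak_update(peak_t *peak, uint64_t alloc, uint64_t dalloc) {
	uint64_t net = alloc >= dalloc ? alloc - dalloc : 0;
	if (net > peak->max_net) {
		peak->max_net = net;
	}
}

int
main(void) {
	peak_t peak = { 0 };
	peak_update(&peak, 64 * 1024, 0);
	peak_update(&peak, 128 * 1024, 100 * 1024);
	printf("peak net usage: %llu bytes\n",
	    (unsigned long long)peak.max_net);	/* 65536 */
	return 0;
}
```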
*/ +void +peak_event_zero(tsd_t *tsd) { + uint64_t alloc = tsd_thread_allocated_get(tsd); + uint64_t dalloc = tsd_thread_deallocated_get(tsd); + peak_t *peak = tsd_peakp_get(tsd); + peak_set_zero(peak, alloc, dalloc); +} + +uint64_t +peak_event_max(tsd_t *tsd) { + peak_t *peak = tsd_peakp_get(tsd); + return peak_max(peak); +} + +uint64_t +peak_alloc_new_event_wait(tsd_t *tsd) { + return PEAK_EVENT_WAIT; +} + +uint64_t +peak_alloc_postponed_event_wait(tsd_t *tsd) { + return TE_MIN_START_WAIT; +} + +void +peak_alloc_event_handler(tsd_t *tsd, uint64_t elapsed) { + peak_event_update(tsd); + peak_event_activity_callback(tsd); +} + +uint64_t +peak_dalloc_new_event_wait(tsd_t *tsd) { + return PEAK_EVENT_WAIT; +} + +uint64_t +peak_dalloc_postponed_event_wait(tsd_t *tsd) { + return TE_MIN_START_WAIT; +} + +void +peak_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed) { + peak_event_update(tsd); + peak_event_activity_callback(tsd); +} diff --git a/contrib/jemalloc/src/prng.c b/contrib/jemalloc/src/prng.c deleted file mode 100644 index 83c04bf9b5dd09..00000000000000 --- a/contrib/jemalloc/src/prng.c +++ /dev/null @@ -1,3 +0,0 @@ -#define JEMALLOC_PRNG_C_ -#include "jemalloc/internal/jemalloc_preamble.h" -#include "jemalloc/internal/jemalloc_internal_includes.h" diff --git a/contrib/jemalloc/src/prof.c b/contrib/jemalloc/src/prof.c index 13334cb4c0ba5b..7a6d5d569a484a 100644 --- a/contrib/jemalloc/src/prof.c +++ b/contrib/jemalloc/src/prof.c @@ -1,2786 +1,444 @@ -#define JEMALLOC_PROF_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" +#include "jemalloc/internal/ctl.h" #include "jemalloc/internal/assert.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/emitter.h" +#include "jemalloc/internal/counter.h" +#include "jemalloc/internal/prof_data.h" +#include "jemalloc/internal/prof_log.h" +#include "jemalloc/internal/prof_recent.h" +#include "jemalloc/internal/prof_stats.h" +#include "jemalloc/internal/prof_sys.h" +#include "jemalloc/internal/prof_hook.h" +#include "jemalloc/internal/thread_event.h" -/******************************************************************************/ - -#ifdef JEMALLOC_PROF_LIBUNWIND -#define UNW_LOCAL_ONLY -#include -#endif - -#ifdef JEMALLOC_PROF_LIBGCC /* - * We have a circular dependency -- jemalloc_internal.h tells us if we should - * use libgcc's unwinding functionality, but after we've included that, we've - * already hooked _Unwind_Backtrace. We'll temporarily disable hooking. + * This file implements the profiling "APIs" needed by other parts of jemalloc, + * and also manages the relevant "operational" data, mainly options and mutexes; + * the core profiling data structures are encapsulated in prof_data.c. */ -#undef _Unwind_Backtrace -#include -#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook) -#endif /******************************************************************************/ + /* Data. */ -bool opt_prof = false; -bool opt_prof_active = true; -bool opt_prof_thread_active_init = true; -size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT; -ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT; -bool opt_prof_gdump = false; -bool opt_prof_final = false; -bool opt_prof_leak = false; -bool opt_prof_accum = false; -bool opt_prof_log = false; -char opt_prof_prefix[ - /* Minimize memory bloat for non-prof builds. 
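The peak_event handlers above re-check the peak roughly every 64 KiB of allocation activity (PEAK_EVENT_WAIT) and feed the same thread totals to an optional activity callback. A minimal sketch of the underlying idea, assuming peak_update() remembers the largest net allocation (allocated minus deallocated bytes) seen so far; the type and field names below are illustrative, not jemalloc's:

```
#include <stdint.h>

/* Illustrative peak tracker: keep the maximum of (alloc - dalloc) observed
 * across updates. */
typedef struct {
	uint64_t max_net;
} peak_sketch_t;

static void
peak_sketch_update(peak_sketch_t *peak, uint64_t alloc, uint64_t dalloc) {
	uint64_t net = (alloc >= dalloc) ? alloc - dalloc : 0;
	if (net > peak->max_net) {
		peak->max_net = net;
	}
}
```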
*/ -#ifdef JEMALLOC_PROF - PATH_MAX + -#endif - 1]; +bool opt_prof = false; +bool opt_prof_active = true; +bool opt_prof_thread_active_init = true; +size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT; +ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT; +bool opt_prof_gdump = false; +bool opt_prof_final = false; +bool opt_prof_leak = false; +bool opt_prof_leak_error = false; +bool opt_prof_accum = false; +char opt_prof_prefix[PROF_DUMP_FILENAME_LEN]; +bool opt_prof_sys_thread_name = false; +bool opt_prof_unbias = true; + +/* Accessed via prof_sample_event_handler(). */ +static counter_accum_t prof_idump_accumulated; /* * Initialized as opt_prof_active, and accessed via * prof_active_[gs]et{_unlocked,}(). */ -bool prof_active; -static malloc_mutex_t prof_active_mtx; +bool prof_active_state; +static malloc_mutex_t prof_active_mtx; /* * Initialized as opt_prof_thread_active_init, and accessed via * prof_thread_active_init_[gs]et(). */ -static bool prof_thread_active_init; -static malloc_mutex_t prof_thread_active_init_mtx; +static bool prof_thread_active_init; +static malloc_mutex_t prof_thread_active_init_mtx; /* * Initialized as opt_prof_gdump, and accessed via * prof_gdump_[gs]et{_unlocked,}(). */ -bool prof_gdump_val; -static malloc_mutex_t prof_gdump_mtx; - -uint64_t prof_interval = 0; - -size_t lg_prof_sample; - -typedef enum prof_logging_state_e prof_logging_state_t; -enum prof_logging_state_e { - prof_logging_state_stopped, - prof_logging_state_started, - prof_logging_state_dumping -}; - -/* - * - stopped: log_start never called, or previous log_stop has completed. - * - started: log_start called, log_stop not called yet. Allocations are logged. - * - dumping: log_stop called but not finished; samples are not logged anymore. - */ -prof_logging_state_t prof_logging_state = prof_logging_state_stopped; - -#ifdef JEMALLOC_JET -static bool prof_log_dummy = false; -#endif - -/* Incremented for every log file that is output. */ -static uint64_t log_seq = 0; -static char log_filename[ - /* Minimize memory bloat for non-prof builds. */ -#ifdef JEMALLOC_PROF - PATH_MAX + -#endif - 1]; - -/* Timestamp for most recent call to log_start(). */ -static nstime_t log_start_timestamp = NSTIME_ZERO_INITIALIZER; - -/* Increment these when adding to the log_bt and log_thr linked lists. */ -static size_t log_bt_index = 0; -static size_t log_thr_index = 0; - -/* Linked list node definitions. These are only used in prof.c. */ -typedef struct prof_bt_node_s prof_bt_node_t; - -struct prof_bt_node_s { - prof_bt_node_t *next; - size_t index; - prof_bt_t bt; - /* Variable size backtrace vector pointed to by bt. */ - void *vec[1]; -}; - -typedef struct prof_thr_node_s prof_thr_node_t; - -struct prof_thr_node_s { - prof_thr_node_t *next; - size_t index; - uint64_t thr_uid; - /* Variable size based on thr_name_sz. */ - char name[1]; -}; - -typedef struct prof_alloc_node_s prof_alloc_node_t; - -/* This is output when logging sampled allocations. */ -struct prof_alloc_node_s { - prof_alloc_node_t *next; - /* Indices into an array of thread data. */ - size_t alloc_thr_ind; - size_t free_thr_ind; - - /* Indices into an array of backtraces. */ - size_t alloc_bt_ind; - size_t free_bt_ind; - - uint64_t alloc_time_ns; - uint64_t free_time_ns; - - size_t usize; -}; - -/* - * Created on the first call to prof_log_start and deleted on prof_log_stop. - * These are the backtraces and threads that have already been logged by an - * allocation. 
- */ -static bool log_tables_initialized = false; -static ckh_t log_bt_node_set; -static ckh_t log_thr_node_set; - -/* Store linked lists for logged data. */ -static prof_bt_node_t *log_bt_first = NULL; -static prof_bt_node_t *log_bt_last = NULL; -static prof_thr_node_t *log_thr_first = NULL; -static prof_thr_node_t *log_thr_last = NULL; -static prof_alloc_node_t *log_alloc_first = NULL; -static prof_alloc_node_t *log_alloc_last = NULL; - -/* Protects the prof_logging_state and any log_{...} variable. */ -static malloc_mutex_t log_mtx; - -/* - * Table of mutexes that are shared among gctx's. These are leaf locks, so - * there is no problem with using them for more than one gctx at the same time. - * The primary motivation for this sharing though is that gctx's are ephemeral, - * and destroying mutexes causes complications for systems that allocate when - * creating/destroying mutexes. - */ -static malloc_mutex_t *gctx_locks; -static atomic_u_t cum_gctxs; /* Atomic counter. */ - -/* - * Table of mutexes that are shared among tdata's. No operations require - * holding multiple tdata locks, so there is no problem with using them for more - * than one tdata at the same time, even though a gctx lock may be acquired - * while holding a tdata lock. - */ -static malloc_mutex_t *tdata_locks; - -/* - * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data - * structure that knows about all backtraces currently captured. - */ -static ckh_t bt2gctx; -/* Non static to enable profiling. */ -malloc_mutex_t bt2gctx_mtx; - -/* - * Tree of all extant prof_tdata_t structures, regardless of state, - * {attached,detached,expired}. - */ -static prof_tdata_tree_t tdatas; -static malloc_mutex_t tdatas_mtx; +bool prof_gdump_val; +static malloc_mutex_t prof_gdump_mtx; -static uint64_t next_thr_uid; -static malloc_mutex_t next_thr_uid_mtx; +uint64_t prof_interval = 0; -static malloc_mutex_t prof_dump_seq_mtx; -static uint64_t prof_dump_seq; -static uint64_t prof_dump_iseq; -static uint64_t prof_dump_mseq; -static uint64_t prof_dump_useq; +size_t lg_prof_sample; -/* - * This buffer is rather large for stack allocation, so use a single buffer for - * all profile dumps. - */ -static malloc_mutex_t prof_dump_mtx; -static char prof_dump_buf[ - /* Minimize memory bloat for non-prof builds. */ -#ifdef JEMALLOC_PROF - PROF_DUMP_BUFSIZE -#else - 1 -#endif -]; -static size_t prof_dump_buf_end; -static int prof_dump_fd; +static uint64_t next_thr_uid; +static malloc_mutex_t next_thr_uid_mtx; /* Do not dump any profiles until bootstrapping is complete. */ -static bool prof_booted = false; - -/******************************************************************************/ -/* - * Function prototypes for static functions that are referenced prior to - * definition. - */ - -static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx); -static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); -static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, - bool even_if_attached); -static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, - bool even_if_attached); -static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name); - -/* Hashtable functions for log_bt_node_set and log_thr_node_set. 
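The gctx_locks/tdata_locks tables removed above implement lock striping: a fixed pool of leaf mutexes is shared among many ephemeral objects, selected by hashing an identifier, so creating or destroying a gctx never has to create or destroy a mutex. (Per the new file-header comment, that core data machinery is encapsulated in prof_data.c in 5.3.) A generic sketch of the pattern, with an illustrative pool size standing in for PROF_NCTX_LOCKS/PROF_NTDATA_LOCKS:

```
#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

#define LOCK_POOL_SIZE 64	/* Plays the role of PROF_NCTX_LOCKS. */
static pthread_mutex_t lock_pool[LOCK_POOL_SIZE];

static void
lock_pool_init(void) {
	for (size_t i = 0; i < LOCK_POOL_SIZE; i++) {
		pthread_mutex_init(&lock_pool[i], NULL);
	}
}

/* Every object with the same id hashes to the same shared leaf mutex. */
static pthread_mutex_t *
lock_for_id(uint64_t id) {
	return &lock_pool[id % LOCK_POOL_SIZE];
}
```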
*/ -static void prof_thr_node_hash(const void *key, size_t r_hash[2]); -static bool prof_thr_node_keycomp(const void *k1, const void *k2); -static void prof_bt_node_hash(const void *key, size_t r_hash[2]); -static bool prof_bt_node_keycomp(const void *k1, const void *k2); - -/******************************************************************************/ -/* Red-black trees. */ - -static int -prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) { - uint64_t a_thr_uid = a->thr_uid; - uint64_t b_thr_uid = b->thr_uid; - int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid); - if (ret == 0) { - uint64_t a_thr_discrim = a->thr_discrim; - uint64_t b_thr_discrim = b->thr_discrim; - ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim < - b_thr_discrim); - if (ret == 0) { - uint64_t a_tctx_uid = a->tctx_uid; - uint64_t b_tctx_uid = b->tctx_uid; - ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < - b_tctx_uid); - } - } - return ret; -} - -rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, - tctx_link, prof_tctx_comp) - -static int -prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) { - unsigned a_len = a->bt.len; - unsigned b_len = b->bt.len; - unsigned comp_len = (a_len < b_len) ? a_len : b_len; - int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *)); - if (ret == 0) { - ret = (a_len > b_len) - (a_len < b_len); - } - return ret; -} +bool prof_booted = false; -rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link, - prof_gctx_comp) - -static int -prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) { - int ret; - uint64_t a_uid = a->thr_uid; - uint64_t b_uid = b->thr_uid; - - ret = ((a_uid > b_uid) - (a_uid < b_uid)); - if (ret == 0) { - uint64_t a_discrim = a->thr_discrim; - uint64_t b_discrim = b->thr_discrim; - - ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim)); - } - return ret; -} +/* Logically a prof_backtrace_hook_t. */ +atomic_p_t prof_backtrace_hook; -rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, - prof_tdata_comp) +/* Logically a prof_dump_hook_t. */ +atomic_p_t prof_dump_hook; /******************************************************************************/ void -prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) { - prof_tdata_t *tdata; - +prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx) { cassert(config_prof); - if (updated) { - /* - * Compute a new sample threshold. This isn't very important in - * practice, because this function is rarely executed, so the - * potential for sample bias is minimal except in contrived - * programs. - */ - tdata = prof_tdata_get(tsd, true); - if (tdata != NULL) { - prof_sample_threshold_update(tdata); - } + if (tsd_reentrancy_level_get(tsd) > 0) { + assert((uintptr_t)tctx == (uintptr_t)1U); + return; } if ((uintptr_t)tctx > (uintptr_t)1U) { malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); tctx->prepared = false; - if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) { - prof_tctx_destroy(tsd, tctx); - } else { - malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); - } + prof_tctx_try_destroy(tsd, tctx); } } void -prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, - prof_tctx_t *tctx) { - prof_tctx_set(tsdn, ptr, usize, NULL, tctx); +prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size, + size_t usize, prof_tctx_t *tctx) { + cassert(config_prof); + + if (opt_prof_sys_thread_name) { + prof_sys_thread_name_fetch(tsd); + } - /* Get the current time and set this in the extent_t. 
We'll read this - * when free() is called. */ - nstime_t t = NSTIME_ZERO_INITIALIZER; - nstime_update(&t); - prof_alloc_time_set(tsdn, ptr, NULL, t); + edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, + ptr); + prof_info_set(tsd, edata, tctx, size); - malloc_mutex_lock(tsdn, tctx->tdata->lock); + szind_t szind = sz_size2index(usize); + + malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); + /* + * We need to do these map lookups while holding the lock, to avoid the + * possibility of races with prof_reset calls, which update the map and + * then acquire the lock. This actually still leaves a data race on the + * contents of the unbias map, but we have not yet gone through and + * atomic-ified the prof module, and compilers are not yet causing us + * issues. The key thing is to make sure that, if we read garbage data, + * the prof_reset call is about to mark our tctx as expired before any + * dumping of our corrupted output is attempted. + */ + size_t shifted_unbiased_cnt = prof_shifted_unbiased_cnt[szind]; + size_t unbiased_bytes = prof_unbiased_sz[szind]; tctx->cnts.curobjs++; + tctx->cnts.curobjs_shifted_unbiased += shifted_unbiased_cnt; tctx->cnts.curbytes += usize; + tctx->cnts.curbytes_unbiased += unbiased_bytes; if (opt_prof_accum) { tctx->cnts.accumobjs++; + tctx->cnts.accumobjs_shifted_unbiased += shifted_unbiased_cnt; tctx->cnts.accumbytes += usize; + tctx->cnts.accumbytes_unbiased += unbiased_bytes; } + bool record_recent = prof_recent_alloc_prepare(tsd, tctx); tctx->prepared = false; - malloc_mutex_unlock(tsdn, tctx->tdata->lock); -} - -static size_t -prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) { - assert(prof_logging_state == prof_logging_state_started); - malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx); - - prof_bt_node_t dummy_node; - dummy_node.bt = *bt; - prof_bt_node_t *node; - - /* See if this backtrace is already cached in the table. */ - if (ckh_search(&log_bt_node_set, (void *)(&dummy_node), - (void **)(&node), NULL)) { - size_t sz = offsetof(prof_bt_node_t, vec) + - (bt->len * sizeof(void *)); - prof_bt_node_t *new_node = (prof_bt_node_t *) - iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, - true, arena_get(TSDN_NULL, 0, true), true); - if (log_bt_first == NULL) { - log_bt_first = new_node; - log_bt_last = new_node; - } else { - log_bt_last->next = new_node; - log_bt_last = new_node; - } - - new_node->next = NULL; - new_node->index = log_bt_index; - /* - * Copy the backtrace: bt is inside a tdata or gctx, which - * might die before prof_log_stop is called. - */ - new_node->bt.len = bt->len; - memcpy(new_node->vec, bt->vec, bt->len * sizeof(void *)); - new_node->bt.vec = new_node->vec; - - log_bt_index++; - ckh_insert(tsd, &log_bt_node_set, (void *)new_node, NULL); - return new_node->index; - } else { - return node->index; + malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); + if (record_recent) { + assert(tctx == edata_prof_tctx_get(edata)); + prof_recent_alloc(tsd, edata, size, usize); } -} -static size_t -prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) { - assert(prof_logging_state == prof_logging_state_started); - malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx); - - prof_thr_node_t dummy_node; - dummy_node.thr_uid = thr_uid; - prof_thr_node_t *node; - - /* See if this thread is already cached in the table. 
*/ - if (ckh_search(&log_thr_node_set, (void *)(&dummy_node), - (void **)(&node), NULL)) { - size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1; - prof_thr_node_t *new_node = (prof_thr_node_t *) - iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, - true, arena_get(TSDN_NULL, 0, true), true); - if (log_thr_first == NULL) { - log_thr_first = new_node; - log_thr_last = new_node; - } else { - log_thr_last->next = new_node; - log_thr_last = new_node; - } - new_node->next = NULL; - new_node->index = log_thr_index; - new_node->thr_uid = thr_uid; - strcpy(new_node->name, name); - - log_thr_index++; - ckh_insert(tsd, &log_thr_node_set, (void *)new_node, NULL); - return new_node->index; - } else { - return node->index; + if (opt_prof_stats) { + prof_stats_inc(tsd, szind, size); } } -static void -prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { - malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); - - prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false); - if (cons_tdata == NULL) { - /* - * We decide not to log these allocations. cons_tdata will be - * NULL only when the current thread is in a weird state (e.g. - * it's being destroyed). - */ - return; - } - - malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx); - - if (prof_logging_state != prof_logging_state_started) { - goto label_done; - } - - if (!log_tables_initialized) { - bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS, - prof_bt_node_hash, prof_bt_node_keycomp); - bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS, - prof_thr_node_hash, prof_thr_node_keycomp); - if (err1 || err2) { - goto label_done; - } - log_tables_initialized = true; - } - - nstime_t alloc_time = prof_alloc_time_get(tsd_tsdn(tsd), ptr, - (alloc_ctx_t *)NULL); - nstime_t free_time = NSTIME_ZERO_INITIALIZER; - nstime_update(&free_time); - - size_t sz = sizeof(prof_alloc_node_t); - prof_alloc_node_t *new_node = (prof_alloc_node_t *) - iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true, - arena_get(TSDN_NULL, 0, true), true); - - const char *prod_thr_name = (tctx->tdata->thread_name == NULL)? - "" : tctx->tdata->thread_name; - const char *cons_thr_name = prof_thread_name_get(tsd); - - prof_bt_t bt; - /* Initialize the backtrace, using the buffer in tdata to store it. */ - bt_init(&bt, cons_tdata->vec); - prof_backtrace(&bt); - prof_bt_t *cons_bt = &bt; - - /* We haven't destroyed tctx yet, so gctx should be good to read. 
*/ - prof_bt_t *prod_bt = &tctx->gctx->bt; - - new_node->next = NULL; - new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid, - prod_thr_name); - new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid, - cons_thr_name); - new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt); - new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt); - new_node->alloc_time_ns = nstime_ns(&alloc_time); - new_node->free_time_ns = nstime_ns(&free_time); - new_node->usize = usize; - - if (log_alloc_first == NULL) { - log_alloc_first = new_node; - log_alloc_last = new_node; - } else { - log_alloc_last->next = new_node; - log_alloc_last = new_node; - } +void +prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_info_t *prof_info) { + cassert(config_prof); -label_done: - malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx); -} + assert(prof_info != NULL); + prof_tctx_t *tctx = prof_info->alloc_tctx; + assert((uintptr_t)tctx > (uintptr_t)1U); -void -prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize, - prof_tctx_t *tctx) { + szind_t szind = sz_size2index(usize); malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); assert(tctx->cnts.curobjs > 0); assert(tctx->cnts.curbytes >= usize); + /* + * It's not correct to do equivalent asserts for unbiased bytes, because + * of the potential for races with prof.reset calls. The map contents + * should really be atomic, but we have not atomic-ified the prof module + * yet. + */ tctx->cnts.curobjs--; + tctx->cnts.curobjs_shifted_unbiased -= prof_shifted_unbiased_cnt[szind]; tctx->cnts.curbytes -= usize; + tctx->cnts.curbytes_unbiased -= prof_unbiased_sz[szind]; - prof_try_log(tsd, ptr, usize, tctx); - - if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) { - prof_tctx_destroy(tsd, tctx); - } else { - malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); - } -} - -void -bt_init(prof_bt_t *bt, void **vec) { - cassert(config_prof); - - bt->vec = vec; - bt->len = 0; -} - -static void -prof_enter(tsd_t *tsd, prof_tdata_t *tdata) { - cassert(config_prof); - assert(tdata == prof_tdata_get(tsd, false)); - - if (tdata != NULL) { - assert(!tdata->enq); - tdata->enq = true; - } - - malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); -} - -static void -prof_leave(tsd_t *tsd, prof_tdata_t *tdata) { - cassert(config_prof); - assert(tdata == prof_tdata_get(tsd, false)); - - malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); - - if (tdata != NULL) { - bool idump, gdump; + prof_try_log(tsd, usize, prof_info); - assert(tdata->enq); - tdata->enq = false; - idump = tdata->enq_idump; - tdata->enq_idump = false; - gdump = tdata->enq_gdump; - tdata->enq_gdump = false; + prof_tctx_try_destroy(tsd, tctx); - if (idump) { - prof_idump(tsd_tsdn(tsd)); - } - if (gdump) { - prof_gdump(tsd_tsdn(tsd)); - } + if (opt_prof_stats) { + prof_stats_dec(tsd, szind, prof_info->alloc_size); } } -#ifdef JEMALLOC_PROF_LIBUNWIND -void -prof_backtrace(prof_bt_t *bt) { - int nframes; - - cassert(config_prof); - assert(bt->len == 0); - assert(bt->vec != NULL); - - nframes = unw_backtrace(bt->vec, PROF_BT_MAX); - if (nframes <= 0) { - return; +prof_tctx_t * +prof_tctx_create(tsd_t *tsd) { + if (!tsd_nominal(tsd) || tsd_reentrancy_level_get(tsd) > 0) { + return NULL; } - bt->len = nframes; -} -#elif (defined(JEMALLOC_PROF_LIBGCC)) -static _Unwind_Reason_Code -prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) { - cassert(config_prof); - - return _URC_NO_REASON; -} - -static _Unwind_Reason_Code -prof_unwind_callback(struct _Unwind_Context *context, void *arg) { - 
prof_unwind_data_t *data = (prof_unwind_data_t *)arg; - void *ip; - - cassert(config_prof); - ip = (void *)_Unwind_GetIP(context); - if (ip == NULL) { - return _URC_END_OF_STACK; - } - data->bt->vec[data->bt->len] = ip; - data->bt->len++; - if (data->bt->len == data->max) { - return _URC_END_OF_STACK; + prof_tdata_t *tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return NULL; } - return _URC_NO_REASON; + prof_bt_t bt; + bt_init(&bt, tdata->vec); + prof_backtrace(tsd, &bt); + return prof_lookup(tsd, &bt); } -void -prof_backtrace(prof_bt_t *bt) { - prof_unwind_data_t data = {bt, PROF_BT_MAX}; - - cassert(config_prof); - - _Unwind_Backtrace(prof_unwind_callback, &data); -} -#elif (defined(JEMALLOC_PROF_GCC)) -void -prof_backtrace(prof_bt_t *bt) { -#define BT_FRAME(i) \ - if ((i) < PROF_BT_MAX) { \ - void *p; \ - if (__builtin_frame_address(i) == 0) { \ - return; \ - } \ - p = __builtin_return_address(i); \ - if (p == NULL) { \ - return; \ - } \ - bt->vec[(i)] = p; \ - bt->len = (i) + 1; \ - } else { \ - return; \ +/* + * The bodies of this function and prof_leakcheck() are compiled out unless heap + * profiling is enabled, so that it is possible to compile jemalloc with + * floating point support completely disabled. Avoiding floating point code is + * important on memory-constrained systems, but it also enables a workaround for + * versions of glibc that don't properly save/restore floating point registers + * during dynamic lazy symbol loading (which internally calls into whatever + * malloc implementation happens to be integrated into the application). Note + * that some compilers (e.g. gcc 4.8) may use floating point registers for fast + * memory moves, so jemalloc must be compiled with such optimizations disabled + * (e.g. + * -mno-sse) in order for the workaround to be complete. 
+ */ +uint64_t +prof_sample_new_event_wait(tsd_t *tsd) { +#ifdef JEMALLOC_PROF + if (lg_prof_sample == 0) { + return TE_MIN_START_WAIT; } - cassert(config_prof); - - BT_FRAME(0) - BT_FRAME(1) - BT_FRAME(2) - BT_FRAME(3) - BT_FRAME(4) - BT_FRAME(5) - BT_FRAME(6) - BT_FRAME(7) - BT_FRAME(8) - BT_FRAME(9) - - BT_FRAME(10) - BT_FRAME(11) - BT_FRAME(12) - BT_FRAME(13) - BT_FRAME(14) - BT_FRAME(15) - BT_FRAME(16) - BT_FRAME(17) - BT_FRAME(18) - BT_FRAME(19) - - BT_FRAME(20) - BT_FRAME(21) - BT_FRAME(22) - BT_FRAME(23) - BT_FRAME(24) - BT_FRAME(25) - BT_FRAME(26) - BT_FRAME(27) - BT_FRAME(28) - BT_FRAME(29) - - BT_FRAME(30) - BT_FRAME(31) - BT_FRAME(32) - BT_FRAME(33) - BT_FRAME(34) - BT_FRAME(35) - BT_FRAME(36) - BT_FRAME(37) - BT_FRAME(38) - BT_FRAME(39) - - BT_FRAME(40) - BT_FRAME(41) - BT_FRAME(42) - BT_FRAME(43) - BT_FRAME(44) - BT_FRAME(45) - BT_FRAME(46) - BT_FRAME(47) - BT_FRAME(48) - BT_FRAME(49) - - BT_FRAME(50) - BT_FRAME(51) - BT_FRAME(52) - BT_FRAME(53) - BT_FRAME(54) - BT_FRAME(55) - BT_FRAME(56) - BT_FRAME(57) - BT_FRAME(58) - BT_FRAME(59) - - BT_FRAME(60) - BT_FRAME(61) - BT_FRAME(62) - BT_FRAME(63) - BT_FRAME(64) - BT_FRAME(65) - BT_FRAME(66) - BT_FRAME(67) - BT_FRAME(68) - BT_FRAME(69) - - BT_FRAME(70) - BT_FRAME(71) - BT_FRAME(72) - BT_FRAME(73) - BT_FRAME(74) - BT_FRAME(75) - BT_FRAME(76) - BT_FRAME(77) - BT_FRAME(78) - BT_FRAME(79) - - BT_FRAME(80) - BT_FRAME(81) - BT_FRAME(82) - BT_FRAME(83) - BT_FRAME(84) - BT_FRAME(85) - BT_FRAME(86) - BT_FRAME(87) - BT_FRAME(88) - BT_FRAME(89) - - BT_FRAME(90) - BT_FRAME(91) - BT_FRAME(92) - BT_FRAME(93) - BT_FRAME(94) - BT_FRAME(95) - BT_FRAME(96) - BT_FRAME(97) - BT_FRAME(98) - BT_FRAME(99) - - BT_FRAME(100) - BT_FRAME(101) - BT_FRAME(102) - BT_FRAME(103) - BT_FRAME(104) - BT_FRAME(105) - BT_FRAME(106) - BT_FRAME(107) - BT_FRAME(108) - BT_FRAME(109) - - BT_FRAME(110) - BT_FRAME(111) - BT_FRAME(112) - BT_FRAME(113) - BT_FRAME(114) - BT_FRAME(115) - BT_FRAME(116) - BT_FRAME(117) - BT_FRAME(118) - BT_FRAME(119) - - BT_FRAME(120) - BT_FRAME(121) - BT_FRAME(122) - BT_FRAME(123) - BT_FRAME(124) - BT_FRAME(125) - BT_FRAME(126) - BT_FRAME(127) -#undef BT_FRAME -} + /* + * Compute sample interval as a geometrically distributed random + * variable with mean (2^lg_prof_sample). + * + * __ __ + * | log(u) | 1 + * bytes_until_sample = | -------- |, where p = --------------- + * | log(1-p) | lg_prof_sample + * 2 + * + * For more information on the math, see: + * + * Non-Uniform Random Variate Generation + * Luc Devroye + * Springer-Verlag, New York, 1986 + * pp 500 + * (http://luc.devroye.org/rnbookindex.html) + * + * In the actual computation, there's a non-zero probability that our + * pseudo random number generator generates an exact 0, and to avoid + * log(0), we set u to 1.0 in case r is 0. Therefore u effectively is + * uniformly distributed in (0, 1] instead of [0, 1). Further, rather + * than taking the ceiling, we take the floor and then add 1, since + * otherwise bytes_until_sample would be 0 if u is exactly 1.0. + */ + uint64_t r = prng_lg_range_u64(tsd_prng_statep_get(tsd), 53); + double u = (r == 0U) ? 
1.0 : (double)r * (1.0/9007199254740992.0L); + return (uint64_t)(log(u) / + log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample)))) + + (uint64_t)1U; #else -void -prof_backtrace(prof_bt_t *bt) { - cassert(config_prof); not_reached(); -} + return TE_MAX_START_WAIT; #endif - -static malloc_mutex_t * -prof_gctx_mutex_choose(void) { - unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED); - - return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]; -} - -static malloc_mutex_t * -prof_tdata_mutex_choose(uint64_t thr_uid) { - return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS]; -} - -static prof_gctx_t * -prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { - /* - * Create a single allocation that has space for vec of length bt->len. - */ - size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); - prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, - sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), - true); - if (gctx == NULL) { - return NULL; - } - gctx->lock = prof_gctx_mutex_choose(); - /* - * Set nlimbo to 1, in order to avoid a race condition with - * prof_tctx_destroy()/prof_gctx_try_destroy(). - */ - gctx->nlimbo = 1; - tctx_tree_new(&gctx->tctxs); - /* Duplicate bt. */ - memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *)); - gctx->bt.vec = gctx->vec; - gctx->bt.len = bt->len; - return gctx; } -static void -prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, - prof_tdata_t *tdata) { - cassert(config_prof); - +uint64_t +prof_sample_postponed_event_wait(tsd_t *tsd) { /* - * Check that gctx is still unused by any thread cache before destroying - * it. prof_lookup() increments gctx->nlimbo in order to avoid a race - * condition with this function, as does prof_tctx_destroy() in order to - * avoid a race between the main body of prof_tctx_destroy() and entry - * into this function. + * The postponed wait time for prof sample event is computed as if we + * want a new wait time (i.e. as if the event were triggered). If we + * instead postpone to the immediate next allocation, like how we're + * handling the other events, then we can have sampling bias, if e.g. + * the allocation immediately following a reentrancy always comes from + * the same stack trace. */ - prof_enter(tsd, tdata_self); - malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); - assert(gctx->nlimbo != 0); - if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { - /* Remove gctx from bt2gctx. */ - if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) { - not_reached(); - } - prof_leave(tsd, tdata_self); - /* Destroy gctx. */ - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true); - } else { - /* - * Compensate for increment in prof_tctx_destroy() or - * prof_lookup(). 
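The sampling computation above draws the next wait as a geometric random variable with mean 2^lg_prof_sample bytes: bytes_until_sample = floor(log(u) / log(1 - p)) + 1, where p = 2^(-lg_prof_sample) and u is uniform in (0, 1] (u is forced to 1.0 when the 53-bit PRNG output is exactly 0, so log(0) is never taken). A compact restatement of that draw as a stand-alone helper; this is illustrative only, since jemalloc obtains r from its per-thread PRNG state:

```
#include <math.h>
#include <stdint.h>

/* r: a uniform 53-bit random integer; lg_sample: lg of the mean interval. */
static uint64_t
sample_wait_sketch(uint64_t r, unsigned lg_sample) {
	if (lg_sample == 0) {
		return 1;	/* Sample as often as possible. */
	}
	/* Map r to u in (0, 1]; 2^53 == 9007199254740992. */
	double u = (r == 0) ? 1.0 : (double)r * (1.0 / 9007199254740992.0);
	double p = 1.0 / (double)((uint64_t)1 << lg_sample);
	/* Geometric draw with mean 2^lg_sample bytes. */
	return (uint64_t)(log(u) / log(1.0 - p)) + 1;
}
```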
- */ - gctx->nlimbo--; - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - prof_leave(tsd, tdata_self); - } -} - -static bool -prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) { - malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); - - if (opt_prof_accum) { - return false; - } - if (tctx->cnts.curobjs != 0) { - return false; - } - if (tctx->prepared) { - return false; - } - return true; -} - -static bool -prof_gctx_should_destroy(prof_gctx_t *gctx) { - if (opt_prof_accum) { - return false; - } - if (!tctx_tree_empty(&gctx->tctxs)) { - return false; - } - if (gctx->nlimbo != 0) { - return false; - } - return true; -} - -static void -prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { - prof_tdata_t *tdata = tctx->tdata; - prof_gctx_t *gctx = tctx->gctx; - bool destroy_tdata, destroy_tctx, destroy_gctx; - - malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); - - assert(tctx->cnts.curobjs == 0); - assert(tctx->cnts.curbytes == 0); - assert(!opt_prof_accum); - assert(tctx->cnts.accumobjs == 0); - assert(tctx->cnts.accumbytes == 0); - - ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); - destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false); - malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - - malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); - switch (tctx->state) { - case prof_tctx_state_nominal: - tctx_tree_remove(&gctx->tctxs, tctx); - destroy_tctx = true; - if (prof_gctx_should_destroy(gctx)) { - /* - * Increment gctx->nlimbo in order to keep another - * thread from winning the race to destroy gctx while - * this one has gctx->lock dropped. Without this, it - * would be possible for another thread to: - * - * 1) Sample an allocation associated with gctx. - * 2) Deallocate the sampled object. - * 3) Successfully prof_gctx_try_destroy(gctx). - * - * The result would be that gctx no longer exists by the - * time this thread accesses it in - * prof_gctx_try_destroy(). - */ - gctx->nlimbo++; - destroy_gctx = true; - } else { - destroy_gctx = false; - } - break; - case prof_tctx_state_dumping: - /* - * A dumping thread needs tctx to remain valid until dumping - * has finished. Change state such that the dumping thread will - * complete destruction during a late dump iteration phase. - */ - tctx->state = prof_tctx_state_purgatory; - destroy_tctx = false; - destroy_gctx = false; - break; - default: - not_reached(); - destroy_tctx = false; - destroy_gctx = false; - } - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - if (destroy_gctx) { - prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx, - tdata); - } - - malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock); - - if (destroy_tdata) { - prof_tdata_destroy(tsd, tdata, false); - } - - if (destroy_tctx) { - idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true); - } -} - -static bool -prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, - void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) { - union { - prof_gctx_t *p; - void *v; - } gctx, tgctx; - union { - prof_bt_t *p; - void *v; - } btkey; - bool new_gctx; - - prof_enter(tsd, tdata); - if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { - /* bt has never been seen before. Insert it. */ - prof_leave(tsd, tdata); - tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); - if (tgctx.v == NULL) { - return true; - } - prof_enter(tsd, tdata); - if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { - gctx.p = tgctx.p; - btkey.p = &gctx.p->bt; - if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { - /* OOM. 
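The destroy path removed above relies on a "limbo" count: a gctx may only be torn down when its tctx tree is empty and exactly one thread (the caller) still holds it in limbo; otherwise the caller just drops its limbo reference and leaves the object alive. A loose, self-contained analog of that guarded-destroy pattern; the type and names are illustrative, not jemalloc's:

```
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct {
	pthread_mutex_t lock;
	unsigned nentries;	/* Live entries that keep the object alive. */
	unsigned nlimbo;	/* Threads that still intend to touch the object. */
} ctx_sketch_t;

/* Caller must hold one limbo reference.  Returns true if it destroyed ctx. */
static bool
ctx_try_destroy(ctx_sketch_t *ctx) {
	pthread_mutex_lock(&ctx->lock);
	bool destroy = (ctx->nentries == 0 && ctx->nlimbo == 1);
	if (!destroy) {
		ctx->nlimbo--;	/* Someone else still references ctx; just let go. */
	}
	pthread_mutex_unlock(&ctx->lock);
	if (destroy) {
		pthread_mutex_destroy(&ctx->lock);
		free(ctx);
	}
	return destroy;
}
```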
*/ - prof_leave(tsd, tdata); - idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL, - true, true); - return true; - } - new_gctx = true; - } else { - new_gctx = false; - } - } else { - tgctx.v = NULL; - new_gctx = false; - } - - if (!new_gctx) { - /* - * Increment nlimbo, in order to avoid a race condition with - * prof_tctx_destroy()/prof_gctx_try_destroy(). - */ - malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock); - gctx.p->nlimbo++; - malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock); - new_gctx = false; - - if (tgctx.v != NULL) { - /* Lost race to insert. */ - idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true, - true); - } - } - prof_leave(tsd, tdata); - - *p_btkey = btkey.v; - *p_gctx = gctx.p; - *p_new_gctx = new_gctx; - return false; -} - -prof_tctx_t * -prof_lookup(tsd_t *tsd, prof_bt_t *bt) { - union { - prof_tctx_t *p; - void *v; - } ret; - prof_tdata_t *tdata; - bool not_found; - - cassert(config_prof); - - tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) { - return NULL; - } - - malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); - not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); - if (!not_found) { /* Note double negative! */ - ret.p->prepared = true; - } - malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - if (not_found) { - void *btkey; - prof_gctx_t *gctx; - bool new_gctx, error; - - /* - * This thread's cache lacks bt. Look for it in the global - * cache. - */ - if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, - &new_gctx)) { - return NULL; - } - - /* Link a prof_tctx_t into gctx for this thread. */ - ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), - sz_size2index(sizeof(prof_tctx_t)), false, NULL, true, - arena_ichoose(tsd, NULL), true); - if (ret.p == NULL) { - if (new_gctx) { - prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - } - return NULL; - } - ret.p->tdata = tdata; - ret.p->thr_uid = tdata->thr_uid; - ret.p->thr_discrim = tdata->thr_discrim; - memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); - ret.p->gctx = gctx; - ret.p->tctx_uid = tdata->tctx_uid_next++; - ret.p->prepared = true; - ret.p->state = prof_tctx_state_initializing; - malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); - error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); - malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - if (error) { - if (new_gctx) { - prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - } - idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true); - return NULL; - } - malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); - ret.p->state = prof_tctx_state_nominal; - tctx_tree_insert(&gctx->tctxs, ret.p); - gctx->nlimbo--; - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - } - - return ret.p; -} - -/* - * The bodies of this function and prof_leakcheck() are compiled out unless heap - * profiling is enabled, so that it is possible to compile jemalloc with - * floating point support completely disabled. Avoiding floating point code is - * important on memory-constrained systems, but it also enables a workaround for - * versions of glibc that don't properly save/restore floating point registers - * during dynamic lazy symbol loading (which internally calls into whatever - * malloc implementation happens to be integrated into the application). Note - * that some compilers (e.g. gcc 4.8) may use floating point registers for fast - * memory moves, so jemalloc must be compiled with such optimizations disabled - * (e.g. - * -mno-sse) in order for the workaround to be complete. 
- */ -void -prof_sample_threshold_update(prof_tdata_t *tdata) { -#ifdef JEMALLOC_PROF - if (!config_prof) { - return; - } - - if (lg_prof_sample == 0) { - tsd_bytes_until_sample_set(tsd_fetch(), 0); - return; - } - - /* - * Compute sample interval as a geometrically distributed random - * variable with mean (2^lg_prof_sample). - * - * __ __ - * | log(u) | 1 - * tdata->bytes_until_sample = | -------- |, where p = --------------- - * | log(1-p) | lg_prof_sample - * 2 - * - * For more information on the math, see: - * - * Non-Uniform Random Variate Generation - * Luc Devroye - * Springer-Verlag, New York, 1986 - * pp 500 - * (http://luc.devroye.org/rnbookindex.html) - */ - uint64_t r = prng_lg_range_u64(&tdata->prng_state, 53); - double u = (double)r * (1.0/9007199254740992.0L); - uint64_t bytes_until_sample = (uint64_t)(log(u) / - log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample)))) - + (uint64_t)1U; - if (bytes_until_sample > SSIZE_MAX) { - bytes_until_sample = SSIZE_MAX; - } - tsd_bytes_until_sample_set(tsd_fetch(), bytes_until_sample); - -#endif -} - -#ifdef JEMALLOC_JET -static prof_tdata_t * -prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, - void *arg) { - size_t *tdata_count = (size_t *)arg; - - (*tdata_count)++; - - return NULL; -} - -size_t -prof_tdata_count(void) { - size_t tdata_count = 0; - tsdn_t *tsdn; - - tsdn = tsdn_fetch(); - malloc_mutex_lock(tsdn, &tdatas_mtx); - tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter, - (void *)&tdata_count); - malloc_mutex_unlock(tsdn, &tdatas_mtx); - - return tdata_count; -} - -size_t -prof_bt_count(void) { - size_t bt_count; - tsd_t *tsd; - prof_tdata_t *tdata; - - tsd = tsd_fetch(); - tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) { - return 0; - } - - malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); - bt_count = ckh_count(&bt2gctx); - malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); - - return bt_count; -} -#endif - -static int -prof_dump_open_impl(bool propagate_err, const char *filename) { - int fd; - - fd = creat(filename, 0644); - if (fd == -1 && !propagate_err) { - malloc_printf(": creat(\"%s\"), 0644) failed\n", - filename); - if (opt_abort) { - abort(); - } - } - - return fd; -} -prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl; - -static bool -prof_dump_flush(bool propagate_err) { - bool ret = false; - ssize_t err; - - cassert(config_prof); - - err = malloc_write_fd(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); - if (err == -1) { - if (!propagate_err) { - malloc_write(": write() failed during heap " - "profile flush\n"); - if (opt_abort) { - abort(); - } - } - ret = true; - } - prof_dump_buf_end = 0; - - return ret; -} - -static bool -prof_dump_close(bool propagate_err) { - bool ret; - - assert(prof_dump_fd != -1); - ret = prof_dump_flush(propagate_err); - close(prof_dump_fd); - prof_dump_fd = -1; - - return ret; -} - -static bool -prof_dump_write(bool propagate_err, const char *s) { - size_t i, slen, n; - - cassert(config_prof); - - i = 0; - slen = strlen(s); - while (i < slen) { - /* Flush the buffer if it is full. */ - if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { - if (prof_dump_flush(propagate_err) && propagate_err) { - return true; - } - } - - if (prof_dump_buf_end + slen - i <= PROF_DUMP_BUFSIZE) { - /* Finish writing. */ - n = slen - i; - } else { - /* Write as much of s as will fit. 
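The prof_dump_write()/prof_dump_flush() pair removed above is a simple buffered writer: strings are appended to a fixed PROF_DUMP_BUFSIZE buffer, which is flushed to the dump file descriptor whenever it fills; in 5.3 this kind of buffering appears to move behind the new buf_writer helper. A generic analog of the append-and-flush pattern, with illustrative names and a plain write(2) backend:

```
#include <stddef.h>
#include <string.h>
#include <unistd.h>

#define DUMP_BUFSZ 4096

typedef struct {
	int fd;
	char buf[DUMP_BUFSZ];
	size_t end;	/* Bytes currently buffered. */
} dump_writer_t;

static void
dump_flush(dump_writer_t *w) {
	if (w->end > 0) {
		(void)write(w->fd, w->buf, w->end);	/* Error handling elided. */
		w->end = 0;
	}
}

static void
dump_write(dump_writer_t *w, const char *s) {
	size_t slen = strlen(s);
	for (size_t i = 0; i < slen;) {
		if (w->end == DUMP_BUFSZ) {
			dump_flush(w);	/* Buffer is full; push it out first. */
		}
		size_t n = slen - i;
		if (n > DUMP_BUFSZ - w->end) {
			n = DUMP_BUFSZ - w->end;	/* Copy only what fits. */
		}
		memcpy(&w->buf[w->end], &s[i], n);
		w->end += n;
		i += n;
	}
}
```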
*/ - n = PROF_DUMP_BUFSIZE - prof_dump_buf_end; - } - memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n); - prof_dump_buf_end += n; - i += n; - } - assert(i == slen); - - return false; -} - -JEMALLOC_FORMAT_PRINTF(2, 3) -static bool -prof_dump_printf(bool propagate_err, const char *format, ...) { - bool ret; - va_list ap; - char buf[PROF_PRINTF_BUFSIZE]; - - va_start(ap, format); - malloc_vsnprintf(buf, sizeof(buf), format, ap); - va_end(ap); - ret = prof_dump_write(propagate_err, buf); - - return ret; -} - -static void -prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) { - malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); - - malloc_mutex_lock(tsdn, tctx->gctx->lock); - - switch (tctx->state) { - case prof_tctx_state_initializing: - malloc_mutex_unlock(tsdn, tctx->gctx->lock); - return; - case prof_tctx_state_nominal: - tctx->state = prof_tctx_state_dumping; - malloc_mutex_unlock(tsdn, tctx->gctx->lock); - - memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); - - tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs; - tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes; - if (opt_prof_accum) { - tdata->cnt_summed.accumobjs += - tctx->dump_cnts.accumobjs; - tdata->cnt_summed.accumbytes += - tctx->dump_cnts.accumbytes; - } - break; - case prof_tctx_state_dumping: - case prof_tctx_state_purgatory: - not_reached(); - } -} - -static void -prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) { - malloc_mutex_assert_owner(tsdn, gctx->lock); - - gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; - gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; - if (opt_prof_accum) { - gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; - gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes; - } -} - -static prof_tctx_t * -prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { - tsdn_t *tsdn = (tsdn_t *)arg; - - malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); - - switch (tctx->state) { - case prof_tctx_state_nominal: - /* New since dumping started; ignore. */ - break; - case prof_tctx_state_dumping: - case prof_tctx_state_purgatory: - prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx); - break; - default: - not_reached(); - } - - return NULL; -} - -struct prof_tctx_dump_iter_arg_s { - tsdn_t *tsdn; - bool propagate_err; -}; - -static prof_tctx_t * -prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) { - struct prof_tctx_dump_iter_arg_s *arg = - (struct prof_tctx_dump_iter_arg_s *)opaque; - - malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock); - - switch (tctx->state) { - case prof_tctx_state_initializing: - case prof_tctx_state_nominal: - /* Not captured by this dump. */ - break; - case prof_tctx_state_dumping: - case prof_tctx_state_purgatory: - if (prof_dump_printf(arg->propagate_err, - " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " - "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, - tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, - tctx->dump_cnts.accumbytes)) { - return tctx; - } - break; - default: - not_reached(); - } - return NULL; -} - -static prof_tctx_t * -prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { - tsdn_t *tsdn = (tsdn_t *)arg; - prof_tctx_t *ret; - - malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); - - switch (tctx->state) { - case prof_tctx_state_nominal: - /* New since dumping started; ignore. 
*/ - break; - case prof_tctx_state_dumping: - tctx->state = prof_tctx_state_nominal; - break; - case prof_tctx_state_purgatory: - ret = tctx; - goto label_return; - default: - not_reached(); - } - - ret = NULL; -label_return: - return ret; -} - -static void -prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) { - cassert(config_prof); - - malloc_mutex_lock(tsdn, gctx->lock); - - /* - * Increment nlimbo so that gctx won't go away before dump. - * Additionally, link gctx into the dump list so that it is included in - * prof_dump()'s second pass. - */ - gctx->nlimbo++; - gctx_tree_insert(gctxs, gctx); - - memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); - - malloc_mutex_unlock(tsdn, gctx->lock); -} - -struct prof_gctx_merge_iter_arg_s { - tsdn_t *tsdn; - size_t leak_ngctx; -}; - -static prof_gctx_t * -prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { - struct prof_gctx_merge_iter_arg_s *arg = - (struct prof_gctx_merge_iter_arg_s *)opaque; - - malloc_mutex_lock(arg->tsdn, gctx->lock); - tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, - (void *)arg->tsdn); - if (gctx->cnt_summed.curobjs != 0) { - arg->leak_ngctx++; - } - malloc_mutex_unlock(arg->tsdn, gctx->lock); - - return NULL; -} - -static void -prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { - prof_tdata_t *tdata = prof_tdata_get(tsd, false); - prof_gctx_t *gctx; - - /* - * Standard tree iteration won't work here, because as soon as we - * decrement gctx->nlimbo and unlock gctx, another thread can - * concurrently destroy it, which will corrupt the tree. Therefore, - * tear down the tree one node at a time during iteration. - */ - while ((gctx = gctx_tree_first(gctxs)) != NULL) { - gctx_tree_remove(gctxs, gctx); - malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); - { - prof_tctx_t *next; - - next = NULL; - do { - prof_tctx_t *to_destroy = - tctx_tree_iter(&gctx->tctxs, next, - prof_tctx_finish_iter, - (void *)tsd_tsdn(tsd)); - if (to_destroy != NULL) { - next = tctx_tree_next(&gctx->tctxs, - to_destroy); - tctx_tree_remove(&gctx->tctxs, - to_destroy); - idalloctm(tsd_tsdn(tsd), to_destroy, - NULL, NULL, true, true); - } else { - next = NULL; - } - } while (next != NULL); - } - gctx->nlimbo--; - if (prof_gctx_should_destroy(gctx)) { - gctx->nlimbo++; - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - } else { - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - } - } -} - -struct prof_tdata_merge_iter_arg_s { - tsdn_t *tsdn; - prof_cnt_t cnt_all; -}; - -static prof_tdata_t * -prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, - void *opaque) { - struct prof_tdata_merge_iter_arg_s *arg = - (struct prof_tdata_merge_iter_arg_s *)opaque; - - malloc_mutex_lock(arg->tsdn, tdata->lock); - if (!tdata->expired) { - size_t tabind; - union { - prof_tctx_t *p; - void *v; - } tctx; - - tdata->dumping = true; - memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); - for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, - &tctx.v);) { - prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); - } - - arg->cnt_all.curobjs += tdata->cnt_summed.curobjs; - arg->cnt_all.curbytes += tdata->cnt_summed.curbytes; - if (opt_prof_accum) { - arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs; - arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes; - } - } else { - tdata->dumping = false; - } - malloc_mutex_unlock(arg->tsdn, tdata->lock); - - return NULL; -} - -static prof_tdata_t * -prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, 
prof_tdata_t *tdata, - void *arg) { - bool propagate_err = *(bool *)arg; - - if (!tdata->dumping) { - return NULL; - } - - if (prof_dump_printf(propagate_err, - " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n", - tdata->thr_uid, tdata->cnt_summed.curobjs, - tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs, - tdata->cnt_summed.accumbytes, - (tdata->thread_name != NULL) ? " " : "", - (tdata->thread_name != NULL) ? tdata->thread_name : "")) { - return tdata; - } - return NULL; -} - -static bool -prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err, - const prof_cnt_t *cnt_all) { - bool ret; - - if (prof_dump_printf(propagate_err, - "heap_v2/%"FMTu64"\n" - " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", - ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs, - cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) { - return true; - } - - malloc_mutex_lock(tsdn, &tdatas_mtx); - ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, - (void *)&propagate_err) != NULL); - malloc_mutex_unlock(tsdn, &tdatas_mtx); - return ret; -} -prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl; - -static bool -prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, - const prof_bt_t *bt, prof_gctx_tree_t *gctxs) { - bool ret; - unsigned i; - struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg; - - cassert(config_prof); - malloc_mutex_assert_owner(tsdn, gctx->lock); - - /* Avoid dumping such gctx's that have no useful data. */ - if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || - (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) { - assert(gctx->cnt_summed.curobjs == 0); - assert(gctx->cnt_summed.curbytes == 0); - assert(gctx->cnt_summed.accumobjs == 0); - assert(gctx->cnt_summed.accumbytes == 0); - ret = false; - goto label_return; - } - - if (prof_dump_printf(propagate_err, "@")) { - ret = true; - goto label_return; - } - for (i = 0; i < bt->len; i++) { - if (prof_dump_printf(propagate_err, " %#"FMTxPTR, - (uintptr_t)bt->vec[i])) { - ret = true; - goto label_return; - } - } - - if (prof_dump_printf(propagate_err, - "\n" - " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", - gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes, - gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) { - ret = true; - goto label_return; - } - - prof_tctx_dump_iter_arg.tsdn = tsdn; - prof_tctx_dump_iter_arg.propagate_err = propagate_err; - if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, - (void *)&prof_tctx_dump_iter_arg) != NULL) { - ret = true; - goto label_return; - } - - ret = false; -label_return: - return ret; -} - -#ifndef _WIN32 -JEMALLOC_FORMAT_PRINTF(1, 2) -static int -prof_open_maps(const char *format, ...) 
{ - int mfd; - va_list ap; - char filename[PATH_MAX + 1]; - - va_start(ap, format); - malloc_vsnprintf(filename, sizeof(filename), format, ap); - va_end(ap); - -#if defined(O_CLOEXEC) - mfd = open(filename, O_RDONLY | O_CLOEXEC); -#else - mfd = open(filename, O_RDONLY); - if (mfd != -1) { - fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC); - } -#endif - - return mfd; -} -#endif - -static int -prof_getpid(void) { -#ifdef _WIN32 - return GetCurrentProcessId(); -#else - return getpid(); -#endif -} - -static bool -prof_dump_maps(bool propagate_err) { - bool ret; - int mfd; - - cassert(config_prof); -#ifdef __FreeBSD__ - mfd = prof_open_maps("/proc/curproc/map"); -#elif defined(_WIN32) - mfd = -1; // Not implemented -#else - { - int pid = prof_getpid(); - - mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid); - if (mfd == -1) { - mfd = prof_open_maps("/proc/%d/maps", pid); - } - } -#endif - if (mfd != -1) { - ssize_t nread; - - if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") && - propagate_err) { - ret = true; - goto label_return; - } - nread = 0; - do { - prof_dump_buf_end += nread; - if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { - /* Make space in prof_dump_buf before read(). */ - if (prof_dump_flush(propagate_err) && - propagate_err) { - ret = true; - goto label_return; - } - } - nread = malloc_read_fd(mfd, - &prof_dump_buf[prof_dump_buf_end], PROF_DUMP_BUFSIZE - - prof_dump_buf_end); - } while (nread > 0); - } else { - ret = true; - goto label_return; - } - - ret = false; -label_return: - if (mfd != -1) { - close(mfd); - } - return ret; -} - -/* - * See prof_sample_threshold_update() comment for why the body of this function - * is conditionally compiled. - */ -static void -prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, - const char *filename) { -#ifdef JEMALLOC_PROF - /* - * Scaling is equivalent AdjustSamples() in jeprof, but the result may - * differ slightly from what jeprof reports, because here we scale the - * summary values, whereas jeprof scales each context individually and - * reports the sums of the scaled values. - */ - if (cnt_all->curbytes != 0) { - double sample_period = (double)((uint64_t)1 << lg_prof_sample); - double ratio = (((double)cnt_all->curbytes) / - (double)cnt_all->curobjs) / sample_period; - double scale_factor = 1.0 / (1.0 - exp(-ratio)); - uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes) - * scale_factor); - uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) * - scale_factor); - - malloc_printf(": Leak approximation summary: ~%"FMTu64 - " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n", - curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs != - 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? 
"s" : ""); - malloc_printf( - ": Run jeprof on \"%s\" for leak detail\n", - filename); - } -#endif -} - -struct prof_gctx_dump_iter_arg_s { - tsdn_t *tsdn; - bool propagate_err; -}; - -static prof_gctx_t * -prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { - prof_gctx_t *ret; - struct prof_gctx_dump_iter_arg_s *arg = - (struct prof_gctx_dump_iter_arg_s *)opaque; - - malloc_mutex_lock(arg->tsdn, gctx->lock); - - if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt, - gctxs)) { - ret = gctx; - goto label_return; - } - - ret = NULL; -label_return: - malloc_mutex_unlock(arg->tsdn, gctx->lock); - return ret; -} - -static void -prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, - struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, - struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, - prof_gctx_tree_t *gctxs) { - size_t tabind; - union { - prof_gctx_t *p; - void *v; - } gctx; - - prof_enter(tsd, tdata); - - /* - * Put gctx's in limbo and clear their counters in preparation for - * summing. - */ - gctx_tree_new(gctxs); - for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) { - prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs); - } - - /* - * Iterate over tdatas, and for the non-expired ones snapshot their tctx - * stats and merge them into the associated gctx's. - */ - prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd); - memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t)); - malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); - tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, - (void *)prof_tdata_merge_iter_arg); - malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); - - /* Merge tctx stats into gctx's. */ - prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd); - prof_gctx_merge_iter_arg->leak_ngctx = 0; - gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter, - (void *)prof_gctx_merge_iter_arg); - - prof_leave(tsd, tdata); -} - -static bool -prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename, - bool leakcheck, prof_tdata_t *tdata, - struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, - struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, - struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg, - prof_gctx_tree_t *gctxs) { - /* Create dump file. */ - if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) { - return true; - } - - /* Dump profile header. */ - if (prof_dump_header(tsd_tsdn(tsd), propagate_err, - &prof_tdata_merge_iter_arg->cnt_all)) { - goto label_write_error; - } - - /* Dump per gctx profile stats. */ - prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd); - prof_gctx_dump_iter_arg->propagate_err = propagate_err; - if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter, - (void *)prof_gctx_dump_iter_arg) != NULL) { - goto label_write_error; - } - - /* Dump /proc//maps if possible. 
*/ - if (prof_dump_maps(propagate_err)) { - goto label_write_error; - } - - if (prof_dump_close(propagate_err)) { - return true; - } - - return false; -label_write_error: - prof_dump_close(propagate_err); - return true; -} - -static bool -prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, - bool leakcheck) { - cassert(config_prof); - assert(tsd_reentrancy_level_get(tsd) == 0); - - prof_tdata_t * tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) { - return true; - } - - pre_reentrancy(tsd, NULL); - malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); - - prof_gctx_tree_t gctxs; - struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; - struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; - struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg; - prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, - &prof_gctx_merge_iter_arg, &gctxs); - bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck, tdata, - &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg, - &prof_gctx_dump_iter_arg, &gctxs); - prof_gctx_finish(tsd, &gctxs); - - malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); - post_reentrancy(tsd); - - if (err) { - return true; - } - - if (leakcheck) { - prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all, - prof_gctx_merge_iter_arg.leak_ngctx, filename); - } - return false; -} - -#ifdef JEMALLOC_JET -void -prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, - uint64_t *accumbytes) { - tsd_t *tsd; - prof_tdata_t *tdata; - struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; - struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; - prof_gctx_tree_t gctxs; - - tsd = tsd_fetch(); - tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) { - if (curobjs != NULL) { - *curobjs = 0; - } - if (curbytes != NULL) { - *curbytes = 0; - } - if (accumobjs != NULL) { - *accumobjs = 0; - } - if (accumbytes != NULL) { - *accumbytes = 0; - } - return; - } - - prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, - &prof_gctx_merge_iter_arg, &gctxs); - prof_gctx_finish(tsd, &gctxs); - - if (curobjs != NULL) { - *curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs; - } - if (curbytes != NULL) { - *curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes; - } - if (accumobjs != NULL) { - *accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs; - } - if (accumbytes != NULL) { - *accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes; - } -} -#endif - -#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) -#define VSEQ_INVALID UINT64_C(0xffffffffffffffff) -static void -prof_dump_filename(char *filename, char v, uint64_t vseq) { - cassert(config_prof); - - if (vseq != VSEQ_INVALID) { - /* "...v.heap" */ - malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, - "%s.%d.%"FMTu64".%c%"FMTu64".heap", - opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq); - } else { - /* "....heap" */ - malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, - "%s.%d.%"FMTu64".%c.heap", - opt_prof_prefix, prof_getpid(), prof_dump_seq, v); - } - prof_dump_seq++; -} - -static void -prof_fdump(void) { - tsd_t *tsd; - char filename[DUMP_FILENAME_BUFSIZE]; - - cassert(config_prof); - assert(opt_prof_final); - assert(opt_prof_prefix[0] != '\0'); - - if (!prof_booted) { - return; - } - tsd = tsd_fetch(); - assert(tsd_reentrancy_level_get(tsd) == 0); - - malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); - prof_dump_filename(filename, 'f', VSEQ_INVALID); - malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); - prof_dump(tsd, false, filename, opt_prof_leak); -} - -bool 
-prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) { - cassert(config_prof); - -#ifndef JEMALLOC_ATOMIC_U64 - if (malloc_mutex_init(&prof_accum->mtx, "prof_accum", - WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) { - return true; - } - prof_accum->accumbytes = 0; -#else - atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED); -#endif - return false; -} - -void -prof_idump(tsdn_t *tsdn) { - tsd_t *tsd; - prof_tdata_t *tdata; - - cassert(config_prof); - - if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { - return; - } - tsd = tsdn_tsd(tsdn); - if (tsd_reentrancy_level_get(tsd) > 0) { - return; - } - - tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) { - return; - } - if (tdata->enq) { - tdata->enq_idump = true; - return; - } - - if (opt_prof_prefix[0] != '\0') { - char filename[PATH_MAX + 1]; - malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); - prof_dump_filename(filename, 'i', prof_dump_iseq); - prof_dump_iseq++; - malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); - prof_dump(tsd, false, filename, false); - } -} - -bool -prof_mdump(tsd_t *tsd, const char *filename) { - cassert(config_prof); - assert(tsd_reentrancy_level_get(tsd) == 0); - - if (!opt_prof || !prof_booted) { - return true; - } - char filename_buf[DUMP_FILENAME_BUFSIZE]; - if (filename == NULL) { - /* No filename specified, so automatically generate one. */ - if (opt_prof_prefix[0] == '\0') { - return true; - } - malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); - prof_dump_filename(filename_buf, 'm', prof_dump_mseq); - prof_dump_mseq++; - malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); - filename = filename_buf; - } - return prof_dump(tsd, true, filename, false); -} - -void -prof_gdump(tsdn_t *tsdn) { - tsd_t *tsd; - prof_tdata_t *tdata; - - cassert(config_prof); - - if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { - return; - } - tsd = tsdn_tsd(tsdn); - if (tsd_reentrancy_level_get(tsd) > 0) { - return; - } - - tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) { - return; - } - if (tdata->enq) { - tdata->enq_gdump = true; - return; - } - - if (opt_prof_prefix[0] != '\0') { - char filename[DUMP_FILENAME_BUFSIZE]; - malloc_mutex_lock(tsdn, &prof_dump_seq_mtx); - prof_dump_filename(filename, 'u', prof_dump_useq); - prof_dump_useq++; - malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx); - prof_dump(tsd, false, filename, false); - } -} - -static void -prof_bt_hash(const void *key, size_t r_hash[2]) { - prof_bt_t *bt = (prof_bt_t *)key; - - cassert(config_prof); - - hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); -} - -static bool -prof_bt_keycomp(const void *k1, const void *k2) { - const prof_bt_t *bt1 = (prof_bt_t *)k1; - const prof_bt_t *bt2 = (prof_bt_t *)k2; - - cassert(config_prof); - - if (bt1->len != bt2->len) { - return false; - } - return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); -} - -static void -prof_bt_node_hash(const void *key, size_t r_hash[2]) { - const prof_bt_node_t *bt_node = (prof_bt_node_t *)key; - prof_bt_hash((void *)(&bt_node->bt), r_hash); -} - -static bool -prof_bt_node_keycomp(const void *k1, const void *k2) { - const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1; - const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2; - return prof_bt_keycomp((void *)(&bt_node1->bt), - (void *)(&bt_node2->bt)); -} - -static void -prof_thr_node_hash(const void *key, size_t r_hash[2]) { - const prof_thr_node_t *thr_node = (prof_thr_node_t *)key; - hash(&thr_node->thr_uid, sizeof(uint64_t), 
0x94122f35U, r_hash); -} - -static bool -prof_thr_node_keycomp(const void *k1, const void *k2) { - const prof_thr_node_t *thr_node1 = (prof_thr_node_t *)k1; - const prof_thr_node_t *thr_node2 = (prof_thr_node_t *)k2; - return thr_node1->thr_uid == thr_node2->thr_uid; -} - -static uint64_t -prof_thr_uid_alloc(tsdn_t *tsdn) { - uint64_t thr_uid; - - malloc_mutex_lock(tsdn, &next_thr_uid_mtx); - thr_uid = next_thr_uid; - next_thr_uid++; - malloc_mutex_unlock(tsdn, &next_thr_uid_mtx); - - return thr_uid; -} - -static prof_tdata_t * -prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, - char *thread_name, bool active) { - prof_tdata_t *tdata; - - cassert(config_prof); - - /* Initialize an empty cache for this thread. */ - tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), - sz_size2index(sizeof(prof_tdata_t)), false, NULL, true, - arena_get(TSDN_NULL, 0, true), true); - if (tdata == NULL) { - return NULL; - } - - tdata->lock = prof_tdata_mutex_choose(thr_uid); - tdata->thr_uid = thr_uid; - tdata->thr_discrim = thr_discrim; - tdata->thread_name = thread_name; - tdata->attached = true; - tdata->expired = false; - tdata->tctx_uid_next = 0; - - if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, - prof_bt_keycomp)) { - idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); - return NULL; - } - - tdata->prng_state = (uint64_t)(uintptr_t)tdata; - prof_sample_threshold_update(tdata); - - tdata->enq = false; - tdata->enq_idump = false; - tdata->enq_gdump = false; - - tdata->dumping = false; - tdata->active = active; - - malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); - tdata_tree_insert(&tdatas, tdata); - malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); - - return tdata; -} - -prof_tdata_t * -prof_tdata_init(tsd_t *tsd) { - return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0, - NULL, prof_thread_active_init_get(tsd_tsdn(tsd))); -} - -static bool -prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) { - if (tdata->attached && !even_if_attached) { - return false; - } - if (ckh_count(&tdata->bt2tctx) != 0) { - return false; - } - return true; -} - -static bool -prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, - bool even_if_attached) { - malloc_mutex_assert_owner(tsdn, tdata->lock); - - return prof_tdata_should_destroy_unlocked(tdata, even_if_attached); -} - -static void -prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, - bool even_if_attached) { - malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); - - tdata_tree_remove(&tdatas, tdata); - - assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); - - if (tdata->thread_name != NULL) { - idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, - true); - } - ckh_delete(tsd, &tdata->bt2tctx); - idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); -} - -static void -prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { - malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); - prof_tdata_destroy_locked(tsd, tdata, even_if_attached); - malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); -} - -static void -prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) { - bool destroy_tdata; - - malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); - if (tdata->attached) { - destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, - true); - /* - * Only detach if !destroy_tdata, because detaching would allow - * another thread to win the race to destroy tdata. 
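The race note above is the subtle part of prof_tdata_detach(): the choice between detaching and destroying is made once, under tdata->lock, and a thread that is going to destroy deliberately does not mark the tdata detached, since doing so would let another thread race it to the destruction path. A minimal sketch of that decide-under-one-lock shape, using a plain pthread mutex and hypothetical names rather than jemalloc's malloc_mutex:

```
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct obj_s {
	pthread_mutex_t lock;
	bool attached;	/* still referenced by its owning thread */
	size_t nstate;	/* other state that forbids destruction */
} obj_t;

/* Returns true iff the caller is now the one responsible for freeing obj. */
static bool
obj_detach(obj_t *obj) {
	bool destroy;

	pthread_mutex_lock(&obj->lock);
	if (obj->attached) {
		destroy = (obj->nstate == 0);
		if (!destroy) {
			/*
			 * Mark detached only when we are NOT the destroyer;
			 * otherwise another thread could see the detached
			 * state and win the race to the destruction path.
			 */
			obj->attached = false;
		}
	} else {
		destroy = false;
	}
	pthread_mutex_unlock(&obj->lock);
	return destroy;	/* destruction itself happens outside obj->lock */
}
```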
- */ - if (!destroy_tdata) { - tdata->attached = false; - } - tsd_prof_tdata_set(tsd, NULL); - } else { - destroy_tdata = false; - } - malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - if (destroy_tdata) { - prof_tdata_destroy(tsd, tdata, true); - } -} - -prof_tdata_t * -prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) { - uint64_t thr_uid = tdata->thr_uid; - uint64_t thr_discrim = tdata->thr_discrim + 1; - char *thread_name = (tdata->thread_name != NULL) ? - prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL; - bool active = tdata->active; - - prof_tdata_detach(tsd, tdata); - return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name, - active); -} - -static bool -prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) { - bool destroy_tdata; - - malloc_mutex_lock(tsdn, tdata->lock); - if (!tdata->expired) { - tdata->expired = true; - destroy_tdata = tdata->attached ? false : - prof_tdata_should_destroy(tsdn, tdata, false); - } else { - destroy_tdata = false; - } - malloc_mutex_unlock(tsdn, tdata->lock); - - return destroy_tdata; -} - -static prof_tdata_t * -prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, - void *arg) { - tsdn_t *tsdn = (tsdn_t *)arg; - - return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL); -} - -void -prof_reset(tsd_t *tsd, size_t lg_sample) { - prof_tdata_t *next; - - assert(lg_sample < (sizeof(uint64_t) << 3)); - - malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); - malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); - - lg_prof_sample = lg_sample; - - next = NULL; - do { - prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, - prof_tdata_reset_iter, (void *)tsd); - if (to_destroy != NULL) { - next = tdata_tree_next(&tdatas, to_destroy); - prof_tdata_destroy_locked(tsd, to_destroy, false); - } else { - next = NULL; - } - } while (next != NULL); - - malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); - malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); -} - -void -prof_tdata_cleanup(tsd_t *tsd) { - prof_tdata_t *tdata; - - if (!config_prof) { - return; - } - - tdata = tsd_prof_tdata_get(tsd); - if (tdata != NULL) { - prof_tdata_detach(tsd, tdata); - } -} - -bool -prof_active_get(tsdn_t *tsdn) { - bool prof_active_current; - - malloc_mutex_lock(tsdn, &prof_active_mtx); - prof_active_current = prof_active; - malloc_mutex_unlock(tsdn, &prof_active_mtx); - return prof_active_current; -} - -bool -prof_active_set(tsdn_t *tsdn, bool active) { - bool prof_active_old; - - malloc_mutex_lock(tsdn, &prof_active_mtx); - prof_active_old = prof_active; - prof_active = active; - malloc_mutex_unlock(tsdn, &prof_active_mtx); - return prof_active_old; -} - -#ifdef JEMALLOC_JET -size_t -prof_log_bt_count(void) { - size_t cnt = 0; - prof_bt_node_t *node = log_bt_first; - while (node != NULL) { - cnt++; - node = node->next; - } - return cnt; -} - -size_t -prof_log_alloc_count(void) { - size_t cnt = 0; - prof_alloc_node_t *node = log_alloc_first; - while (node != NULL) { - cnt++; - node = node->next; - } - return cnt; -} - -size_t -prof_log_thr_count(void) { - size_t cnt = 0; - prof_thr_node_t *node = log_thr_first; - while (node != NULL) { - cnt++; - node = node->next; - } - return cnt; -} - -bool -prof_log_is_logging(void) { - return prof_logging_state == prof_logging_state_started; -} - -bool -prof_log_rep_check(void) { - if (prof_logging_state == prof_logging_state_stopped - && log_tables_initialized) { - return true; - } - - if (log_bt_last != NULL && log_bt_last->next != NULL) { - return true; - } - if (log_thr_last != NULL && 
log_thr_last->next != NULL) { - return true; - } - if (log_alloc_last != NULL && log_alloc_last->next != NULL) { - return true; - } - - size_t bt_count = prof_log_bt_count(); - size_t thr_count = prof_log_thr_count(); - size_t alloc_count = prof_log_alloc_count(); - - - if (prof_logging_state == prof_logging_state_stopped) { - if (bt_count != 0 || thr_count != 0 || alloc_count || 0) { - return true; - } - } - - prof_alloc_node_t *node = log_alloc_first; - while (node != NULL) { - if (node->alloc_bt_ind >= bt_count) { - return true; - } - if (node->free_bt_ind >= bt_count) { - return true; - } - if (node->alloc_thr_ind >= thr_count) { - return true; - } - if (node->free_thr_ind >= thr_count) { - return true; - } - if (node->alloc_time_ns > node->free_time_ns) { - return true; - } - node = node->next; - } - - return false; + return prof_sample_new_event_wait(tsd); } void -prof_log_dummy_set(bool new_value) { - prof_log_dummy = new_value; -} -#endif - -bool -prof_log_start(tsdn_t *tsdn, const char *filename) { - if (!opt_prof || !prof_booted) { - return true; - } - - bool ret = false; - size_t buf_size = PATH_MAX + 1; - - malloc_mutex_lock(tsdn, &log_mtx); - - if (prof_logging_state != prof_logging_state_stopped) { - ret = true; - } else if (filename == NULL) { - /* Make default name. */ - malloc_snprintf(log_filename, buf_size, "%s.%d.%"FMTu64".json", - opt_prof_prefix, prof_getpid(), log_seq); - log_seq++; - prof_logging_state = prof_logging_state_started; - } else if (strlen(filename) >= buf_size) { - ret = true; - } else { - strcpy(log_filename, filename); - prof_logging_state = prof_logging_state_started; +prof_sample_event_handler(tsd_t *tsd, uint64_t elapsed) { + cassert(config_prof); + assert(elapsed > 0 && elapsed != TE_INVALID_ELAPSED); + if (prof_interval == 0 || !prof_active_get_unlocked()) { + return; } - - if (!ret) { - nstime_update(&log_start_timestamp); + if (counter_accum(tsd_tsdn(tsd), &prof_idump_accumulated, elapsed)) { + prof_idump(tsd_tsdn(tsd)); } - - malloc_mutex_unlock(tsdn, &log_mtx); - - return ret; } -/* Used as an atexit function to stop logging on exit. */ static void -prof_log_stop_final(void) { - tsd_t *tsd = tsd_fetch(); - prof_log_stop(tsd_tsdn(tsd)); -} +prof_fdump(void) { + tsd_t *tsd; -struct prof_emitter_cb_arg_s { - int fd; - ssize_t ret; -}; + cassert(config_prof); + assert(opt_prof_final); -static void -prof_emitter_write_cb(void *opaque, const char *to_write) { - struct prof_emitter_cb_arg_s *arg = - (struct prof_emitter_cb_arg_s *)opaque; - size_t bytes = strlen(to_write); -#ifdef JEMALLOC_JET - if (prof_log_dummy) { + if (!prof_booted) { return; } -#endif - arg->ret = write(arg->fd, (void *)to_write, bytes); + tsd = tsd_fetch(); + assert(tsd_reentrancy_level_get(tsd) == 0); + + prof_fdump_impl(tsd); } -/* - * prof_log_emit_{...} goes through the appropriate linked list, emitting each - * node to the json and deallocating it. 
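Among the additions above, prof_sample_event_handler() now routes interval dumps through the shared counter module: each sampling event adds the elapsed allocation bytes to prof_idump_accumulated, and prof_idump() fires once the running total crosses prof_interval. A minimal sketch of that accumulate-and-fire idea, with hypothetical names and none of the atomics or locking the real counter_accum() provides:

```
#include <stdbool.h>
#include <stdint.h>

typedef struct byte_counter_s {
	uint64_t interval;	/* fire once this many bytes have elapsed */
	uint64_t accum;		/* bytes accumulated since the last fire */
} byte_counter_t;

static void
byte_counter_init(byte_counter_t *c, uint64_t interval) {
	c->interval = interval;
	c->accum = 0;
}

/* Add elapsed bytes; return true when the interval boundary is crossed. */
static bool
byte_counter_accum(byte_counter_t *c, uint64_t elapsed) {
	c->accum += elapsed;
	if (c->accum < c->interval) {
		return false;
	}
	/* Carry the remainder; the real module also handles concurrency. */
	c->accum -= c->interval;
	return true;
}

/*
 * Usage mirroring the handler above:
 *
 *	if (byte_counter_accum(&idump_counter, elapsed)) {
 *		trigger_interval_dump();
 *	}
 */
```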
- */ -static void -prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) { - emitter_json_array_kv_begin(emitter, "threads"); - prof_thr_node_t *thr_node = log_thr_first; - prof_thr_node_t *thr_old_node; - while (thr_node != NULL) { - emitter_json_object_begin(emitter); +static bool +prof_idump_accum_init(void) { + cassert(config_prof); - emitter_json_kv(emitter, "thr_uid", emitter_type_uint64, - &thr_node->thr_uid); + return counter_accum_init(&prof_idump_accumulated, prof_interval); +} - char *thr_name = thr_node->name; +void +prof_idump(tsdn_t *tsdn) { + tsd_t *tsd; + prof_tdata_t *tdata; - emitter_json_kv(emitter, "thr_name", emitter_type_string, - &thr_name); + cassert(config_prof); - emitter_json_object_end(emitter); - thr_old_node = thr_node; - thr_node = thr_node->next; - idalloc(tsd, thr_old_node); + if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { + return; } - emitter_json_array_end(emitter); -} - -static void -prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) { - emitter_json_array_kv_begin(emitter, "stack_traces"); - prof_bt_node_t *bt_node = log_bt_first; - prof_bt_node_t *bt_old_node; - /* - * Calculate how many hex digits we need: twice number of bytes, two for - * "0x", and then one more for terminating '\0'. - */ - char buf[2 * sizeof(intptr_t) + 3]; - size_t buf_sz = sizeof(buf); - while (bt_node != NULL) { - emitter_json_array_begin(emitter); - size_t i; - for (i = 0; i < bt_node->bt.len; i++) { - malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]); - char *trace_str = buf; - emitter_json_value(emitter, emitter_type_string, - &trace_str); - } - emitter_json_array_end(emitter); - - bt_old_node = bt_node; - bt_node = bt_node->next; - idalloc(tsd, bt_old_node); + tsd = tsdn_tsd(tsdn); + if (tsd_reentrancy_level_get(tsd) > 0) { + return; } - emitter_json_array_end(emitter); -} - -static void -prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) { - emitter_json_array_kv_begin(emitter, "allocations"); - prof_alloc_node_t *alloc_node = log_alloc_first; - prof_alloc_node_t *alloc_old_node; - while (alloc_node != NULL) { - emitter_json_object_begin(emitter); - emitter_json_kv(emitter, "alloc_thread", emitter_type_size, - &alloc_node->alloc_thr_ind); + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return; + } + if (tdata->enq) { + tdata->enq_idump = true; + return; + } - emitter_json_kv(emitter, "free_thread", emitter_type_size, - &alloc_node->free_thr_ind); + prof_idump_impl(tsd); +} - emitter_json_kv(emitter, "alloc_trace", emitter_type_size, - &alloc_node->alloc_bt_ind); +bool +prof_mdump(tsd_t *tsd, const char *filename) { + cassert(config_prof); + assert(tsd_reentrancy_level_get(tsd) == 0); - emitter_json_kv(emitter, "free_trace", emitter_type_size, - &alloc_node->free_bt_ind); + if (!opt_prof || !prof_booted) { + return true; + } - emitter_json_kv(emitter, "alloc_timestamp", - emitter_type_uint64, &alloc_node->alloc_time_ns); + return prof_mdump_impl(tsd, filename); +} - emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64, - &alloc_node->free_time_ns); +void +prof_gdump(tsdn_t *tsdn) { + tsd_t *tsd; + prof_tdata_t *tdata; - emitter_json_kv(emitter, "usize", emitter_type_uint64, - &alloc_node->usize); + cassert(config_prof); - emitter_json_object_end(emitter); + if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { + return; + } + tsd = tsdn_tsd(tsdn); + if (tsd_reentrancy_level_get(tsd) > 0) { + return; + } - alloc_old_node = alloc_node; - alloc_node = alloc_node->next; - idalloc(tsd, alloc_old_node); + 
tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + return; + } + if (tdata->enq) { + tdata->enq_gdump = true; + return; } - emitter_json_array_end(emitter); -} -static void -prof_log_emit_metadata(emitter_t *emitter) { - emitter_json_object_kv_begin(emitter, "info"); + prof_gdump_impl(tsd); +} - nstime_t now = NSTIME_ZERO_INITIALIZER; +static uint64_t +prof_thr_uid_alloc(tsdn_t *tsdn) { + uint64_t thr_uid; - nstime_update(&now); - uint64_t ns = nstime_ns(&now) - nstime_ns(&log_start_timestamp); - emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns); + malloc_mutex_lock(tsdn, &next_thr_uid_mtx); + thr_uid = next_thr_uid; + next_thr_uid++; + malloc_mutex_unlock(tsdn, &next_thr_uid_mtx); - char *vers = JEMALLOC_VERSION; - emitter_json_kv(emitter, "version", - emitter_type_string, &vers); + return thr_uid; +} - emitter_json_kv(emitter, "lg_sample_rate", - emitter_type_int, &lg_prof_sample); +prof_tdata_t * +prof_tdata_init(tsd_t *tsd) { + return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0, + NULL, prof_thread_active_init_get(tsd_tsdn(tsd))); +} - int pid = prof_getpid(); - emitter_json_kv(emitter, "pid", emitter_type_int, &pid); +prof_tdata_t * +prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) { + uint64_t thr_uid = tdata->thr_uid; + uint64_t thr_discrim = tdata->thr_discrim + 1; + char *thread_name = (tdata->thread_name != NULL) ? + prof_thread_name_alloc(tsd, tdata->thread_name) : NULL; + bool active = tdata->active; - emitter_json_object_end(emitter); + prof_tdata_detach(tsd, tdata); + return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name, + active); } +void +prof_tdata_cleanup(tsd_t *tsd) { + prof_tdata_t *tdata; -bool -prof_log_stop(tsdn_t *tsdn) { - if (!opt_prof || !prof_booted) { - return true; + if (!config_prof) { + return; } - tsd_t *tsd = tsdn_tsd(tsdn); - malloc_mutex_lock(tsdn, &log_mtx); - - if (prof_logging_state != prof_logging_state_started) { - malloc_mutex_unlock(tsdn, &log_mtx); - return true; + tdata = tsd_prof_tdata_get(tsd); + if (tdata != NULL) { + prof_tdata_detach(tsd, tdata); } +} - /* - * Set the state to dumping. We'll set it to stopped when we're done. - * Since other threads won't be able to start/stop/log when the state is - * dumping, we don't have to hold the lock during the whole method. - */ - prof_logging_state = prof_logging_state_dumping; - malloc_mutex_unlock(tsdn, &log_mtx); - - - emitter_t emitter; - - /* Create a file. */ +bool +prof_active_get(tsdn_t *tsdn) { + bool prof_active_current; - int fd; -#ifdef JEMALLOC_JET - if (prof_log_dummy) { - fd = 0; - } else { - fd = creat(log_filename, 0644); - } -#else - fd = creat(log_filename, 0644); -#endif + prof_active_assert(); + malloc_mutex_lock(tsdn, &prof_active_mtx); + prof_active_current = prof_active_state; + malloc_mutex_unlock(tsdn, &prof_active_mtx); + return prof_active_current; +} - if (fd == -1) { - malloc_printf(": creat() for log file \"%s\" " - " failed with %d\n", log_filename, errno); - if (opt_abort) { - abort(); - } - return true; - } +bool +prof_active_set(tsdn_t *tsdn, bool active) { + bool prof_active_old; - /* Emit to json. */ - struct prof_emitter_cb_arg_s arg; - arg.fd = fd; - emitter_init(&emitter, emitter_output_json, &prof_emitter_write_cb, - (void *)(&arg)); - - emitter_begin(&emitter); - prof_log_emit_metadata(&emitter); - prof_log_emit_threads(tsd, &emitter); - prof_log_emit_traces(tsd, &emitter); - prof_log_emit_allocs(tsd, &emitter); - emitter_end(&emitter); - - /* Reset global state. 
*/ - if (log_tables_initialized) { - ckh_delete(tsd, &log_bt_node_set); - ckh_delete(tsd, &log_thr_node_set); - } - log_tables_initialized = false; - log_bt_index = 0; - log_thr_index = 0; - log_bt_first = NULL; - log_bt_last = NULL; - log_thr_first = NULL; - log_thr_last = NULL; - log_alloc_first = NULL; - log_alloc_last = NULL; - - malloc_mutex_lock(tsdn, &log_mtx); - prof_logging_state = prof_logging_state_stopped; - malloc_mutex_unlock(tsdn, &log_mtx); - -#ifdef JEMALLOC_JET - if (prof_log_dummy) { - return false; - } -#endif - return close(fd); + prof_active_assert(); + malloc_mutex_lock(tsdn, &prof_active_mtx); + prof_active_old = prof_active_state; + prof_active_state = active; + malloc_mutex_unlock(tsdn, &prof_active_mtx); + prof_active_assert(); + return prof_active_old; } const char * prof_thread_name_get(tsd_t *tsd) { + assert(tsd_reentrancy_level_get(tsd) == 0); + prof_tdata_t *tdata; tdata = prof_tdata_get(tsd, true); @@ -2790,69 +448,19 @@ prof_thread_name_get(tsd_t *tsd) { return (tdata->thread_name != NULL ? tdata->thread_name : ""); } -static char * -prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) { - char *ret; - size_t size; - - if (thread_name == NULL) { - return NULL; - } - - size = strlen(thread_name) + 1; - if (size == 1) { - return ""; - } - - ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true, - arena_get(TSDN_NULL, 0, true), true); - if (ret == NULL) { - return NULL; - } - memcpy(ret, thread_name, size); - return ret; -} - int prof_thread_name_set(tsd_t *tsd, const char *thread_name) { - prof_tdata_t *tdata; - unsigned i; - char *s; - - tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) { - return EAGAIN; - } - - /* Validate input. */ - if (thread_name == NULL) { - return EFAULT; - } - for (i = 0; thread_name[i] != '\0'; i++) { - char c = thread_name[i]; - if (!isgraph(c) && !isblank(c)) { - return EFAULT; - } - } - - s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name); - if (s == NULL) { - return EAGAIN; - } - - if (tdata->thread_name != NULL) { - idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, - true); - tdata->thread_name = NULL; - } - if (strlen(s) > 0) { - tdata->thread_name = s; + if (opt_prof_sys_thread_name) { + return ENOENT; + } else { + return prof_thread_name_set_impl(tsd, thread_name); } - return 0; } bool prof_thread_active_get(tsd_t *tsd) { + assert(tsd_reentrancy_level_get(tsd) == 0); + prof_tdata_t *tdata; tdata = prof_tdata_get(tsd, true); @@ -2864,6 +472,8 @@ prof_thread_active_get(tsd_t *tsd) { bool prof_thread_active_set(tsd_t *tsd, bool active) { + assert(tsd_reentrancy_level_get(tsd) == 0); + prof_tdata_t *tdata; tdata = prof_tdata_get(tsd, true); @@ -2916,6 +526,28 @@ prof_gdump_set(tsdn_t *tsdn, bool gdump) { return prof_gdump_old; } +void +prof_backtrace_hook_set(prof_backtrace_hook_t hook) { + atomic_store_p(&prof_backtrace_hook, hook, ATOMIC_RELEASE); +} + +prof_backtrace_hook_t +prof_backtrace_hook_get() { + return (prof_backtrace_hook_t)atomic_load_p(&prof_backtrace_hook, + ATOMIC_ACQUIRE); +} + +void +prof_dump_hook_set(prof_dump_hook_t hook) { + atomic_store_p(&prof_dump_hook, hook, ATOMIC_RELEASE); +} + +prof_dump_hook_t +prof_dump_hook_get() { + return (prof_dump_hook_t)atomic_load_p(&prof_dump_hook, + ATOMIC_ACQUIRE); +} + void prof_boot0(void) { cassert(config_prof); @@ -2932,6 +564,9 @@ prof_boot1(void) { * opt_prof must be in its final state before any arenas are * initialized, so this function must be executed early. 
*/ + if (opt_prof_leak_error && !opt_prof_leak) { + opt_prof_leak = true; + } if (opt_prof_leak && !opt_prof) { /* @@ -2949,61 +584,65 @@ prof_boot1(void) { } bool -prof_boot2(tsd_t *tsd) { +prof_boot2(tsd_t *tsd, base_t *base) { cassert(config_prof); - if (opt_prof) { - unsigned i; + /* + * Initialize the global mutexes unconditionally to maintain correct + * stats when opt_prof is false. + */ + if (malloc_mutex_init(&prof_active_mtx, "prof_active", + WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) { + return true; + } + if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump", + WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) { + return true; + } + if (malloc_mutex_init(&prof_thread_active_init_mtx, + "prof_thread_active_init", WITNESS_RANK_PROF_THREAD_ACTIVE_INIT, + malloc_mutex_rank_exclusive)) { + return true; + } + if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx", + WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) { + return true; + } + if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas", + WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) { + return true; + } + if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid", + WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) { + return true; + } + if (malloc_mutex_init(&prof_stats_mtx, "prof_stats", + WITNESS_RANK_PROF_STATS, malloc_mutex_rank_exclusive)) { + return true; + } + if (malloc_mutex_init(&prof_dump_filename_mtx, + "prof_dump_filename", WITNESS_RANK_PROF_DUMP_FILENAME, + malloc_mutex_rank_exclusive)) { + return true; + } + if (malloc_mutex_init(&prof_dump_mtx, "prof_dump", + WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) { + return true; + } + if (opt_prof) { lg_prof_sample = opt_lg_prof_sample; - - prof_active = opt_prof_active; - if (malloc_mutex_init(&prof_active_mtx, "prof_active", - WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) { - return true; - } - + prof_unbias_map_init(); + prof_active_state = opt_prof_active; prof_gdump_val = opt_prof_gdump; - if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump", - WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) { - return true; - } - prof_thread_active_init = opt_prof_thread_active_init; - if (malloc_mutex_init(&prof_thread_active_init_mtx, - "prof_thread_active_init", - WITNESS_RANK_PROF_THREAD_ACTIVE_INIT, - malloc_mutex_rank_exclusive)) { - return true; - } - - if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash, - prof_bt_keycomp)) { - return true; - } - if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx", - WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) { - return true; - } - tdata_tree_new(&tdatas); - if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas", - WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) { + if (prof_data_init(tsd)) { return true; } next_thr_uid = 0; - if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid", - WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) { - return true; - } - - if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq", - WITNESS_RANK_PROF_DUMP_SEQ, malloc_mutex_rank_exclusive)) { - return true; - } - if (malloc_mutex_init(&prof_dump_mtx, "prof_dump", - WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) { + if (prof_idump_accum_init()) { return true; } @@ -3015,42 +654,22 @@ prof_boot2(tsd_t *tsd) { } } - if (opt_prof_log) { - prof_log_start(tsd_tsdn(tsd), NULL); - } - - if (atexit(prof_log_stop_final) != 0) { - malloc_write(": Error in atexit() " - "for logging\n"); - if (opt_abort) { - abort(); - } - } - - if (malloc_mutex_init(&log_mtx, 
"prof_log", - WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) { - return true; - } - - if (ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS, - prof_bt_node_hash, prof_bt_node_keycomp)) { + if (prof_log_init(tsd)) { return true; } - if (ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS, - prof_thr_node_hash, prof_thr_node_keycomp)) { + if (prof_recent_init()) { return true; } - log_tables_initialized = true; + prof_base = base; - gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), - b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t), - CACHELINE); + gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), base, + PROF_NCTX_LOCKS * sizeof(malloc_mutex_t), CACHELINE); if (gctx_locks == NULL) { return true; } - for (i = 0; i < PROF_NCTX_LOCKS; i++) { + for (unsigned i = 0; i < PROF_NCTX_LOCKS; i++) { if (malloc_mutex_init(&gctx_locks[i], "prof_gctx", WITNESS_RANK_PROF_GCTX, malloc_mutex_rank_exclusive)) { @@ -3058,26 +677,21 @@ prof_boot2(tsd_t *tsd) { } } - tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), - b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t), - CACHELINE); + tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), base, + PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t), CACHELINE); if (tdata_locks == NULL) { return true; } - for (i = 0; i < PROF_NTDATA_LOCKS; i++) { + for (unsigned i = 0; i < PROF_NTDATA_LOCKS; i++) { if (malloc_mutex_init(&tdata_locks[i], "prof_tdata", WITNESS_RANK_PROF_TDATA, malloc_mutex_rank_exclusive)) { return true; } } -#ifdef JEMALLOC_PROF_LIBGCC - /* - * Cause the backtracing machinery to allocate its internal - * state before enabling profiling. - */ - _Unwind_Backtrace(prof_unwind_init_callback, NULL); -#endif + + prof_unwind_init(); + prof_hooks_init(); } prof_booted = true; @@ -3095,18 +709,23 @@ prof_prefork0(tsdn_t *tsdn) { for (i = 0; i < PROF_NTDATA_LOCKS; i++) { malloc_mutex_prefork(tsdn, &tdata_locks[i]); } + malloc_mutex_prefork(tsdn, &log_mtx); for (i = 0; i < PROF_NCTX_LOCKS; i++) { malloc_mutex_prefork(tsdn, &gctx_locks[i]); } + malloc_mutex_prefork(tsdn, &prof_recent_dump_mtx); } } void prof_prefork1(tsdn_t *tsdn) { if (config_prof && opt_prof) { + counter_prefork(tsdn, &prof_idump_accumulated); malloc_mutex_prefork(tsdn, &prof_active_mtx); - malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx); + malloc_mutex_prefork(tsdn, &prof_dump_filename_mtx); malloc_mutex_prefork(tsdn, &prof_gdump_mtx); + malloc_mutex_prefork(tsdn, &prof_recent_alloc_mtx); + malloc_mutex_prefork(tsdn, &prof_stats_mtx); malloc_mutex_prefork(tsdn, &next_thr_uid_mtx); malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx); } @@ -3120,12 +739,17 @@ prof_postfork_parent(tsdn_t *tsdn) { malloc_mutex_postfork_parent(tsdn, &prof_thread_active_init_mtx); malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_stats_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_recent_alloc_mtx); malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx); - malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_dump_filename_mtx); malloc_mutex_postfork_parent(tsdn, &prof_active_mtx); + counter_postfork_parent(tsdn, &prof_idump_accumulated); + malloc_mutex_postfork_parent(tsdn, &prof_recent_dump_mtx); for (i = 0; i < PROF_NCTX_LOCKS; i++) { malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]); } + malloc_mutex_postfork_parent(tsdn, &log_mtx); for (i = 0; i < PROF_NTDATA_LOCKS; i++) { malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]); } @@ -3142,12 +766,17 @@ prof_postfork_child(tsdn_t *tsdn) { 
malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx); malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx); + malloc_mutex_postfork_child(tsdn, &prof_stats_mtx); + malloc_mutex_postfork_child(tsdn, &prof_recent_alloc_mtx); malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx); - malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx); + malloc_mutex_postfork_child(tsdn, &prof_dump_filename_mtx); malloc_mutex_postfork_child(tsdn, &prof_active_mtx); + counter_postfork_child(tsdn, &prof_idump_accumulated); + malloc_mutex_postfork_child(tsdn, &prof_recent_dump_mtx); for (i = 0; i < PROF_NCTX_LOCKS; i++) { malloc_mutex_postfork_child(tsdn, &gctx_locks[i]); } + malloc_mutex_postfork_child(tsdn, &log_mtx); for (i = 0; i < PROF_NTDATA_LOCKS; i++) { malloc_mutex_postfork_child(tsdn, &tdata_locks[i]); } diff --git a/contrib/jemalloc/src/prof_data.c b/contrib/jemalloc/src/prof_data.c new file mode 100644 index 00000000000000..bfa55be1ca559f --- /dev/null +++ b/contrib/jemalloc/src/prof_data.c @@ -0,0 +1,1447 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/prof_data.h" + +/* + * This file defines and manages the core profiling data structures. + * + * Conceptually, profiling data can be imagined as a table with three columns: + * thread, stack trace, and current allocation size. (When prof_accum is on, + * there's one additional column which is the cumulative allocation size.) + * + * Implementation wise, each thread maintains a hash recording the stack trace + * to allocation size correspondences, which are basically the individual rows + * in the table. In addition, two global "indices" are built to make data + * aggregation efficient (for dumping): bt2gctx and tdatas, which are basically + * the "grouped by stack trace" and "grouped by thread" views of the same table, + * respectively. Note that the allocation size is only aggregated to the two + * indices at dumping time, so as to optimize for performance. + */ + +/******************************************************************************/ + +malloc_mutex_t bt2gctx_mtx; +malloc_mutex_t tdatas_mtx; +malloc_mutex_t prof_dump_mtx; + +/* + * Table of mutexes that are shared among gctx's. These are leaf locks, so + * there is no problem with using them for more than one gctx at the same time. + * The primary motivation for this sharing though is that gctx's are ephemeral, + * and destroying mutexes causes complications for systems that allocate when + * creating/destroying mutexes. + */ +malloc_mutex_t *gctx_locks; +static atomic_u_t cum_gctxs; /* Atomic counter. */ + +/* + * Table of mutexes that are shared among tdata's. No operations require + * holding multiple tdata locks, so there is no problem with using them for more + * than one tdata at the same time, even though a gctx lock may be acquired + * while holding a tdata lock. + */ +malloc_mutex_t *tdata_locks; + +/* + * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data + * structure that knows about all backtraces currently captured. + */ +static ckh_t bt2gctx; + +/* + * Tree of all extant prof_tdata_t structures, regardless of state, + * {attached,detached,expired}. 
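The file-header comment above pictures the profiling data as one conceptual table of (thread, stack trace, live size) rows, stored per thread and regrouped through two global indices: bt2gctx (rows grouped by stack trace) and tdatas (rows grouped by thread, declared just below). A toy model with hypothetical names, only to make the grouping concrete; as the comment says, the sizes are folded into the grouped views lazily, at dump time:

```
#include <stddef.h>
#include <stdint.h>

/* One conceptual "row": a (thread, stack trace, live size) triple. */
typedef struct row_s {
	uint64_t thr_uid;	/* which thread did the allocating */
	size_t   trace_id;	/* identifier of the captured stack trace */
	size_t   cur_bytes;	/* live bytes attributed to that pair */
} row_t;

/* Per-thread view: roughly what each thread's own bt2tctx hash holds. */
typedef struct thread_rows_s {
	uint64_t thr_uid;
	row_t   *rows;
	size_t   nrows;
} thread_rows_t;

/*
 * The "grouped by stack trace" view (bt2gctx in the real code) is the same
 * rows summed across threads for one trace; the "grouped by thread" view
 * (tdatas) walks one thread_rows_t at a time.  jemalloc only performs this
 * aggregation at dump time, to keep the allocation path cheap.
 */
static size_t
bytes_for_trace(const thread_rows_t *threads, size_t nthreads,
    size_t trace_id) {
	size_t total = 0;
	for (size_t t = 0; t < nthreads; t++) {
		for (size_t r = 0; r < threads[t].nrows; r++) {
			if (threads[t].rows[r].trace_id == trace_id) {
				total += threads[t].rows[r].cur_bytes;
			}
		}
	}
	return total;
}
```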
+ */ +static prof_tdata_tree_t tdatas; + +size_t prof_unbiased_sz[PROF_SC_NSIZES]; +size_t prof_shifted_unbiased_cnt[PROF_SC_NSIZES]; + +/******************************************************************************/ +/* Red-black trees. */ + +static int +prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) { + uint64_t a_thr_uid = a->thr_uid; + uint64_t b_thr_uid = b->thr_uid; + int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid); + if (ret == 0) { + uint64_t a_thr_discrim = a->thr_discrim; + uint64_t b_thr_discrim = b->thr_discrim; + ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim < + b_thr_discrim); + if (ret == 0) { + uint64_t a_tctx_uid = a->tctx_uid; + uint64_t b_tctx_uid = b->tctx_uid; + ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < + b_tctx_uid); + } + } + return ret; +} + +rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, + tctx_link, prof_tctx_comp) + +static int +prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) { + unsigned a_len = a->bt.len; + unsigned b_len = b->bt.len; + unsigned comp_len = (a_len < b_len) ? a_len : b_len; + int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *)); + if (ret == 0) { + ret = (a_len > b_len) - (a_len < b_len); + } + return ret; +} + +rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link, + prof_gctx_comp) + +static int +prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) { + int ret; + uint64_t a_uid = a->thr_uid; + uint64_t b_uid = b->thr_uid; + + ret = ((a_uid > b_uid) - (a_uid < b_uid)); + if (ret == 0) { + uint64_t a_discrim = a->thr_discrim; + uint64_t b_discrim = b->thr_discrim; + + ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim)); + } + return ret; +} + +rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, + prof_tdata_comp) + +/******************************************************************************/ + +static malloc_mutex_t * +prof_gctx_mutex_choose(void) { + unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED); + + return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]; +} + +static malloc_mutex_t * +prof_tdata_mutex_choose(uint64_t thr_uid) { + return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS]; +} + +bool +prof_data_init(tsd_t *tsd) { + tdata_tree_new(&tdatas); + return ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, + prof_bt_hash, prof_bt_keycomp); +} + +static void +prof_enter(tsd_t *tsd, prof_tdata_t *tdata) { + cassert(config_prof); + assert(tdata == prof_tdata_get(tsd, false)); + + if (tdata != NULL) { + assert(!tdata->enq); + tdata->enq = true; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); +} + +static void +prof_leave(tsd_t *tsd, prof_tdata_t *tdata) { + cassert(config_prof); + assert(tdata == prof_tdata_get(tsd, false)); + + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); + + if (tdata != NULL) { + bool idump, gdump; + + assert(tdata->enq); + tdata->enq = false; + idump = tdata->enq_idump; + tdata->enq_idump = false; + gdump = tdata->enq_gdump; + tdata->enq_gdump = false; + + if (idump) { + prof_idump(tsd_tsdn(tsd)); + } + if (gdump) { + prof_gdump(tsd_tsdn(tsd)); + } + } +} + +static prof_gctx_t * +prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { + /* + * Create a single allocation that has space for vec of length bt->len. 
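The comment above, together with the offsetof() sizing that follows it, is the classic trailing-array layout: prof_gctx_create() makes one allocation that holds both the gctx header and its backtrace vector, so a single deallocation releases both. The same pattern in isolation, with plain malloc() and hypothetical names:

```
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

typedef struct trace_s {
	size_t len;
	void  *vec[1];	/* trailing storage; actual length chosen at runtime */
} trace_t;

static trace_t *
trace_dup(void *const *frames, size_t len) {
	/* One block holds the header plus len pointers. */
	size_t size = offsetof(trace_t, vec) + len * sizeof(void *);
	trace_t *t = malloc(size);

	if (t == NULL) {
		return NULL;
	}
	t->len = len;
	memcpy(t->vec, frames, len * sizeof(void *));
	return t;	/* released with a single free(t) */
}
```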
+ */ + size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); + prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, + sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), + true); + if (gctx == NULL) { + return NULL; + } + gctx->lock = prof_gctx_mutex_choose(); + /* + * Set nlimbo to 1, in order to avoid a race condition with + * prof_tctx_destroy()/prof_gctx_try_destroy(). + */ + gctx->nlimbo = 1; + tctx_tree_new(&gctx->tctxs); + /* Duplicate bt. */ + memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *)); + gctx->bt.vec = gctx->vec; + gctx->bt.len = bt->len; + return gctx; +} + +static void +prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, + prof_gctx_t *gctx) { + cassert(config_prof); + + /* + * Check that gctx is still unused by any thread cache before destroying + * it. prof_lookup() increments gctx->nlimbo in order to avoid a race + * condition with this function, as does prof_tctx_destroy() in order to + * avoid a race between the main body of prof_tctx_destroy() and entry + * into this function. + */ + prof_enter(tsd, tdata_self); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + assert(gctx->nlimbo != 0); + if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { + /* Remove gctx from bt2gctx. */ + if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) { + not_reached(); + } + prof_leave(tsd, tdata_self); + /* Destroy gctx. */ + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true); + } else { + /* + * Compensate for increment in prof_tctx_destroy() or + * prof_lookup(). + */ + gctx->nlimbo--; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + prof_leave(tsd, tdata_self); + } +} + +static bool +prof_gctx_should_destroy(prof_gctx_t *gctx) { + if (opt_prof_accum) { + return false; + } + if (!tctx_tree_empty(&gctx->tctxs)) { + return false; + } + if (gctx->nlimbo != 0) { + return false; + } + return true; +} + +static bool +prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, + void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) { + union { + prof_gctx_t *p; + void *v; + } gctx, tgctx; + union { + prof_bt_t *p; + void *v; + } btkey; + bool new_gctx; + + prof_enter(tsd, tdata); + if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { + /* bt has never been seen before. Insert it. */ + prof_leave(tsd, tdata); + tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); + if (tgctx.v == NULL) { + return true; + } + prof_enter(tsd, tdata); + if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { + gctx.p = tgctx.p; + btkey.p = &gctx.p->bt; + if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { + /* OOM. */ + prof_leave(tsd, tdata); + idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL, + true, true); + return true; + } + new_gctx = true; + } else { + new_gctx = false; + } + } else { + tgctx.v = NULL; + new_gctx = false; + } + + if (!new_gctx) { + /* + * Increment nlimbo, in order to avoid a race condition with + * prof_tctx_destroy()/prof_gctx_try_destroy(). + */ + malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock); + gctx.p->nlimbo++; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock); + new_gctx = false; + + if (tgctx.v != NULL) { + /* Lost race to insert. 
*/ + idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true, + true); + } + } + prof_leave(tsd, tdata); + + *p_btkey = btkey.v; + *p_gctx = gctx.p; + *p_new_gctx = new_gctx; + return false; +} + +prof_tctx_t * +prof_lookup(tsd_t *tsd, prof_bt_t *bt) { + union { + prof_tctx_t *p; + void *v; + } ret; + prof_tdata_t *tdata; + bool not_found; + + cassert(config_prof); + + tdata = prof_tdata_get(tsd, false); + assert(tdata != NULL); + + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); + if (!not_found) { /* Note double negative! */ + ret.p->prepared = true; + } + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (not_found) { + void *btkey; + prof_gctx_t *gctx; + bool new_gctx, error; + + /* + * This thread's cache lacks bt. Look for it in the global + * cache. + */ + if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, + &new_gctx)) { + return NULL; + } + + /* Link a prof_tctx_t into gctx for this thread. */ + ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), + sz_size2index(sizeof(prof_tctx_t)), false, NULL, true, + arena_ichoose(tsd, NULL), true); + if (ret.p == NULL) { + if (new_gctx) { + prof_gctx_try_destroy(tsd, tdata, gctx); + } + return NULL; + } + ret.p->tdata = tdata; + ret.p->thr_uid = tdata->thr_uid; + ret.p->thr_discrim = tdata->thr_discrim; + ret.p->recent_count = 0; + memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); + ret.p->gctx = gctx; + ret.p->tctx_uid = tdata->tctx_uid_next++; + ret.p->prepared = true; + ret.p->state = prof_tctx_state_initializing; + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (error) { + if (new_gctx) { + prof_gctx_try_destroy(tsd, tdata, gctx); + } + idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true); + return NULL; + } + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + ret.p->state = prof_tctx_state_nominal; + tctx_tree_insert(&gctx->tctxs, ret.p); + gctx->nlimbo--; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + } + + return ret.p; +} + +/* Used in unit tests. */ +static prof_tdata_t * +prof_tdata_count_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata, + void *arg) { + size_t *tdata_count = (size_t *)arg; + + (*tdata_count)++; + + return NULL; +} + +/* Used in unit tests. */ +size_t +prof_tdata_count(void) { + size_t tdata_count = 0; + tsdn_t *tsdn; + + tsdn = tsdn_fetch(); + malloc_mutex_lock(tsdn, &tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter, + (void *)&tdata_count); + malloc_mutex_unlock(tsdn, &tdatas_mtx); + + return tdata_count; +} + +/* Used in unit tests. 
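prof_lookup() above is a two-level lookup: the thread-local bt2tctx hash is tried first, and only on a miss does prof_lookup_global() take the global lock, re-check bt2gctx, and either publish a gctx that was created optimistically outside the lock or discard it when another thread won the race. A compact sketch of that check, create-unlocked, re-check-and-insert shape, with hypothetical names and a single pthread mutex standing in for prof_enter()/prof_leave():

```
#include <pthread.h>
#include <stdlib.h>

typedef struct entry_s entry_t;
struct entry_s { int key; entry_t *next; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static entry_t *table;	/* toy global index: an unsorted list */

static entry_t *
table_find_locked(int key) {
	for (entry_t *e = table; e != NULL; e = e->next) {
		if (e->key == key) {
			return e;
		}
	}
	return NULL;
}

static entry_t *
lookup_or_insert(int key) {
	pthread_mutex_lock(&table_lock);
	entry_t *e = table_find_locked(key);
	pthread_mutex_unlock(&table_lock);
	if (e != NULL) {
		return e;
	}

	/* Miss: build the candidate without holding the global lock. */
	entry_t *fresh = malloc(sizeof(*fresh));
	if (fresh == NULL) {
		return NULL;
	}
	fresh->key = key;

	pthread_mutex_lock(&table_lock);
	e = table_find_locked(key);	/* re-check: another thread may have raced us */
	if (e == NULL) {
		fresh->next = table;
		table = fresh;
		e = fresh;
		fresh = NULL;
	}
	pthread_mutex_unlock(&table_lock);

	free(fresh);	/* NULL if we won; otherwise drops the loser's duplicate */
	return e;
}
```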
*/ +size_t +prof_bt_count(void) { + size_t bt_count; + tsd_t *tsd; + prof_tdata_t *tdata; + + tsd = tsd_fetch(); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + return 0; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); + bt_count = ckh_count(&bt2gctx); + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); + + return bt_count; +} + +char * +prof_thread_name_alloc(tsd_t *tsd, const char *thread_name) { + char *ret; + size_t size; + + if (thread_name == NULL) { + return NULL; + } + + size = strlen(thread_name) + 1; + if (size == 1) { + return ""; + } + + ret = iallocztm(tsd_tsdn(tsd), size, sz_size2index(size), false, NULL, + true, arena_get(TSDN_NULL, 0, true), true); + if (ret == NULL) { + return NULL; + } + memcpy(ret, thread_name, size); + return ret; +} + +int +prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name) { + assert(tsd_reentrancy_level_get(tsd) == 0); + + prof_tdata_t *tdata; + unsigned i; + char *s; + + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return EAGAIN; + } + + /* Validate input. */ + if (thread_name == NULL) { + return EFAULT; + } + for (i = 0; thread_name[i] != '\0'; i++) { + char c = thread_name[i]; + if (!isgraph(c) && !isblank(c)) { + return EFAULT; + } + } + + s = prof_thread_name_alloc(tsd, thread_name); + if (s == NULL) { + return EAGAIN; + } + + if (tdata->thread_name != NULL) { + idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, + true); + tdata->thread_name = NULL; + } + if (strlen(s) > 0) { + tdata->thread_name = s; + } + return 0; +} + +JEMALLOC_FORMAT_PRINTF(3, 4) +static void +prof_dump_printf(write_cb_t *prof_dump_write, void *cbopaque, + const char *format, ...) { + va_list ap; + char buf[PROF_PRINTF_BUFSIZE]; + + va_start(ap, format); + malloc_vsnprintf(buf, sizeof(buf), format, ap); + va_end(ap); + prof_dump_write(cbopaque, buf); +} + +/* + * Casting a double to a uint64_t may not necessarily be in range; this can be + * UB. I don't think this is practically possible with the cur counters, but + * plausibly could be with the accum counters. + */ +#ifdef JEMALLOC_PROF +static uint64_t +prof_double_uint64_cast(double d) { + /* + * Note: UINT64_MAX + 1 is exactly representable as a double on all + * reasonable platforms (certainly those we'll support). Writing this + * as !(a < b) instead of (a >= b) means that we're NaN-safe. + */ + double rounded = round(d); + if (!(rounded < (double)UINT64_MAX)) { + return UINT64_MAX; + } + return (uint64_t)rounded; +} +#endif + +void prof_unbias_map_init() { + /* See the comment in prof_sample_new_event_wait */ +#ifdef JEMALLOC_PROF + for (szind_t i = 0; i < SC_NSIZES; i++) { + double sz = (double)sz_index2size(i); + double rate = (double)(ZU(1) << lg_prof_sample); + double div_val = 1.0 - exp(-sz / rate); + double unbiased_sz = sz / div_val; + /* + * The "true" right value for the unbiased count is + * 1.0/(1 - exp(-sz/rate)). The problem is, we keep the counts + * as integers (for a variety of reasons -- rounding errors + * could trigger asserts, and not all libcs can properly handle + * floating point arithmetic during malloc calls inside libc). + * Rounding to an integer, though, can lead to rounding errors + * of over 30% for sizes close to the sampling rate. So + * instead, we multiply by a constant, dividing the maximum + * possible roundoff error by that constant. To avoid overflow + * in summing up size_t values, the largest safe constant we can + * pick is the size of the smallest allocation. 
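The rounding discussion that ends above is easier to see with numbers: for a size class near the sampling rate the ideal weight 1/(1 - exp(-sz/rate)) sits around 1.4 to 1.7, so rounding it straight to an integer can be off by roughly 30%, while scaling by the smallest size class first (2^SC_LG_TINY_MIN, typically 8) divides that rounding error accordingly. A throwaway check of those figures; the sample rate and shift below are example values, not the build's actual configuration:

```
#include <math.h>
#include <stdio.h>

int
main(void) {
	double rate = 512.0 * 1024;	/* example average sample period (2^19 bytes) */
	double shift = 8.0;		/* example 2^SC_LG_TINY_MIN, i.e. LG_TINY_MIN == 3 */
	double sizes[] = { 4096.0, 0.9 * rate, 1.2 * rate };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		double sz = sizes[i];
		double ideal = 1.0 / (1.0 - exp(-sz / rate));
		double rounded = round(ideal);
		double shifted = round(ideal * shift) / shift;

		printf("sz=%8.0f ideal=%.4f rounded=%.0f (%.1f%% err) "
		    "shifted=%.4f (%.2f%% err)\n",
		    sz, ideal, rounded,
		    100.0 * fabs(rounded - ideal) / ideal,
		    shifted, 100.0 * fabs(shifted - ideal) / ideal);
	}
	return 0;
}
```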
+ */ + double cnt_shift = (double)(ZU(1) << SC_LG_TINY_MIN); + double shifted_unbiased_cnt = cnt_shift / div_val; + prof_unbiased_sz[i] = (size_t)round(unbiased_sz); + prof_shifted_unbiased_cnt[i] = (size_t)round( + shifted_unbiased_cnt); + } +#else + unreachable(); +#endif +} + +/* + * The unbiasing story is long. The jeprof unbiasing logic was copied from + * pprof. Both shared an issue: they unbiased using the average size of the + * allocations at a particular stack trace. This can work out OK if allocations + * are mostly of the same size given some stack, but not otherwise. We now + * internally track what the unbiased results ought to be. We can't just report + * them as they are though; they'll still go through the jeprof unbiasing + * process. Instead, we figure out what values we can feed *into* jeprof's + * unbiasing mechanism that will lead to getting the right values out. + * + * It'll unbias count and aggregate size as: + * + * c_out = c_in * 1/(1-exp(-s_in/c_in/R) + * s_out = s_in * 1/(1-exp(-s_in/c_in/R) + * + * We want to solve for the values of c_in and s_in that will + * give the c_out and s_out that we've computed internally. + * + * Let's do a change of variables (both to make the math easier and to make it + * easier to write): + * x = s_in / c_in + * y = s_in + * k = 1/R. + * + * Then + * c_out = y/x * 1/(1-exp(-k*x)) + * s_out = y * 1/(1-exp(-k*x)) + * + * The first equation gives: + * y = x * c_out * (1-exp(-k*x)) + * The second gives: + * y = s_out * (1-exp(-k*x)) + * So we have + * x = s_out / c_out. + * And all the other values fall out from that. + * + * This is all a fair bit of work. The thing we get out of it is that we don't + * break backwards compatibility with jeprof (and the various tools that have + * copied its unbiasing logic). Eventually, we anticipate a v3 heap profile + * dump format based on JSON, at which point I think much of this logic can get + * cleaned up (since we'll be taking a compatibility break there anyways). + */ +static void +prof_do_unbias(uint64_t c_out_shifted_i, uint64_t s_out_i, uint64_t *r_c_in, + uint64_t *r_s_in) { +#ifdef JEMALLOC_PROF + if (c_out_shifted_i == 0 || s_out_i == 0) { + *r_c_in = 0; + *r_s_in = 0; + return; + } + /* + * See the note in prof_unbias_map_init() to see why we take c_out in a + * shifted form. 
+ */ + double c_out = (double)c_out_shifted_i + / (double)(ZU(1) << SC_LG_TINY_MIN); + double s_out = (double)s_out_i; + double R = (double)(ZU(1) << lg_prof_sample); + + double x = s_out / c_out; + double y = s_out * (1.0 - exp(-x / R)); + + double c_in = y / x; + double s_in = y; + + *r_c_in = prof_double_uint64_cast(c_in); + *r_s_in = prof_double_uint64_cast(s_in); +#else + unreachable(); +#endif +} + +static void +prof_dump_print_cnts(write_cb_t *prof_dump_write, void *cbopaque, + const prof_cnt_t *cnts) { + uint64_t curobjs; + uint64_t curbytes; + uint64_t accumobjs; + uint64_t accumbytes; + if (opt_prof_unbias) { + prof_do_unbias(cnts->curobjs_shifted_unbiased, + cnts->curbytes_unbiased, &curobjs, &curbytes); + prof_do_unbias(cnts->accumobjs_shifted_unbiased, + cnts->accumbytes_unbiased, &accumobjs, &accumbytes); + } else { + curobjs = cnts->curobjs; + curbytes = cnts->curbytes; + accumobjs = cnts->accumobjs; + accumbytes = cnts->accumbytes; + } + prof_dump_printf(prof_dump_write, cbopaque, + "%"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]", + curobjs, curbytes, accumobjs, accumbytes); +} + +static void +prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) { + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); + + malloc_mutex_lock(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_initializing: + malloc_mutex_unlock(tsdn, tctx->gctx->lock); + return; + case prof_tctx_state_nominal: + tctx->state = prof_tctx_state_dumping; + malloc_mutex_unlock(tsdn, tctx->gctx->lock); + + memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); + + tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs; + tdata->cnt_summed.curobjs_shifted_unbiased + += tctx->dump_cnts.curobjs_shifted_unbiased; + tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes; + tdata->cnt_summed.curbytes_unbiased + += tctx->dump_cnts.curbytes_unbiased; + if (opt_prof_accum) { + tdata->cnt_summed.accumobjs += + tctx->dump_cnts.accumobjs; + tdata->cnt_summed.accumobjs_shifted_unbiased += + tctx->dump_cnts.accumobjs_shifted_unbiased; + tdata->cnt_summed.accumbytes += + tctx->dump_cnts.accumbytes; + tdata->cnt_summed.accumbytes_unbiased += + tctx->dump_cnts.accumbytes_unbiased; + } + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + not_reached(); + } +} + +static void +prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) { + malloc_mutex_assert_owner(tsdn, gctx->lock); + + gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; + gctx->cnt_summed.curobjs_shifted_unbiased + += tctx->dump_cnts.curobjs_shifted_unbiased; + gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; + gctx->cnt_summed.curbytes_unbiased += tctx->dump_cnts.curbytes_unbiased; + if (opt_prof_accum) { + gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; + gctx->cnt_summed.accumobjs_shifted_unbiased + += tctx->dump_cnts.accumobjs_shifted_unbiased; + gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes; + gctx->cnt_summed.accumbytes_unbiased + += tctx->dump_cnts.accumbytes_unbiased; + } +} + +static prof_tctx_t * +prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_nominal: + /* New since dumping started; ignore. 
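The change-of-variables argument above reduces to an identity that is easy to verify: prof_do_unbias() emits c_in = y/x and s_in = y with x = s_out/c_out and y = s_out*(1 - exp(-x/R)), and since s_in/c_in = x, pushing those values back through jeprof's factor 1/(1 - exp(-(s_in/c_in)/R)) reproduces c_out and s_out. A quick numerical round-trip of that math, with arbitrary example values:

```
#include <math.h>
#include <stdio.h>

int
main(void) {
	double R = 512.0 * 1024;	/* sampling rate, e.g. 2^19 bytes */
	double c_out = 1234.0;		/* internally tracked unbiased count */
	double s_out = 9.5e6;		/* internally tracked unbiased bytes */

	/* What prof_do_unbias() would feed into the dump. */
	double x = s_out / c_out;
	double y = s_out * (1.0 - exp(-x / R));
	double c_in = y / x;
	double s_in = y;

	/* What jeprof's unbiasing step reconstructs from those values. */
	double scale = 1.0 / (1.0 - exp(-(s_in / c_in) / R));
	printf("count: %.6f -> %.6f\n", c_out, c_in * scale);
	printf("bytes: %.6f -> %.6f\n", s_out, s_in * scale);
	return 0;
}
```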
*/ + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx); + break; + default: + not_reached(); + } + + return NULL; +} + +typedef struct prof_dump_iter_arg_s prof_dump_iter_arg_t; +struct prof_dump_iter_arg_s { + tsdn_t *tsdn; + write_cb_t *prof_dump_write; + void *cbopaque; +}; + +static prof_tctx_t * +prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) { + prof_dump_iter_arg_t *arg = (prof_dump_iter_arg_t *)opaque; + malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_initializing: + case prof_tctx_state_nominal: + /* Not captured by this dump. */ + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + prof_dump_printf(arg->prof_dump_write, arg->cbopaque, + " t%"FMTu64": ", tctx->thr_uid); + prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque, + &tctx->dump_cnts); + arg->prof_dump_write(arg->cbopaque, "\n"); + break; + default: + not_reached(); + } + return NULL; +} + +static prof_tctx_t * +prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + prof_tctx_t *ret; + + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_nominal: + /* New since dumping started; ignore. */ + break; + case prof_tctx_state_dumping: + tctx->state = prof_tctx_state_nominal; + break; + case prof_tctx_state_purgatory: + ret = tctx; + goto label_return; + default: + not_reached(); + } + + ret = NULL; +label_return: + return ret; +} + +static void +prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) { + cassert(config_prof); + + malloc_mutex_lock(tsdn, gctx->lock); + + /* + * Increment nlimbo so that gctx won't go away before dump. + * Additionally, link gctx into the dump list so that it is included in + * prof_dump()'s second pass. + */ + gctx->nlimbo++; + gctx_tree_insert(gctxs, gctx); + + memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); + + malloc_mutex_unlock(tsdn, gctx->lock); +} + +typedef struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg_t; +struct prof_gctx_merge_iter_arg_s { + tsdn_t *tsdn; + size_t *leak_ngctx; +}; + +static prof_gctx_t * +prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { + prof_gctx_merge_iter_arg_t *arg = (prof_gctx_merge_iter_arg_t *)opaque; + + malloc_mutex_lock(arg->tsdn, gctx->lock); + tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, + (void *)arg->tsdn); + if (gctx->cnt_summed.curobjs != 0) { + (*arg->leak_ngctx)++; + } + malloc_mutex_unlock(arg->tsdn, gctx->lock); + + return NULL; +} + +static void +prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { + prof_tdata_t *tdata = prof_tdata_get(tsd, false); + prof_gctx_t *gctx; + + /* + * Standard tree iteration won't work here, because as soon as we + * decrement gctx->nlimbo and unlock gctx, another thread can + * concurrently destroy it, which will corrupt the tree. Therefore, + * tear down the tree one node at a time during iteration. 
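The comment above explains why prof_gctx_finish() cannot walk the gctx tree with a normal iterator; the loop that follows instead detaches the first node each time and only then operates on it. The same take-first-then-process shape on a trivial singly linked list, with hypothetical names:

```
#include <stdlib.h>

typedef struct node_s node_t;
struct node_s { node_t *next; /* payload omitted */ };

static void
teardown(node_t **head) {
	node_t *n;

	/*
	 * Detach each node from the container before touching it, so the
	 * container never points at memory that is about to be released.
	 */
	while ((n = *head) != NULL) {
		*head = n->next;	/* remove first... */
		free(n);		/* ...then destroy */
	}
}
```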
+ */ + while ((gctx = gctx_tree_first(gctxs)) != NULL) { + gctx_tree_remove(gctxs, gctx); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + { + prof_tctx_t *next; + + next = NULL; + do { + prof_tctx_t *to_destroy = + tctx_tree_iter(&gctx->tctxs, next, + prof_tctx_finish_iter, + (void *)tsd_tsdn(tsd)); + if (to_destroy != NULL) { + next = tctx_tree_next(&gctx->tctxs, + to_destroy); + tctx_tree_remove(&gctx->tctxs, + to_destroy); + idalloctm(tsd_tsdn(tsd), to_destroy, + NULL, NULL, true, true); + } else { + next = NULL; + } + } while (next != NULL); + } + gctx->nlimbo--; + if (prof_gctx_should_destroy(gctx)) { + gctx->nlimbo++; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + prof_gctx_try_destroy(tsd, tdata, gctx); + } else { + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + } + } +} + +typedef struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg_t; +struct prof_tdata_merge_iter_arg_s { + tsdn_t *tsdn; + prof_cnt_t *cnt_all; +}; + +static prof_tdata_t * +prof_tdata_merge_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata, + void *opaque) { + prof_tdata_merge_iter_arg_t *arg = + (prof_tdata_merge_iter_arg_t *)opaque; + + malloc_mutex_lock(arg->tsdn, tdata->lock); + if (!tdata->expired) { + size_t tabind; + union { + prof_tctx_t *p; + void *v; + } tctx; + + tdata->dumping = true; + memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); + for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, + &tctx.v);) { + prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); + } + + arg->cnt_all->curobjs += tdata->cnt_summed.curobjs; + arg->cnt_all->curobjs_shifted_unbiased + += tdata->cnt_summed.curobjs_shifted_unbiased; + arg->cnt_all->curbytes += tdata->cnt_summed.curbytes; + arg->cnt_all->curbytes_unbiased + += tdata->cnt_summed.curbytes_unbiased; + if (opt_prof_accum) { + arg->cnt_all->accumobjs += tdata->cnt_summed.accumobjs; + arg->cnt_all->accumobjs_shifted_unbiased + += tdata->cnt_summed.accumobjs_shifted_unbiased; + arg->cnt_all->accumbytes += + tdata->cnt_summed.accumbytes; + arg->cnt_all->accumbytes_unbiased += + tdata->cnt_summed.accumbytes_unbiased; + } + } else { + tdata->dumping = false; + } + malloc_mutex_unlock(arg->tsdn, tdata->lock); + + return NULL; +} + +static prof_tdata_t * +prof_tdata_dump_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata, + void *opaque) { + if (!tdata->dumping) { + return NULL; + } + + prof_dump_iter_arg_t *arg = (prof_dump_iter_arg_t *)opaque; + prof_dump_printf(arg->prof_dump_write, arg->cbopaque, " t%"FMTu64": ", + tdata->thr_uid); + prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque, + &tdata->cnt_summed); + if (tdata->thread_name != NULL) { + arg->prof_dump_write(arg->cbopaque, " "); + arg->prof_dump_write(arg->cbopaque, tdata->thread_name); + } + arg->prof_dump_write(arg->cbopaque, "\n"); + return NULL; +} + +static void +prof_dump_header(prof_dump_iter_arg_t *arg, const prof_cnt_t *cnt_all) { + prof_dump_printf(arg->prof_dump_write, arg->cbopaque, + "heap_v2/%"FMTu64"\n t*: ", ((uint64_t)1U << lg_prof_sample)); + prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque, cnt_all); + arg->prof_dump_write(arg->cbopaque, "\n"); + + malloc_mutex_lock(arg->tsdn, &tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, arg); + malloc_mutex_unlock(arg->tsdn, &tdatas_mtx); +} + +static void +prof_dump_gctx(prof_dump_iter_arg_t *arg, prof_gctx_t *gctx, + const prof_bt_t *bt, prof_gctx_tree_t *gctxs) { + cassert(config_prof); + malloc_mutex_assert_owner(arg->tsdn, gctx->lock); + + /* Avoid dumping such gctx's that have no useful 
data. */ + if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || + (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) { + assert(gctx->cnt_summed.curobjs == 0); + assert(gctx->cnt_summed.curbytes == 0); + /* + * These asserts would not be correct -- see the comment on races + * in prof.c + * assert(gctx->cnt_summed.curobjs_unbiased == 0); + * assert(gctx->cnt_summed.curbytes_unbiased == 0); + */ + assert(gctx->cnt_summed.accumobjs == 0); + assert(gctx->cnt_summed.accumobjs_shifted_unbiased == 0); + assert(gctx->cnt_summed.accumbytes == 0); + assert(gctx->cnt_summed.accumbytes_unbiased == 0); + return; + } + + arg->prof_dump_write(arg->cbopaque, "@"); + for (unsigned i = 0; i < bt->len; i++) { + prof_dump_printf(arg->prof_dump_write, arg->cbopaque, + " %#"FMTxPTR, (uintptr_t)bt->vec[i]); + } + + arg->prof_dump_write(arg->cbopaque, "\n t*: "); + prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque, + &gctx->cnt_summed); + arg->prof_dump_write(arg->cbopaque, "\n"); + + tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, arg); +} + +/* + * See prof_sample_new_event_wait() comment for why the body of this function + * is conditionally compiled. + */ +static void +prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx) { +#ifdef JEMALLOC_PROF + /* + * Scaling is equivalent AdjustSamples() in jeprof, but the result may + * differ slightly from what jeprof reports, because here we scale the + * summary values, whereas jeprof scales each context individually and + * reports the sums of the scaled values. + */ + if (cnt_all->curbytes != 0) { + double sample_period = (double)((uint64_t)1 << lg_prof_sample); + double ratio = (((double)cnt_all->curbytes) / + (double)cnt_all->curobjs) / sample_period; + double scale_factor = 1.0 / (1.0 - exp(-ratio)); + uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes) + * scale_factor); + uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) * + scale_factor); + + malloc_printf(": Leak approximation summary: ~%"FMTu64 + " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n", + curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs != + 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : ""); + malloc_printf( + ": Run jeprof on dump output for leak detail\n"); + if (opt_prof_leak_error) { + malloc_printf( + ": Exiting with error code because memory" + " leaks were detected\n"); + /* + * Use _exit() with underscore to avoid calling atexit() + * and entering endless cycle. + */ + _exit(1); + } + } +#endif +} + +static prof_gctx_t * +prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { + prof_dump_iter_arg_t *arg = (prof_dump_iter_arg_t *)opaque; + malloc_mutex_lock(arg->tsdn, gctx->lock); + prof_dump_gctx(arg, gctx, &gctx->bt, gctxs); + malloc_mutex_unlock(arg->tsdn, gctx->lock); + return NULL; +} + +static void +prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, prof_cnt_t *cnt_all, + size_t *leak_ngctx, prof_gctx_tree_t *gctxs) { + size_t tabind; + union { + prof_gctx_t *p; + void *v; + } gctx; + + prof_enter(tsd, tdata); + + /* + * Put gctx's in limbo and clear their counters in preparation for + * summing. + */ + gctx_tree_new(gctxs); + for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) { + prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs); + } + + /* + * Iterate over tdatas, and for the non-expired ones snapshot their tctx + * stats and merge them into the associated gctx's. 
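The prof_leakcheck() routine above compensates for sampling by multiplying the summed counters by `1 / (1 - exp(-ratio))`, the same adjustment jeprof applies per context. The standalone computation below shows the arithmetic with invented counts; it is a sketch, not code from this import.

```
/*
 * Leak-summary scaling, illustrated outside jemalloc. Inputs are made up.
 * Build with -lm.
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t lg_prof_sample = 19;			/* default sample rate */
	double sample_period = (double)((uint64_t)1 << lg_prof_sample);
	uint64_t curobjs = 7;				/* sampled leaked objects */
	uint64_t curbytes = 7 * 4096;			/* sampled leaked bytes */

	double ratio = ((double)curbytes / (double)curobjs) / sample_period;
	double scale_factor = 1.0 / (1.0 - exp(-ratio));

	printf("~%.0f bytes, ~%.0f objects leaked (approximate)\n",
	    (double)curbytes * scale_factor, (double)curobjs * scale_factor);
	return 0;
}
```

Because the average sampled object (4 KiB here) is far smaller than the 512 KiB sample period, the scale factor comes out large (roughly 128x), which is why the summary is explicitly labeled an approximation.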
+ */ + memset(cnt_all, 0, sizeof(prof_cnt_t)); + prof_tdata_merge_iter_arg_t prof_tdata_merge_iter_arg = {tsd_tsdn(tsd), + cnt_all}; + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, + &prof_tdata_merge_iter_arg); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + + /* Merge tctx stats into gctx's. */ + *leak_ngctx = 0; + prof_gctx_merge_iter_arg_t prof_gctx_merge_iter_arg = {tsd_tsdn(tsd), + leak_ngctx}; + gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter, + &prof_gctx_merge_iter_arg); + + prof_leave(tsd, tdata); +} + +void +prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque, + prof_tdata_t *tdata, bool leakcheck) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_dump_mtx); + prof_cnt_t cnt_all; + size_t leak_ngctx; + prof_gctx_tree_t gctxs; + prof_dump_prep(tsd, tdata, &cnt_all, &leak_ngctx, &gctxs); + prof_dump_iter_arg_t prof_dump_iter_arg = {tsd_tsdn(tsd), + prof_dump_write, cbopaque}; + prof_dump_header(&prof_dump_iter_arg, &cnt_all); + gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter, &prof_dump_iter_arg); + prof_gctx_finish(tsd, &gctxs); + if (leakcheck) { + prof_leakcheck(&cnt_all, leak_ngctx); + } +} + +/* Used in unit tests. */ +void +prof_cnt_all(prof_cnt_t *cnt_all) { + tsd_t *tsd = tsd_fetch(); + prof_tdata_t *tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + memset(cnt_all, 0, sizeof(prof_cnt_t)); + } else { + size_t leak_ngctx; + prof_gctx_tree_t gctxs; + prof_dump_prep(tsd, tdata, cnt_all, &leak_ngctx, &gctxs); + prof_gctx_finish(tsd, &gctxs); + } +} + +void +prof_bt_hash(const void *key, size_t r_hash[2]) { + prof_bt_t *bt = (prof_bt_t *)key; + + cassert(config_prof); + + hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); +} + +bool +prof_bt_keycomp(const void *k1, const void *k2) { + const prof_bt_t *bt1 = (prof_bt_t *)k1; + const prof_bt_t *bt2 = (prof_bt_t *)k2; + + cassert(config_prof); + + if (bt1->len != bt2->len) { + return false; + } + return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); +} + +prof_tdata_t * +prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, + char *thread_name, bool active) { + assert(tsd_reentrancy_level_get(tsd) == 0); + + prof_tdata_t *tdata; + + cassert(config_prof); + + /* Initialize an empty cache for this thread. 
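prof_bt_hash() and prof_bt_keycomp() above key the bt2gctx table on the raw vector of return addresses: equal frame count plus byte-identical program counters. The sketch below shows the same keying with a generic FNV-1a hash; jemalloc itself uses its internal MurmurHash-based hash() with the 0x94122f33U seed, so the hash function here is a stand-in, not the one from this patch.

```
/* Keying a table on a backtrace (vector of PCs): illustration only. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef struct {
	void **vec;	/* return addresses */
	size_t len;	/* number of frames */
} bt_t;

static uint64_t
bt_hash(const bt_t *bt)
{
	const unsigned char *p = (const unsigned char *)bt->vec;
	size_t nbytes = bt->len * sizeof(void *);
	uint64_t h = 1469598103934665603ULL;		/* FNV-1a offset basis */

	for (size_t i = 0; i < nbytes; i++) {
		h ^= p[i];
		h *= 1099511628211ULL;			/* FNV-1a prime */
	}
	return h;
}

static bool
bt_equal(const bt_t *a, const bt_t *b)
{
	/* Same frame count and identical PC values, as in prof_bt_keycomp(). */
	return a->len == b->len &&
	    memcmp(a->vec, b->vec, a->len * sizeof(void *)) == 0;
}

int
main(void)
{
	void *frames[3] = {(void *)0x1000, (void *)0x2000, (void *)0x3000};
	bt_t bt = {frames, 3};

	return (bt_hash(&bt) != 0 && bt_equal(&bt, &bt)) ? 0 : 1;
}
```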
*/ + tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), + sz_size2index(sizeof(prof_tdata_t)), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + if (tdata == NULL) { + return NULL; + } + + tdata->lock = prof_tdata_mutex_choose(thr_uid); + tdata->thr_uid = thr_uid; + tdata->thr_discrim = thr_discrim; + tdata->thread_name = thread_name; + tdata->attached = true; + tdata->expired = false; + tdata->tctx_uid_next = 0; + + if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, + prof_bt_keycomp)) { + idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); + return NULL; + } + + tdata->enq = false; + tdata->enq_idump = false; + tdata->enq_gdump = false; + + tdata->dumping = false; + tdata->active = active; + + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + tdata_tree_insert(&tdatas, tdata); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + + return tdata; +} + +static bool +prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) { + if (tdata->attached && !even_if_attached) { + return false; + } + if (ckh_count(&tdata->bt2tctx) != 0) { + return false; + } + return true; +} + +static bool +prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, + bool even_if_attached) { + malloc_mutex_assert_owner(tsdn, tdata->lock); + + return prof_tdata_should_destroy_unlocked(tdata, even_if_attached); +} + +static void +prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, + bool even_if_attached) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tdata->lock); + + tdata_tree_remove(&tdatas, tdata); + + assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); + + if (tdata->thread_name != NULL) { + idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, + true); + } + ckh_delete(tsd, &tdata->bt2tctx); + idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); +} + +static void +prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + prof_tdata_destroy_locked(tsd, tdata, even_if_attached); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); +} + +void +prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) { + bool destroy_tdata; + + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + if (tdata->attached) { + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, + true); + /* + * Only detach if !destroy_tdata, because detaching would allow + * another thread to win the race to destroy tdata. + */ + if (!destroy_tdata) { + tdata->attached = false; + } + tsd_prof_tdata_set(tsd, NULL); + } else { + destroy_tdata = false; + } + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (destroy_tdata) { + prof_tdata_destroy(tsd, tdata, true); + } +} + +static bool +prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) { + bool destroy_tdata; + + malloc_mutex_lock(tsdn, tdata->lock); + if (!tdata->expired) { + tdata->expired = true; + destroy_tdata = prof_tdata_should_destroy(tsdn, tdata, false); + } else { + destroy_tdata = false; + } + malloc_mutex_unlock(tsdn, tdata->lock); + + return destroy_tdata; +} + +static prof_tdata_t * +prof_tdata_reset_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata, + void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + + return (prof_tdata_expire(tsdn, tdata) ? 
tdata : NULL); +} + +void +prof_reset(tsd_t *tsd, size_t lg_sample) { + prof_tdata_t *next; + + assert(lg_sample < (sizeof(uint64_t) << 3)); + + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + + lg_prof_sample = lg_sample; + prof_unbias_map_init(); + + next = NULL; + do { + prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, + prof_tdata_reset_iter, (void *)tsd); + if (to_destroy != NULL) { + next = tdata_tree_next(&tdatas, to_destroy); + prof_tdata_destroy_locked(tsd, to_destroy, false); + } else { + next = NULL; + } + } while (next != NULL); + + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); +} + +static bool +prof_tctx_should_destroy(tsd_t *tsd, prof_tctx_t *tctx) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); + + if (opt_prof_accum) { + return false; + } + if (tctx->cnts.curobjs != 0) { + return false; + } + if (tctx->prepared) { + return false; + } + if (tctx->recent_count != 0) { + return false; + } + return true; +} + +static void +prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); + + assert(tctx->cnts.curobjs == 0); + assert(tctx->cnts.curbytes == 0); + /* + * These asserts are not correct -- see the comment about races in + * prof.c + * + * assert(tctx->cnts.curobjs_shifted_unbiased == 0); + * assert(tctx->cnts.curbytes_unbiased == 0); + */ + assert(!opt_prof_accum); + assert(tctx->cnts.accumobjs == 0); + assert(tctx->cnts.accumbytes == 0); + /* + * These ones are, since accumbyte counts never go down. Either + * prof_accum is off (in which case these should never have changed from + * their initial value of zero), or it's on (in which case we shouldn't + * be destroying this tctx). + */ + assert(tctx->cnts.accumobjs_shifted_unbiased == 0); + assert(tctx->cnts.accumbytes_unbiased == 0); + + prof_gctx_t *gctx = tctx->gctx; + + { + prof_tdata_t *tdata = tctx->tdata; + tctx->tdata = NULL; + ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); + bool destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), + tdata, false); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (destroy_tdata) { + prof_tdata_destroy(tsd, tdata, false); + } + } + + bool destroy_tctx, destroy_gctx; + + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + switch (tctx->state) { + case prof_tctx_state_nominal: + tctx_tree_remove(&gctx->tctxs, tctx); + destroy_tctx = true; + if (prof_gctx_should_destroy(gctx)) { + /* + * Increment gctx->nlimbo in order to keep another + * thread from winning the race to destroy gctx while + * this one has gctx->lock dropped. Without this, it + * would be possible for another thread to: + * + * 1) Sample an allocation associated with gctx. + * 2) Deallocate the sampled object. + * 3) Successfully prof_gctx_try_destroy(gctx). + * + * The result would be that gctx no longer exists by the + * time this thread accesses it in + * prof_gctx_try_destroy(). + */ + gctx->nlimbo++; + destroy_gctx = true; + } else { + destroy_gctx = false; + } + break; + case prof_tctx_state_dumping: + /* + * A dumping thread needs tctx to remain valid until dumping + * has finished. Change state such that the dumping thread will + * complete destruction during a late dump iteration phase. 
+ */ + tctx->state = prof_tctx_state_purgatory; + destroy_tctx = false; + destroy_gctx = false; + break; + default: + not_reached(); + destroy_tctx = false; + destroy_gctx = false; + } + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + if (destroy_gctx) { + prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx); + } + if (destroy_tctx) { + idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true); + } +} + +void +prof_tctx_try_destroy(tsd_t *tsd, prof_tctx_t *tctx) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); + if (prof_tctx_should_destroy(tsd, tctx)) { + /* tctx->tdata->lock will be released in prof_tctx_destroy(). */ + prof_tctx_destroy(tsd, tctx); + } else { + malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); + } +} + +/******************************************************************************/ diff --git a/contrib/jemalloc/src/prof_log.c b/contrib/jemalloc/src/prof_log.c new file mode 100644 index 00000000000000..0632c3b37e55e8 --- /dev/null +++ b/contrib/jemalloc/src/prof_log.c @@ -0,0 +1,717 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/buf_writer.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/emitter.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/prof_data.h" +#include "jemalloc/internal/prof_log.h" +#include "jemalloc/internal/prof_sys.h" + +bool opt_prof_log = false; +typedef enum prof_logging_state_e prof_logging_state_t; +enum prof_logging_state_e { + prof_logging_state_stopped, + prof_logging_state_started, + prof_logging_state_dumping +}; + +/* + * - stopped: log_start never called, or previous log_stop has completed. + * - started: log_start called, log_stop not called yet. Allocations are logged. + * - dumping: log_stop called but not finished; samples are not logged anymore. + */ +prof_logging_state_t prof_logging_state = prof_logging_state_stopped; + +/* Used in unit tests. */ +static bool prof_log_dummy = false; + +/* Incremented for every log file that is output. */ +static uint64_t log_seq = 0; +static char log_filename[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PATH_MAX + +#endif + 1]; + +/* Timestamp for most recent call to log_start(). */ +static nstime_t log_start_timestamp; + +/* Increment these when adding to the log_bt and log_thr linked lists. */ +static size_t log_bt_index = 0; +static size_t log_thr_index = 0; + +/* Linked list node definitions. These are only used in this file. */ +typedef struct prof_bt_node_s prof_bt_node_t; + +struct prof_bt_node_s { + prof_bt_node_t *next; + size_t index; + prof_bt_t bt; + /* Variable size backtrace vector pointed to by bt. */ + void *vec[1]; +}; + +typedef struct prof_thr_node_s prof_thr_node_t; + +struct prof_thr_node_s { + prof_thr_node_t *next; + size_t index; + uint64_t thr_uid; + /* Variable size based on thr_name_sz. */ + char name[1]; +}; + +typedef struct prof_alloc_node_s prof_alloc_node_t; + +/* This is output when logging sampled allocations. */ +struct prof_alloc_node_s { + prof_alloc_node_t *next; + /* Indices into an array of thread data. */ + size_t alloc_thr_ind; + size_t free_thr_ind; + + /* Indices into an array of backtraces. 
*/ + size_t alloc_bt_ind; + size_t free_bt_ind; + + uint64_t alloc_time_ns; + uint64_t free_time_ns; + + size_t usize; +}; + +/* + * Created on the first call to prof_try_log and deleted on prof_log_stop. + * These are the backtraces and threads that have already been logged by an + * allocation. + */ +static bool log_tables_initialized = false; +static ckh_t log_bt_node_set; +static ckh_t log_thr_node_set; + +/* Store linked lists for logged data. */ +static prof_bt_node_t *log_bt_first = NULL; +static prof_bt_node_t *log_bt_last = NULL; +static prof_thr_node_t *log_thr_first = NULL; +static prof_thr_node_t *log_thr_last = NULL; +static prof_alloc_node_t *log_alloc_first = NULL; +static prof_alloc_node_t *log_alloc_last = NULL; + +/* Protects the prof_logging_state and any log_{...} variable. */ +malloc_mutex_t log_mtx; + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. + */ + +/* Hashtable functions for log_bt_node_set and log_thr_node_set. */ +static void prof_thr_node_hash(const void *key, size_t r_hash[2]); +static bool prof_thr_node_keycomp(const void *k1, const void *k2); +static void prof_bt_node_hash(const void *key, size_t r_hash[2]); +static bool prof_bt_node_keycomp(const void *k1, const void *k2); + +/******************************************************************************/ + +static size_t +prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) { + assert(prof_logging_state == prof_logging_state_started); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx); + + prof_bt_node_t dummy_node; + dummy_node.bt = *bt; + prof_bt_node_t *node; + + /* See if this backtrace is already cached in the table. */ + if (ckh_search(&log_bt_node_set, (void *)(&dummy_node), + (void **)(&node), NULL)) { + size_t sz = offsetof(prof_bt_node_t, vec) + + (bt->len * sizeof(void *)); + prof_bt_node_t *new_node = (prof_bt_node_t *) + iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, + true, arena_get(TSDN_NULL, 0, true), true); + if (log_bt_first == NULL) { + log_bt_first = new_node; + log_bt_last = new_node; + } else { + log_bt_last->next = new_node; + log_bt_last = new_node; + } + + new_node->next = NULL; + new_node->index = log_bt_index; + /* + * Copy the backtrace: bt is inside a tdata or gctx, which + * might die before prof_log_stop is called. + */ + new_node->bt.len = bt->len; + memcpy(new_node->vec, bt->vec, bt->len * sizeof(void *)); + new_node->bt.vec = new_node->vec; + + log_bt_index++; + ckh_insert(tsd, &log_bt_node_set, (void *)new_node, NULL); + return new_node->index; + } else { + return node->index; + } +} + +static size_t +prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) { + assert(prof_logging_state == prof_logging_state_started); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx); + + prof_thr_node_t dummy_node; + dummy_node.thr_uid = thr_uid; + prof_thr_node_t *node; + + /* See if this thread is already cached in the table. 
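prof_log_bt_index() above deduplicates backtraces: it probes the hash set first, and only on a miss allocates a node sized as `offsetof(prof_bt_node_t, vec) + len * sizeof(void *)` so the header and the copied frame vector live in one block, then hands out a stable index. The sketch below repeats the sizing-and-copy idiom with plain malloc(); the bt_node_new name is hypothetical.

```
/* The offsetof-sized variable-length node idiom, outside jemalloc. */
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

typedef struct bt_node_s bt_node_t;
struct bt_node_s {
	bt_node_t *next;
	size_t index;
	size_t len;
	void *vec[1];	/* trailing storage: really len entries */
};

static bt_node_t *
bt_node_new(size_t index, void **frames, size_t len)
{
	/* One allocation covers the header plus every copied frame. */
	size_t sz = offsetof(bt_node_t, vec) + len * sizeof(void *);
	bt_node_t *node = malloc(sz);

	if (node == NULL) {
		return NULL;
	}
	node->next = NULL;
	node->index = index;
	node->len = len;
	/* Copy: the caller's vector may not outlive the log. */
	memcpy(node->vec, frames, len * sizeof(void *));
	return node;
}

int
main(void)
{
	void *frames[2] = {(void *)0x1111, (void *)0x2222};
	bt_node_t *n = bt_node_new(0, frames, 2);
	int ok = (n != NULL);

	free(n);
	return ok ? 0 : 1;
}
```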
*/ + if (ckh_search(&log_thr_node_set, (void *)(&dummy_node), + (void **)(&node), NULL)) { + size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1; + prof_thr_node_t *new_node = (prof_thr_node_t *) + iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, + true, arena_get(TSDN_NULL, 0, true), true); + if (log_thr_first == NULL) { + log_thr_first = new_node; + log_thr_last = new_node; + } else { + log_thr_last->next = new_node; + log_thr_last = new_node; + } + + new_node->next = NULL; + new_node->index = log_thr_index; + new_node->thr_uid = thr_uid; + strcpy(new_node->name, name); + + log_thr_index++; + ckh_insert(tsd, &log_thr_node_set, (void *)new_node, NULL); + return new_node->index; + } else { + return node->index; + } +} + +JEMALLOC_COLD +void +prof_try_log(tsd_t *tsd, size_t usize, prof_info_t *prof_info) { + cassert(config_prof); + prof_tctx_t *tctx = prof_info->alloc_tctx; + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); + + prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false); + if (cons_tdata == NULL) { + /* + * We decide not to log these allocations. cons_tdata will be + * NULL only when the current thread is in a weird state (e.g. + * it's being destroyed). + */ + return; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx); + + if (prof_logging_state != prof_logging_state_started) { + goto label_done; + } + + if (!log_tables_initialized) { + bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS, + prof_bt_node_hash, prof_bt_node_keycomp); + bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS, + prof_thr_node_hash, prof_thr_node_keycomp); + if (err1 || err2) { + goto label_done; + } + log_tables_initialized = true; + } + + nstime_t alloc_time = prof_info->alloc_time; + nstime_t free_time; + nstime_prof_init_update(&free_time); + + size_t sz = sizeof(prof_alloc_node_t); + prof_alloc_node_t *new_node = (prof_alloc_node_t *) + iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + + const char *prod_thr_name = (tctx->tdata->thread_name == NULL)? + "" : tctx->tdata->thread_name; + const char *cons_thr_name = prof_thread_name_get(tsd); + + prof_bt_t bt; + /* Initialize the backtrace, using the buffer in tdata to store it. */ + bt_init(&bt, cons_tdata->vec); + prof_backtrace(tsd, &bt); + prof_bt_t *cons_bt = &bt; + + /* We haven't destroyed tctx yet, so gctx should be good to read. 
*/ + prof_bt_t *prod_bt = &tctx->gctx->bt; + + new_node->next = NULL; + new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid, + prod_thr_name); + new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid, + cons_thr_name); + new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt); + new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt); + new_node->alloc_time_ns = nstime_ns(&alloc_time); + new_node->free_time_ns = nstime_ns(&free_time); + new_node->usize = usize; + + if (log_alloc_first == NULL) { + log_alloc_first = new_node; + log_alloc_last = new_node; + } else { + log_alloc_last->next = new_node; + log_alloc_last = new_node; + } + +label_done: + malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx); +} + +static void +prof_bt_node_hash(const void *key, size_t r_hash[2]) { + const prof_bt_node_t *bt_node = (prof_bt_node_t *)key; + prof_bt_hash((void *)(&bt_node->bt), r_hash); +} + +static bool +prof_bt_node_keycomp(const void *k1, const void *k2) { + const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1; + const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2; + return prof_bt_keycomp((void *)(&bt_node1->bt), + (void *)(&bt_node2->bt)); +} + +static void +prof_thr_node_hash(const void *key, size_t r_hash[2]) { + const prof_thr_node_t *thr_node = (prof_thr_node_t *)key; + hash(&thr_node->thr_uid, sizeof(uint64_t), 0x94122f35U, r_hash); +} + +static bool +prof_thr_node_keycomp(const void *k1, const void *k2) { + const prof_thr_node_t *thr_node1 = (prof_thr_node_t *)k1; + const prof_thr_node_t *thr_node2 = (prof_thr_node_t *)k2; + return thr_node1->thr_uid == thr_node2->thr_uid; +} + +/* Used in unit tests. */ +size_t +prof_log_bt_count(void) { + cassert(config_prof); + size_t cnt = 0; + prof_bt_node_t *node = log_bt_first; + while (node != NULL) { + cnt++; + node = node->next; + } + return cnt; +} + +/* Used in unit tests. */ +size_t +prof_log_alloc_count(void) { + cassert(config_prof); + size_t cnt = 0; + prof_alloc_node_t *node = log_alloc_first; + while (node != NULL) { + cnt++; + node = node->next; + } + return cnt; +} + +/* Used in unit tests. */ +size_t +prof_log_thr_count(void) { + cassert(config_prof); + size_t cnt = 0; + prof_thr_node_t *node = log_thr_first; + while (node != NULL) { + cnt++; + node = node->next; + } + return cnt; +} + +/* Used in unit tests. */ +bool +prof_log_is_logging(void) { + cassert(config_prof); + return prof_logging_state == prof_logging_state_started; +} + +/* Used in unit tests. 
*/ +bool +prof_log_rep_check(void) { + cassert(config_prof); + if (prof_logging_state == prof_logging_state_stopped + && log_tables_initialized) { + return true; + } + + if (log_bt_last != NULL && log_bt_last->next != NULL) { + return true; + } + if (log_thr_last != NULL && log_thr_last->next != NULL) { + return true; + } + if (log_alloc_last != NULL && log_alloc_last->next != NULL) { + return true; + } + + size_t bt_count = prof_log_bt_count(); + size_t thr_count = prof_log_thr_count(); + size_t alloc_count = prof_log_alloc_count(); + + + if (prof_logging_state == prof_logging_state_stopped) { + if (bt_count != 0 || thr_count != 0 || alloc_count || 0) { + return true; + } + } + + prof_alloc_node_t *node = log_alloc_first; + while (node != NULL) { + if (node->alloc_bt_ind >= bt_count) { + return true; + } + if (node->free_bt_ind >= bt_count) { + return true; + } + if (node->alloc_thr_ind >= thr_count) { + return true; + } + if (node->free_thr_ind >= thr_count) { + return true; + } + if (node->alloc_time_ns > node->free_time_ns) { + return true; + } + node = node->next; + } + + return false; +} + +/* Used in unit tests. */ +void +prof_log_dummy_set(bool new_value) { + cassert(config_prof); + prof_log_dummy = new_value; +} + +/* Used as an atexit function to stop logging on exit. */ +static void +prof_log_stop_final(void) { + tsd_t *tsd = tsd_fetch(); + prof_log_stop(tsd_tsdn(tsd)); +} + +JEMALLOC_COLD +bool +prof_log_start(tsdn_t *tsdn, const char *filename) { + cassert(config_prof); + + if (!opt_prof) { + return true; + } + + bool ret = false; + + malloc_mutex_lock(tsdn, &log_mtx); + + static bool prof_log_atexit_called = false; + if (!prof_log_atexit_called) { + prof_log_atexit_called = true; + if (atexit(prof_log_stop_final) != 0) { + malloc_write(": Error in atexit() " + "for logging\n"); + if (opt_abort) { + abort(); + } + ret = true; + goto label_done; + } + } + + if (prof_logging_state != prof_logging_state_stopped) { + ret = true; + } else if (filename == NULL) { + /* Make default name. */ + prof_get_default_filename(tsdn, log_filename, log_seq); + log_seq++; + prof_logging_state = prof_logging_state_started; + } else if (strlen(filename) >= PROF_DUMP_FILENAME_LEN) { + ret = true; + } else { + strcpy(log_filename, filename); + prof_logging_state = prof_logging_state_started; + } + + if (!ret) { + nstime_prof_init_update(&log_start_timestamp); + } +label_done: + malloc_mutex_unlock(tsdn, &log_mtx); + + return ret; +} + +struct prof_emitter_cb_arg_s { + int fd; + ssize_t ret; +}; + +static void +prof_emitter_write_cb(void *opaque, const char *to_write) { + struct prof_emitter_cb_arg_s *arg = + (struct prof_emitter_cb_arg_s *)opaque; + size_t bytes = strlen(to_write); + if (prof_log_dummy) { + return; + } + arg->ret = malloc_write_fd(arg->fd, to_write, bytes); +} + +/* + * prof_log_emit_{...} goes through the appropriate linked list, emitting each + * node to the json and deallocating it. 
+ */ +static void +prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) { + emitter_json_array_kv_begin(emitter, "threads"); + prof_thr_node_t *thr_node = log_thr_first; + prof_thr_node_t *thr_old_node; + while (thr_node != NULL) { + emitter_json_object_begin(emitter); + + emitter_json_kv(emitter, "thr_uid", emitter_type_uint64, + &thr_node->thr_uid); + + char *thr_name = thr_node->name; + + emitter_json_kv(emitter, "thr_name", emitter_type_string, + &thr_name); + + emitter_json_object_end(emitter); + thr_old_node = thr_node; + thr_node = thr_node->next; + idalloctm(tsd_tsdn(tsd), thr_old_node, NULL, NULL, true, true); + } + emitter_json_array_end(emitter); +} + +static void +prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) { + emitter_json_array_kv_begin(emitter, "stack_traces"); + prof_bt_node_t *bt_node = log_bt_first; + prof_bt_node_t *bt_old_node; + /* + * Calculate how many hex digits we need: twice number of bytes, two for + * "0x", and then one more for terminating '\0'. + */ + char buf[2 * sizeof(intptr_t) + 3]; + size_t buf_sz = sizeof(buf); + while (bt_node != NULL) { + emitter_json_array_begin(emitter); + size_t i; + for (i = 0; i < bt_node->bt.len; i++) { + malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]); + char *trace_str = buf; + emitter_json_value(emitter, emitter_type_string, + &trace_str); + } + emitter_json_array_end(emitter); + + bt_old_node = bt_node; + bt_node = bt_node->next; + idalloctm(tsd_tsdn(tsd), bt_old_node, NULL, NULL, true, true); + } + emitter_json_array_end(emitter); +} + +static void +prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) { + emitter_json_array_kv_begin(emitter, "allocations"); + prof_alloc_node_t *alloc_node = log_alloc_first; + prof_alloc_node_t *alloc_old_node; + while (alloc_node != NULL) { + emitter_json_object_begin(emitter); + + emitter_json_kv(emitter, "alloc_thread", emitter_type_size, + &alloc_node->alloc_thr_ind); + + emitter_json_kv(emitter, "free_thread", emitter_type_size, + &alloc_node->free_thr_ind); + + emitter_json_kv(emitter, "alloc_trace", emitter_type_size, + &alloc_node->alloc_bt_ind); + + emitter_json_kv(emitter, "free_trace", emitter_type_size, + &alloc_node->free_bt_ind); + + emitter_json_kv(emitter, "alloc_timestamp", + emitter_type_uint64, &alloc_node->alloc_time_ns); + + emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64, + &alloc_node->free_time_ns); + + emitter_json_kv(emitter, "usize", emitter_type_uint64, + &alloc_node->usize); + + emitter_json_object_end(emitter); + + alloc_old_node = alloc_node; + alloc_node = alloc_node->next; + idalloctm(tsd_tsdn(tsd), alloc_old_node, NULL, NULL, true, + true); + } + emitter_json_array_end(emitter); +} + +static void +prof_log_emit_metadata(emitter_t *emitter) { + emitter_json_object_kv_begin(emitter, "info"); + + nstime_t now; + + nstime_prof_init_update(&now); + uint64_t ns = nstime_ns(&now) - nstime_ns(&log_start_timestamp); + emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns); + + char *vers = JEMALLOC_VERSION; + emitter_json_kv(emitter, "version", + emitter_type_string, &vers); + + emitter_json_kv(emitter, "lg_sample_rate", + emitter_type_int, &lg_prof_sample); + + const char *res_type = prof_time_res_mode_names[opt_prof_time_res]; + emitter_json_kv(emitter, "prof_time_resolution", emitter_type_string, + &res_type); + + int pid = prof_getpid(); + emitter_json_kv(emitter, "pid", emitter_type_int, &pid); + + emitter_json_object_end(emitter); +} + +#define PROF_LOG_STOP_BUFSIZE PROF_DUMP_BUFSIZE +JEMALLOC_COLD +bool 
+prof_log_stop(tsdn_t *tsdn) { + cassert(config_prof); + if (!opt_prof || !prof_booted) { + return true; + } + + tsd_t *tsd = tsdn_tsd(tsdn); + malloc_mutex_lock(tsdn, &log_mtx); + + if (prof_logging_state != prof_logging_state_started) { + malloc_mutex_unlock(tsdn, &log_mtx); + return true; + } + + /* + * Set the state to dumping. We'll set it to stopped when we're done. + * Since other threads won't be able to start/stop/log when the state is + * dumping, we don't have to hold the lock during the whole method. + */ + prof_logging_state = prof_logging_state_dumping; + malloc_mutex_unlock(tsdn, &log_mtx); + + + emitter_t emitter; + + /* Create a file. */ + + int fd; + if (prof_log_dummy) { + fd = 0; + } else { + fd = creat(log_filename, 0644); + } + + if (fd == -1) { + malloc_printf(": creat() for log file \"%s\" " + " failed with %d\n", log_filename, errno); + if (opt_abort) { + abort(); + } + return true; + } + + struct prof_emitter_cb_arg_s arg; + arg.fd = fd; + + buf_writer_t buf_writer; + buf_writer_init(tsdn, &buf_writer, prof_emitter_write_cb, &arg, NULL, + PROF_LOG_STOP_BUFSIZE); + emitter_init(&emitter, emitter_output_json_compact, buf_writer_cb, + &buf_writer); + + emitter_begin(&emitter); + prof_log_emit_metadata(&emitter); + prof_log_emit_threads(tsd, &emitter); + prof_log_emit_traces(tsd, &emitter); + prof_log_emit_allocs(tsd, &emitter); + emitter_end(&emitter); + + buf_writer_terminate(tsdn, &buf_writer); + + /* Reset global state. */ + if (log_tables_initialized) { + ckh_delete(tsd, &log_bt_node_set); + ckh_delete(tsd, &log_thr_node_set); + } + log_tables_initialized = false; + log_bt_index = 0; + log_thr_index = 0; + log_bt_first = NULL; + log_bt_last = NULL; + log_thr_first = NULL; + log_thr_last = NULL; + log_alloc_first = NULL; + log_alloc_last = NULL; + + malloc_mutex_lock(tsdn, &log_mtx); + prof_logging_state = prof_logging_state_stopped; + malloc_mutex_unlock(tsdn, &log_mtx); + + if (prof_log_dummy) { + return false; + } + return close(fd) || arg.ret == -1; +} +#undef PROF_LOG_STOP_BUFSIZE + +JEMALLOC_COLD +bool +prof_log_init(tsd_t *tsd) { + cassert(config_prof); + if (malloc_mutex_init(&log_mtx, "prof_log", + WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) { + return true; + } + + if (opt_prof_log) { + prof_log_start(tsd_tsdn(tsd), NULL); + } + + return false; +} + +/******************************************************************************/ diff --git a/contrib/jemalloc/src/prof_recent.c b/contrib/jemalloc/src/prof_recent.c new file mode 100644 index 00000000000000..834a9446c16fab --- /dev/null +++ b/contrib/jemalloc/src/prof_recent.c @@ -0,0 +1,600 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/buf_writer.h" +#include "jemalloc/internal/emitter.h" +#include "jemalloc/internal/prof_data.h" +#include "jemalloc/internal/prof_recent.h" + +ssize_t opt_prof_recent_alloc_max = PROF_RECENT_ALLOC_MAX_DEFAULT; +malloc_mutex_t prof_recent_alloc_mtx; /* Protects the fields below */ +static atomic_zd_t prof_recent_alloc_max; +static ssize_t prof_recent_alloc_count = 0; +prof_recent_list_t prof_recent_alloc_list; + +malloc_mutex_t prof_recent_dump_mtx; /* Protects dumping. 
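prof_log_start()/prof_log_stop() above are normally reached through the mallctl namespace rather than called directly. Assuming the "prof.log_start" and "prof.log_stop" names documented in the jemalloc manual, and a profiling-enabled build with opt.prof turned on (e.g. MALLOC_CONF=prof:true), a consumer would drive the lifecycle roughly as sketched below; the filename-passing convention is assumed to match prof.dump.

```
/* Rough usage sketch for the JSON allocation log; not part of this patch. */
#include <malloc_np.h>	/* FreeBSD; other systems use <jemalloc/jemalloc.h> */
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	const char *filename = "jeprof-log.json";	/* example name */

	/* Start logging; writing a filename selects the output file. */
	if (mallctl("prof.log_start", NULL, NULL, (void *)&filename,
	    sizeof(filename)) != 0) {
		fprintf(stderr, "prof.log_start failed (profiling disabled?)\n");
		return 1;
	}

	void *p = malloc(1 << 20);	/* sampled allocations are recorded */
	free(p);

	/* Emits the "threads"/"stack_traces"/"allocations"/"info" JSON. */
	(void)mallctl("prof.log_stop", NULL, NULL, NULL, 0);
	return 0;
}
```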
*/ + +static void +prof_recent_alloc_max_init() { + atomic_store_zd(&prof_recent_alloc_max, opt_prof_recent_alloc_max, + ATOMIC_RELAXED); +} + +static inline ssize_t +prof_recent_alloc_max_get_no_lock() { + return atomic_load_zd(&prof_recent_alloc_max, ATOMIC_RELAXED); +} + +static inline ssize_t +prof_recent_alloc_max_get(tsd_t *tsd) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + return prof_recent_alloc_max_get_no_lock(); +} + +static inline ssize_t +prof_recent_alloc_max_update(tsd_t *tsd, ssize_t max) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + ssize_t old_max = prof_recent_alloc_max_get(tsd); + atomic_store_zd(&prof_recent_alloc_max, max, ATOMIC_RELAXED); + return old_max; +} + +static prof_recent_t * +prof_recent_allocate_node(tsdn_t *tsdn) { + return (prof_recent_t *)iallocztm(tsdn, sizeof(prof_recent_t), + sz_size2index(sizeof(prof_recent_t)), false, NULL, true, + arena_get(tsdn, 0, false), true); +} + +static void +prof_recent_free_node(tsdn_t *tsdn, prof_recent_t *node) { + assert(node != NULL); + assert(isalloc(tsdn, node) == sz_s2u(sizeof(prof_recent_t))); + idalloctm(tsdn, node, NULL, NULL, true, true); +} + +static inline void +increment_recent_count(tsd_t *tsd, prof_tctx_t *tctx) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); + ++tctx->recent_count; + assert(tctx->recent_count > 0); +} + +bool +prof_recent_alloc_prepare(tsd_t *tsd, prof_tctx_t *tctx) { + cassert(config_prof); + assert(opt_prof && prof_booted); + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + + /* + * Check whether last-N mode is turned on without trying to acquire the + * lock, so as to optimize for the following two scenarios: + * (1) Last-N mode is switched off; + * (2) Dumping, during which last-N mode is temporarily turned off so + * as not to block sampled allocations. + */ + if (prof_recent_alloc_max_get_no_lock() == 0) { + return false; + } + + /* + * Increment recent_count to hold the tctx so that it won't be gone + * even after tctx->tdata->lock is released. This acts as a + * "placeholder"; the real recording of the allocation requires a lock + * on prof_recent_alloc_mtx and is done in prof_recent_alloc (when + * tctx->tdata->lock has been released). 
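prof_recent_alloc_prepare() above reads the max with a relaxed atomic load before touching any mutex, so the common case of the feature being off (or temporarily off during a dump) costs a single load. The pthread/stdatomic sketch below isolates that fast-path idea; it is simplified, since the real function only bumps tctx->recent_count here and defers the list update to prof_recent_alloc().

```
/* Lock-avoiding fast path on a relaxed atomic tunable; illustration only. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic long recent_alloc_max = 0;	/* 0: last-N mode switched off */
static pthread_mutex_t recent_alloc_mtx = PTHREAD_MUTEX_INITIALIZER;

static bool
recent_alloc_prepare(void)
{
	/* Fast path: no lock when recording is disabled. */
	if (atomic_load_explicit(&recent_alloc_max,
	    memory_order_relaxed) == 0) {
		return false;
	}

	/* Slow path: take the lock and do the real bookkeeping. */
	pthread_mutex_lock(&recent_alloc_mtx);
	/* ... record the allocation ... */
	pthread_mutex_unlock(&recent_alloc_mtx);
	return true;
}

int
main(void)
{
	printf("recorded: %d\n", recent_alloc_prepare());	/* 0 */
	atomic_store_explicit(&recent_alloc_max, 32, memory_order_relaxed);
	printf("recorded: %d\n", recent_alloc_prepare());	/* 1 */
	return 0;
}
```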
+ */ + increment_recent_count(tsd, tctx); + return true; +} + +static void +decrement_recent_count(tsd_t *tsd, prof_tctx_t *tctx) { + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + assert(tctx != NULL); + malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); + assert(tctx->recent_count > 0); + --tctx->recent_count; + prof_tctx_try_destroy(tsd, tctx); +} + +static inline edata_t * +prof_recent_alloc_edata_get_no_lock(const prof_recent_t *n) { + return (edata_t *)atomic_load_p(&n->alloc_edata, ATOMIC_ACQUIRE); +} + +edata_t * +prof_recent_alloc_edata_get_no_lock_test(const prof_recent_t *n) { + cassert(config_prof); + return prof_recent_alloc_edata_get_no_lock(n); +} + +static inline edata_t * +prof_recent_alloc_edata_get(tsd_t *tsd, const prof_recent_t *n) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + return prof_recent_alloc_edata_get_no_lock(n); +} + +static void +prof_recent_alloc_edata_set(tsd_t *tsd, prof_recent_t *n, edata_t *edata) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + atomic_store_p(&n->alloc_edata, edata, ATOMIC_RELEASE); +} + +void +edata_prof_recent_alloc_init(edata_t *edata) { + cassert(config_prof); + edata_prof_recent_alloc_set_dont_call_directly(edata, NULL); +} + +static inline prof_recent_t * +edata_prof_recent_alloc_get_no_lock(const edata_t *edata) { + cassert(config_prof); + return edata_prof_recent_alloc_get_dont_call_directly(edata); +} + +prof_recent_t * +edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata) { + cassert(config_prof); + return edata_prof_recent_alloc_get_no_lock(edata); +} + +static inline prof_recent_t * +edata_prof_recent_alloc_get(tsd_t *tsd, const edata_t *edata) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + prof_recent_t *recent_alloc = + edata_prof_recent_alloc_get_no_lock(edata); + assert(recent_alloc == NULL || + prof_recent_alloc_edata_get(tsd, recent_alloc) == edata); + return recent_alloc; +} + +static prof_recent_t * +edata_prof_recent_alloc_update_internal(tsd_t *tsd, edata_t *edata, + prof_recent_t *recent_alloc) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + prof_recent_t *old_recent_alloc = + edata_prof_recent_alloc_get(tsd, edata); + edata_prof_recent_alloc_set_dont_call_directly(edata, recent_alloc); + return old_recent_alloc; +} + +static void +edata_prof_recent_alloc_set(tsd_t *tsd, edata_t *edata, + prof_recent_t *recent_alloc) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + assert(recent_alloc != NULL); + prof_recent_t *old_recent_alloc = + edata_prof_recent_alloc_update_internal(tsd, edata, recent_alloc); + assert(old_recent_alloc == NULL); + prof_recent_alloc_edata_set(tsd, recent_alloc, edata); +} + +static void +edata_prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata, + prof_recent_t *recent_alloc) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + assert(recent_alloc != NULL); + prof_recent_t *old_recent_alloc = + edata_prof_recent_alloc_update_internal(tsd, edata, NULL); + assert(old_recent_alloc == recent_alloc); + assert(edata == prof_recent_alloc_edata_get(tsd, recent_alloc)); + prof_recent_alloc_edata_set(tsd, recent_alloc, NULL); +} + +/* + * This function should be called right before an allocation is released, so + * that the associated recent allocation record can contain the following + * information: + * (1) The allocation is released; + * (2) The time of the deallocation; and + * (3) The prof_tctx associated with the 
deallocation. + */ +void +prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata) { + cassert(config_prof); + /* + * Check whether the recent allocation record still exists without + * trying to acquire the lock. + */ + if (edata_prof_recent_alloc_get_no_lock(edata) == NULL) { + return; + } + + prof_tctx_t *dalloc_tctx = prof_tctx_create(tsd); + /* + * In case dalloc_tctx is NULL, e.g. due to OOM, we will not record the + * deallocation time / tctx, which is handled later, after we check + * again when holding the lock. + */ + + if (dalloc_tctx != NULL) { + malloc_mutex_lock(tsd_tsdn(tsd), dalloc_tctx->tdata->lock); + increment_recent_count(tsd, dalloc_tctx); + dalloc_tctx->prepared = false; + malloc_mutex_unlock(tsd_tsdn(tsd), dalloc_tctx->tdata->lock); + } + + malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + /* Check again after acquiring the lock. */ + prof_recent_t *recent = edata_prof_recent_alloc_get(tsd, edata); + if (recent != NULL) { + assert(nstime_equals_zero(&recent->dalloc_time)); + assert(recent->dalloc_tctx == NULL); + if (dalloc_tctx != NULL) { + nstime_prof_update(&recent->dalloc_time); + recent->dalloc_tctx = dalloc_tctx; + dalloc_tctx = NULL; + } + edata_prof_recent_alloc_reset(tsd, edata, recent); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + + if (dalloc_tctx != NULL) { + /* We lost the rase - the allocation record was just gone. */ + decrement_recent_count(tsd, dalloc_tctx); + } +} + +static void +prof_recent_alloc_evict_edata(tsd_t *tsd, prof_recent_t *recent_alloc) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + edata_t *edata = prof_recent_alloc_edata_get(tsd, recent_alloc); + if (edata != NULL) { + edata_prof_recent_alloc_reset(tsd, edata, recent_alloc); + } +} + +static bool +prof_recent_alloc_is_empty(tsd_t *tsd) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + if (ql_empty(&prof_recent_alloc_list)) { + assert(prof_recent_alloc_count == 0); + return true; + } else { + assert(prof_recent_alloc_count > 0); + return false; + } +} + +static void +prof_recent_alloc_assert_count(tsd_t *tsd) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + if (!config_debug) { + return; + } + ssize_t count = 0; + prof_recent_t *n; + ql_foreach(n, &prof_recent_alloc_list, link) { + ++count; + } + assert(count == prof_recent_alloc_count); + assert(prof_recent_alloc_max_get(tsd) == -1 || + count <= prof_recent_alloc_max_get(tsd)); +} + +void +prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize) { + cassert(config_prof); + assert(edata != NULL); + prof_tctx_t *tctx = edata_prof_tctx_get(edata); + + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + prof_recent_alloc_assert_count(tsd); + + /* + * Reserve a new prof_recent_t node if needed. If needed, we release + * the prof_recent_alloc_mtx lock and allocate. Then, rather than + * immediately checking for OOM, we regain the lock and try to make use + * of the reserve node if needed. 
There are six scenarios: + * + * \ now | no need | need but OOMed | need and allocated + * later \ | | | + * ------------------------------------------------------------ + * no need | (1) | (2) | (3) + * ------------------------------------------------------------ + * need | (4) | (5) | (6) + * + * First, "(4)" never happens, because we don't release the lock in the + * middle if there's no need for a new node; in such cases "(1)" always + * takes place, which is trivial. + * + * Out of the remaining four scenarios, "(6)" is the common case and is + * trivial. "(5)" is also trivial, in which case we'll rollback the + * effect of prof_recent_alloc_prepare() as expected. + * + * "(2)" / "(3)" occurs when the need for a new node is gone after we + * regain the lock. If the new node is successfully allocated, i.e. in + * the case of "(3)", we'll release it in the end; otherwise, i.e. in + * the case of "(2)", we do nothing - we're lucky that the OOM ends up + * doing no harm at all. + * + * Therefore, the only performance cost of the "release lock" -> + * "allocate" -> "regain lock" design is the "(3)" case, but it happens + * very rarely, so the cost is relatively small compared to the gain of + * not having to have the lock order of prof_recent_alloc_mtx above all + * the allocation locks. + */ + prof_recent_t *reserve = NULL; + if (prof_recent_alloc_max_get(tsd) == -1 || + prof_recent_alloc_count < prof_recent_alloc_max_get(tsd)) { + assert(prof_recent_alloc_max_get(tsd) != 0); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + reserve = prof_recent_allocate_node(tsd_tsdn(tsd)); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + prof_recent_alloc_assert_count(tsd); + } + + if (prof_recent_alloc_max_get(tsd) == 0) { + assert(prof_recent_alloc_is_empty(tsd)); + goto label_rollback; + } + + prof_tctx_t *old_alloc_tctx, *old_dalloc_tctx; + if (prof_recent_alloc_count == prof_recent_alloc_max_get(tsd)) { + /* If upper limit is reached, rotate the head. */ + assert(prof_recent_alloc_max_get(tsd) != -1); + assert(!prof_recent_alloc_is_empty(tsd)); + prof_recent_t *head = ql_first(&prof_recent_alloc_list); + old_alloc_tctx = head->alloc_tctx; + assert(old_alloc_tctx != NULL); + old_dalloc_tctx = head->dalloc_tctx; + prof_recent_alloc_evict_edata(tsd, head); + ql_rotate(&prof_recent_alloc_list, link); + } else { + /* Otherwise make use of the new node. */ + assert(prof_recent_alloc_max_get(tsd) == -1 || + prof_recent_alloc_count < prof_recent_alloc_max_get(tsd)); + if (reserve == NULL) { + goto label_rollback; + } + ql_elm_new(reserve, link); + ql_tail_insert(&prof_recent_alloc_list, reserve, link); + reserve = NULL; + old_alloc_tctx = NULL; + old_dalloc_tctx = NULL; + ++prof_recent_alloc_count; + } + + /* Fill content into the tail node. */ + prof_recent_t *tail = ql_last(&prof_recent_alloc_list, link); + assert(tail != NULL); + tail->size = size; + tail->usize = usize; + nstime_copy(&tail->alloc_time, edata_prof_alloc_time_get(edata)); + tail->alloc_tctx = tctx; + nstime_init_zero(&tail->dalloc_time); + tail->dalloc_tctx = NULL; + edata_prof_recent_alloc_set(tsd, edata, tail); + + assert(!prof_recent_alloc_is_empty(tsd)); + prof_recent_alloc_assert_count(tsd); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + + if (reserve != NULL) { + prof_recent_free_node(tsd_tsdn(tsd), reserve); + } + + /* + * Asynchronously handle the tctx of the old node, so that there's no + * simultaneous holdings of prof_recent_alloc_mtx and tdata->lock. 
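The scenario table above justifies releasing prof_recent_alloc_mtx around the node allocation so the mutex never has to rank above the allocation locks. The condensed sketch below walks the same release-allocate-reacquire-recheck shape with pthread and malloc(); list_max and the node type are invented for the example, and the rotation of the oldest entry is only hinted at.

```
/* The "drop lock, allocate, retake lock, maybe discard reserve" pattern. */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct node_s { struct node_s *next; } node_t;

static pthread_mutex_t list_mtx = PTHREAD_MUTEX_INITIALIZER;
static node_t *list_head = NULL;
static long list_count = 0;
static long list_max = 8;

static bool
record_one(void)
{
	pthread_mutex_lock(&list_mtx);

	node_t *reserve = NULL;
	if (list_count < list_max) {
		/* Need a new node: allocate without holding the list lock. */
		pthread_mutex_unlock(&list_mtx);
		reserve = malloc(sizeof(*reserve));
		pthread_mutex_lock(&list_mtx);
	}

	bool recorded;
	if (list_count < list_max) {
		if (reserve == NULL) {
			/* Scenario (5): still need a node but OOMed; roll back. */
			recorded = false;
		} else {
			/* Scenario (6): the reserve is consumed as the new entry. */
			reserve->next = list_head;
			list_head = reserve;
			reserve = NULL;
			list_count++;
			recorded = true;
		}
	} else {
		/*
		 * Scenarios (2)/(3): the list filled up while unlocked; the
		 * real code rotates the oldest entry instead of growing.
		 */
		recorded = true;
	}
	pthread_mutex_unlock(&list_mtx);

	/* Scenario (3): an allocated reserve turned out to be unneeded. */
	free(reserve);
	return recorded;
}

int
main(void)
{
	return record_one() ? 0 : 1;
}
```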
+ * In the worst case this may delay the tctx release but it's better + * than holding prof_recent_alloc_mtx for longer. + */ + if (old_alloc_tctx != NULL) { + decrement_recent_count(tsd, old_alloc_tctx); + } + if (old_dalloc_tctx != NULL) { + decrement_recent_count(tsd, old_dalloc_tctx); + } + return; + +label_rollback: + assert(edata_prof_recent_alloc_get(tsd, edata) == NULL); + prof_recent_alloc_assert_count(tsd); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + if (reserve != NULL) { + prof_recent_free_node(tsd_tsdn(tsd), reserve); + } + decrement_recent_count(tsd, tctx); +} + +ssize_t +prof_recent_alloc_max_ctl_read() { + cassert(config_prof); + /* Don't bother to acquire the lock. */ + return prof_recent_alloc_max_get_no_lock(); +} + +static void +prof_recent_alloc_restore_locked(tsd_t *tsd, prof_recent_list_t *to_delete) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + ssize_t max = prof_recent_alloc_max_get(tsd); + if (max == -1 || prof_recent_alloc_count <= max) { + /* Easy case - no need to alter the list. */ + ql_new(to_delete); + prof_recent_alloc_assert_count(tsd); + return; + } + + prof_recent_t *node; + ql_foreach(node, &prof_recent_alloc_list, link) { + if (prof_recent_alloc_count == max) { + break; + } + prof_recent_alloc_evict_edata(tsd, node); + --prof_recent_alloc_count; + } + assert(prof_recent_alloc_count == max); + + ql_move(to_delete, &prof_recent_alloc_list); + if (max == 0) { + assert(node == NULL); + } else { + assert(node != NULL); + ql_split(to_delete, node, &prof_recent_alloc_list, link); + } + assert(!ql_empty(to_delete)); + prof_recent_alloc_assert_count(tsd); +} + +static void +prof_recent_alloc_async_cleanup(tsd_t *tsd, prof_recent_list_t *to_delete) { + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_dump_mtx); + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + while (!ql_empty(to_delete)) { + prof_recent_t *node = ql_first(to_delete); + ql_remove(to_delete, node, link); + decrement_recent_count(tsd, node->alloc_tctx); + if (node->dalloc_tctx != NULL) { + decrement_recent_count(tsd, node->dalloc_tctx); + } + prof_recent_free_node(tsd_tsdn(tsd), node); + } +} + +ssize_t +prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max) { + cassert(config_prof); + assert(max >= -1); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + prof_recent_alloc_assert_count(tsd); + const ssize_t old_max = prof_recent_alloc_max_update(tsd, max); + prof_recent_list_t to_delete; + prof_recent_alloc_restore_locked(tsd, &to_delete); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + prof_recent_alloc_async_cleanup(tsd, &to_delete); + return old_max; +} + +static void +prof_recent_alloc_dump_bt(emitter_t *emitter, prof_tctx_t *tctx) { + char bt_buf[2 * sizeof(intptr_t) + 3]; + char *s = bt_buf; + assert(tctx != NULL); + prof_bt_t *bt = &tctx->gctx->bt; + for (size_t i = 0; i < bt->len; ++i) { + malloc_snprintf(bt_buf, sizeof(bt_buf), "%p", bt->vec[i]); + emitter_json_value(emitter, emitter_type_string, &s); + } +} + +static void +prof_recent_alloc_dump_node(emitter_t *emitter, prof_recent_t *node) { + emitter_json_object_begin(emitter); + + emitter_json_kv(emitter, "size", emitter_type_size, &node->size); + emitter_json_kv(emitter, "usize", emitter_type_size, &node->usize); + bool released = prof_recent_alloc_edata_get_no_lock(node) == NULL; + emitter_json_kv(emitter, "released", emitter_type_bool, &released); + + emitter_json_kv(emitter, "alloc_thread_uid", emitter_type_uint64, + 
&node->alloc_tctx->thr_uid); + prof_tdata_t *alloc_tdata = node->alloc_tctx->tdata; + assert(alloc_tdata != NULL); + if (alloc_tdata->thread_name != NULL) { + emitter_json_kv(emitter, "alloc_thread_name", + emitter_type_string, &alloc_tdata->thread_name); + } + uint64_t alloc_time_ns = nstime_ns(&node->alloc_time); + emitter_json_kv(emitter, "alloc_time", emitter_type_uint64, + &alloc_time_ns); + emitter_json_array_kv_begin(emitter, "alloc_trace"); + prof_recent_alloc_dump_bt(emitter, node->alloc_tctx); + emitter_json_array_end(emitter); + + if (released && node->dalloc_tctx != NULL) { + emitter_json_kv(emitter, "dalloc_thread_uid", + emitter_type_uint64, &node->dalloc_tctx->thr_uid); + prof_tdata_t *dalloc_tdata = node->dalloc_tctx->tdata; + assert(dalloc_tdata != NULL); + if (dalloc_tdata->thread_name != NULL) { + emitter_json_kv(emitter, "dalloc_thread_name", + emitter_type_string, &dalloc_tdata->thread_name); + } + assert(!nstime_equals_zero(&node->dalloc_time)); + uint64_t dalloc_time_ns = nstime_ns(&node->dalloc_time); + emitter_json_kv(emitter, "dalloc_time", emitter_type_uint64, + &dalloc_time_ns); + emitter_json_array_kv_begin(emitter, "dalloc_trace"); + prof_recent_alloc_dump_bt(emitter, node->dalloc_tctx); + emitter_json_array_end(emitter); + } + + emitter_json_object_end(emitter); +} + +#define PROF_RECENT_PRINT_BUFSIZE 65536 +JEMALLOC_COLD +void +prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque) { + cassert(config_prof); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_dump_mtx); + buf_writer_t buf_writer; + buf_writer_init(tsd_tsdn(tsd), &buf_writer, write_cb, cbopaque, NULL, + PROF_RECENT_PRINT_BUFSIZE); + emitter_t emitter; + emitter_init(&emitter, emitter_output_json_compact, buf_writer_cb, + &buf_writer); + prof_recent_list_t temp_list; + + malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + prof_recent_alloc_assert_count(tsd); + ssize_t dump_max = prof_recent_alloc_max_get(tsd); + ql_move(&temp_list, &prof_recent_alloc_list); + ssize_t dump_count = prof_recent_alloc_count; + prof_recent_alloc_count = 0; + prof_recent_alloc_assert_count(tsd); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + + emitter_begin(&emitter); + uint64_t sample_interval = (uint64_t)1U << lg_prof_sample; + emitter_json_kv(&emitter, "sample_interval", emitter_type_uint64, + &sample_interval); + emitter_json_kv(&emitter, "recent_alloc_max", emitter_type_ssize, + &dump_max); + emitter_json_array_kv_begin(&emitter, "recent_alloc"); + prof_recent_t *node; + ql_foreach(node, &temp_list, link) { + prof_recent_alloc_dump_node(&emitter, node); + } + emitter_json_array_end(&emitter); + emitter_end(&emitter); + + malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + prof_recent_alloc_assert_count(tsd); + ql_concat(&temp_list, &prof_recent_alloc_list, link); + ql_move(&prof_recent_alloc_list, &temp_list); + prof_recent_alloc_count += dump_count; + prof_recent_alloc_restore_locked(tsd, &temp_list); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx); + + buf_writer_terminate(tsd_tsdn(tsd), &buf_writer); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_dump_mtx); + + prof_recent_alloc_async_cleanup(tsd, &temp_list); +} +#undef PROF_RECENT_PRINT_BUFSIZE + +bool +prof_recent_init() { + cassert(config_prof); + prof_recent_alloc_max_init(); + + if (malloc_mutex_init(&prof_recent_alloc_mtx, "prof_recent_alloc", + WITNESS_RANK_PROF_RECENT_ALLOC, malloc_mutex_rank_exclusive)) { + return true; + } + + if (malloc_mutex_init(&prof_recent_dump_mtx, 
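prof_recent_alloc_dump() above avoids holding prof_recent_alloc_mtx for the whole emission by borrowing the entire list: it moves the records to a temporary list and zeroes the count under the lock, emits without the lock, then splices the records back and lets prof_recent_alloc_restore_locked() re-apply the limit. The sketch below shows only the borrow-and-splice portion with an ordinary singly linked list; the re-trim step is omitted.

```
/* Borrow a shared list under a lock, emit it unlocked, splice it back. */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

typedef struct rec_s { struct rec_s *next; size_t usize; } rec_t;

static pthread_mutex_t rec_mtx = PTHREAD_MUTEX_INITIALIZER;
static rec_t *rec_head = NULL;
static long rec_count = 0;

static void
dump_records(void)
{
	pthread_mutex_lock(&rec_mtx);
	rec_t *stolen = rec_head;	/* borrow the whole list */
	long stolen_count = rec_count;
	rec_head = NULL;
	rec_count = 0;
	pthread_mutex_unlock(&rec_mtx);

	/* Emit without blocking threads that record new allocations. */
	for (rec_t *r = stolen; r != NULL; r = r->next) {
		printf("usize=%zu\n", r->usize);
	}

	pthread_mutex_lock(&rec_mtx);
	/* Splice the borrowed records back ahead of any new ones. */
	if (stolen != NULL) {
		rec_t *tail = stolen;
		while (tail->next != NULL) {
			tail = tail->next;
		}
		tail->next = rec_head;
		rec_head = stolen;
	}
	rec_count += stolen_count;
	pthread_mutex_unlock(&rec_mtx);
}

int
main(void)
{
	rec_t a = {NULL, 64}, b = {&a, 4096};

	rec_head = &b;
	rec_count = 2;
	dump_records();
	return 0;
}
```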
"prof_recent_dump", + WITNESS_RANK_PROF_RECENT_DUMP, malloc_mutex_rank_exclusive)) { + return true; + } + + ql_new(&prof_recent_alloc_list); + + return false; +} diff --git a/contrib/jemalloc/src/prof_stats.c b/contrib/jemalloc/src/prof_stats.c new file mode 100644 index 00000000000000..5d1a506bb72d4f --- /dev/null +++ b/contrib/jemalloc/src/prof_stats.c @@ -0,0 +1,57 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/prof_stats.h" + +bool opt_prof_stats = false; +malloc_mutex_t prof_stats_mtx; +static prof_stats_t prof_stats_live[PROF_SC_NSIZES]; +static prof_stats_t prof_stats_accum[PROF_SC_NSIZES]; + +static void +prof_stats_enter(tsd_t *tsd, szind_t ind) { + assert(opt_prof && opt_prof_stats); + assert(ind < SC_NSIZES); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_stats_mtx); +} + +static void +prof_stats_leave(tsd_t *tsd) { + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_stats_mtx); +} + +void +prof_stats_inc(tsd_t *tsd, szind_t ind, size_t size) { + cassert(config_prof); + prof_stats_enter(tsd, ind); + prof_stats_live[ind].req_sum += size; + prof_stats_live[ind].count++; + prof_stats_accum[ind].req_sum += size; + prof_stats_accum[ind].count++; + prof_stats_leave(tsd); +} + +void +prof_stats_dec(tsd_t *tsd, szind_t ind, size_t size) { + cassert(config_prof); + prof_stats_enter(tsd, ind); + prof_stats_live[ind].req_sum -= size; + prof_stats_live[ind].count--; + prof_stats_leave(tsd); +} + +void +prof_stats_get_live(tsd_t *tsd, szind_t ind, prof_stats_t *stats) { + cassert(config_prof); + prof_stats_enter(tsd, ind); + memcpy(stats, &prof_stats_live[ind], sizeof(prof_stats_t)); + prof_stats_leave(tsd); +} + +void +prof_stats_get_accum(tsd_t *tsd, szind_t ind, prof_stats_t *stats) { + cassert(config_prof); + prof_stats_enter(tsd, ind); + memcpy(stats, &prof_stats_accum[ind], sizeof(prof_stats_t)); + prof_stats_leave(tsd); +} diff --git a/contrib/jemalloc/src/prof_sys.c b/contrib/jemalloc/src/prof_sys.c new file mode 100644 index 00000000000000..b5f1f5b225e171 --- /dev/null +++ b/contrib/jemalloc/src/prof_sys.c @@ -0,0 +1,669 @@ +#define JEMALLOC_PROF_SYS_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/buf_writer.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/prof_data.h" +#include "jemalloc/internal/prof_sys.h" + +#ifdef JEMALLOC_PROF_LIBUNWIND +#define UNW_LOCAL_ONLY +#include +#endif + +#ifdef JEMALLOC_PROF_LIBGCC +/* + * We have a circular dependency -- jemalloc_internal.h tells us if we should + * use libgcc's unwinding functionality, but after we've included that, we've + * already hooked _Unwind_Backtrace. We'll temporarily disable hooking. + */ +#undef _Unwind_Backtrace +#include +#define _Unwind_Backtrace JEMALLOC_TEST_HOOK(_Unwind_Backtrace, test_hooks_libc_hook) +#endif + +/******************************************************************************/ + +malloc_mutex_t prof_dump_filename_mtx; + +bool prof_do_mock = false; + +static uint64_t prof_dump_seq; +static uint64_t prof_dump_iseq; +static uint64_t prof_dump_mseq; +static uint64_t prof_dump_useq; + +static char *prof_prefix = NULL; + +/* The fallback allocator profiling functionality will use. 
*/ +base_t *prof_base; + +void +bt_init(prof_bt_t *bt, void **vec) { + cassert(config_prof); + + bt->vec = vec; + bt->len = 0; +} + +#ifdef JEMALLOC_PROF_LIBUNWIND +static void +prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) { + int nframes; + + cassert(config_prof); + assert(*len == 0); + assert(vec != NULL); + assert(max_len == PROF_BT_MAX); + + nframes = unw_backtrace(vec, PROF_BT_MAX); + if (nframes <= 0) { + return; + } + *len = nframes; +} +#elif (defined(JEMALLOC_PROF_LIBGCC)) +static _Unwind_Reason_Code +prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) { + cassert(config_prof); + + return _URC_NO_REASON; +} + +static _Unwind_Reason_Code +prof_unwind_callback(struct _Unwind_Context *context, void *arg) { + prof_unwind_data_t *data = (prof_unwind_data_t *)arg; + void *ip; + + cassert(config_prof); + + ip = (void *)_Unwind_GetIP(context); + if (ip == NULL) { + return _URC_END_OF_STACK; + } + data->vec[*data->len] = ip; + (*data->len)++; + if (*data->len == data->max) { + return _URC_END_OF_STACK; + } + + return _URC_NO_REASON; +} + +static void +prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) { + prof_unwind_data_t data = {vec, len, max_len}; + + cassert(config_prof); + assert(vec != NULL); + assert(max_len == PROF_BT_MAX); + + _Unwind_Backtrace(prof_unwind_callback, &data); +} +#elif (defined(JEMALLOC_PROF_GCC)) +static void +prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) { +#define BT_FRAME(i) \ + if ((i) < max_len) { \ + void *p; \ + if (__builtin_frame_address(i) == 0) { \ + return; \ + } \ + p = __builtin_return_address(i); \ + if (p == NULL) { \ + return; \ + } \ + vec[(i)] = p; \ + *len = (i) + 1; \ + } else { \ + return; \ + } + + cassert(config_prof); + assert(vec != NULL); + assert(max_len == PROF_BT_MAX); + + BT_FRAME(0) + BT_FRAME(1) + BT_FRAME(2) + BT_FRAME(3) + BT_FRAME(4) + BT_FRAME(5) + BT_FRAME(6) + BT_FRAME(7) + BT_FRAME(8) + BT_FRAME(9) + + BT_FRAME(10) + BT_FRAME(11) + BT_FRAME(12) + BT_FRAME(13) + BT_FRAME(14) + BT_FRAME(15) + BT_FRAME(16) + BT_FRAME(17) + BT_FRAME(18) + BT_FRAME(19) + + BT_FRAME(20) + BT_FRAME(21) + BT_FRAME(22) + BT_FRAME(23) + BT_FRAME(24) + BT_FRAME(25) + BT_FRAME(26) + BT_FRAME(27) + BT_FRAME(28) + BT_FRAME(29) + + BT_FRAME(30) + BT_FRAME(31) + BT_FRAME(32) + BT_FRAME(33) + BT_FRAME(34) + BT_FRAME(35) + BT_FRAME(36) + BT_FRAME(37) + BT_FRAME(38) + BT_FRAME(39) + + BT_FRAME(40) + BT_FRAME(41) + BT_FRAME(42) + BT_FRAME(43) + BT_FRAME(44) + BT_FRAME(45) + BT_FRAME(46) + BT_FRAME(47) + BT_FRAME(48) + BT_FRAME(49) + + BT_FRAME(50) + BT_FRAME(51) + BT_FRAME(52) + BT_FRAME(53) + BT_FRAME(54) + BT_FRAME(55) + BT_FRAME(56) + BT_FRAME(57) + BT_FRAME(58) + BT_FRAME(59) + + BT_FRAME(60) + BT_FRAME(61) + BT_FRAME(62) + BT_FRAME(63) + BT_FRAME(64) + BT_FRAME(65) + BT_FRAME(66) + BT_FRAME(67) + BT_FRAME(68) + BT_FRAME(69) + + BT_FRAME(70) + BT_FRAME(71) + BT_FRAME(72) + BT_FRAME(73) + BT_FRAME(74) + BT_FRAME(75) + BT_FRAME(76) + BT_FRAME(77) + BT_FRAME(78) + BT_FRAME(79) + + BT_FRAME(80) + BT_FRAME(81) + BT_FRAME(82) + BT_FRAME(83) + BT_FRAME(84) + BT_FRAME(85) + BT_FRAME(86) + BT_FRAME(87) + BT_FRAME(88) + BT_FRAME(89) + + BT_FRAME(90) + BT_FRAME(91) + BT_FRAME(92) + BT_FRAME(93) + BT_FRAME(94) + BT_FRAME(95) + BT_FRAME(96) + BT_FRAME(97) + BT_FRAME(98) + BT_FRAME(99) + + BT_FRAME(100) + BT_FRAME(101) + BT_FRAME(102) + BT_FRAME(103) + BT_FRAME(104) + BT_FRAME(105) + BT_FRAME(106) + BT_FRAME(107) + BT_FRAME(108) + BT_FRAME(109) + + BT_FRAME(110) + BT_FRAME(111) + BT_FRAME(112) + 
BT_FRAME(113) + BT_FRAME(114) + BT_FRAME(115) + BT_FRAME(116) + BT_FRAME(117) + BT_FRAME(118) + BT_FRAME(119) + + BT_FRAME(120) + BT_FRAME(121) + BT_FRAME(122) + BT_FRAME(123) + BT_FRAME(124) + BT_FRAME(125) + BT_FRAME(126) + BT_FRAME(127) +#undef BT_FRAME +} +#else +static void +prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) { + cassert(config_prof); + not_reached(); +} +#endif + +void +prof_backtrace(tsd_t *tsd, prof_bt_t *bt) { + cassert(config_prof); + prof_backtrace_hook_t prof_backtrace_hook = prof_backtrace_hook_get(); + assert(prof_backtrace_hook != NULL); + + pre_reentrancy(tsd, NULL); + prof_backtrace_hook(bt->vec, &bt->len, PROF_BT_MAX); + post_reentrancy(tsd); +} + +void +prof_hooks_init() { + prof_backtrace_hook_set(&prof_backtrace_impl); + prof_dump_hook_set(NULL); +} + +void +prof_unwind_init() { +#ifdef JEMALLOC_PROF_LIBGCC + /* + * Cause the backtracing machinery to allocate its internal + * state before enabling profiling. + */ + _Unwind_Backtrace(prof_unwind_init_callback, NULL); +#endif +} + +static int +prof_sys_thread_name_read_impl(char *buf, size_t limit) { +#if defined(JEMALLOC_HAVE_PTHREAD_GETNAME_NP) + return pthread_getname_np(pthread_self(), buf, limit); +#elif defined(JEMALLOC_HAVE_PTHREAD_GET_NAME_NP) + pthread_get_name_np(pthread_self(), buf, limit); + return 0; +#else + return ENOSYS; +#endif +} +prof_sys_thread_name_read_t *JET_MUTABLE prof_sys_thread_name_read = + prof_sys_thread_name_read_impl; + +void +prof_sys_thread_name_fetch(tsd_t *tsd) { +#define THREAD_NAME_MAX_LEN 16 + char buf[THREAD_NAME_MAX_LEN]; + if (!prof_sys_thread_name_read(buf, THREAD_NAME_MAX_LEN)) { + prof_thread_name_set_impl(tsd, buf); + } +#undef THREAD_NAME_MAX_LEN +} + +int +prof_getpid(void) { +#ifdef _WIN32 + return GetCurrentProcessId(); +#else + return getpid(); +#endif +} + +/* + * This buffer is rather large for stack allocation, so use a single buffer for + * all profile dumps; protected by prof_dump_mtx. + */ +static char prof_dump_buf[PROF_DUMP_BUFSIZE]; + +typedef struct prof_dump_arg_s prof_dump_arg_t; +struct prof_dump_arg_s { + /* + * Whether error should be handled locally: if true, then we print out + * error message as well as abort (if opt_abort is true) when an error + * occurred, and we also report the error back to the caller in the end; + * if false, then we only report the error back to the caller in the + * end. + */ + const bool handle_error_locally; + /* + * Whether there has been an error in the dumping process, which could + * have happened either in file opening or in file writing. When an + * error has already occurred, we will stop further writing to the file. + */ + bool error; + /* File descriptor of the dump file. */ + int prof_dump_fd; +}; + +static void +prof_dump_check_possible_error(prof_dump_arg_t *arg, bool err_cond, + const char *format, ...) 
{ + assert(!arg->error); + if (!err_cond) { + return; + } + + arg->error = true; + if (!arg->handle_error_locally) { + return; + } + + va_list ap; + char buf[PROF_PRINTF_BUFSIZE]; + va_start(ap, format); + malloc_vsnprintf(buf, sizeof(buf), format, ap); + va_end(ap); + malloc_write(buf); + + if (opt_abort) { + abort(); + } +} + +static int +prof_dump_open_file_impl(const char *filename, int mode) { + return creat(filename, mode); +} +prof_dump_open_file_t *JET_MUTABLE prof_dump_open_file = + prof_dump_open_file_impl; + +static void +prof_dump_open(prof_dump_arg_t *arg, const char *filename) { + arg->prof_dump_fd = prof_dump_open_file(filename, 0644); + prof_dump_check_possible_error(arg, arg->prof_dump_fd == -1, + ": failed to open \"%s\"\n", filename); +} + +prof_dump_write_file_t *JET_MUTABLE prof_dump_write_file = malloc_write_fd; + +static void +prof_dump_flush(void *opaque, const char *s) { + cassert(config_prof); + prof_dump_arg_t *arg = (prof_dump_arg_t *)opaque; + if (!arg->error) { + ssize_t err = prof_dump_write_file(arg->prof_dump_fd, s, + strlen(s)); + prof_dump_check_possible_error(arg, err == -1, + ": failed to write during heap profile flush\n"); + } +} + +static void +prof_dump_close(prof_dump_arg_t *arg) { + if (arg->prof_dump_fd != -1) { + close(arg->prof_dump_fd); + } +} + +#ifndef _WIN32 +JEMALLOC_FORMAT_PRINTF(1, 2) +static int +prof_open_maps_internal(const char *format, ...) { + int mfd; + va_list ap; + char filename[PATH_MAX + 1]; + + va_start(ap, format); + malloc_vsnprintf(filename, sizeof(filename), format, ap); + va_end(ap); + +#if defined(O_CLOEXEC) + mfd = open(filename, O_RDONLY | O_CLOEXEC); +#else + mfd = open(filename, O_RDONLY); + if (mfd != -1) { + fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC); + } +#endif + + return mfd; +} +#endif + +static int +prof_dump_open_maps_impl() { + int mfd; + + cassert(config_prof); +#if defined(__FreeBSD__) || defined(__DragonFly__) + mfd = prof_open_maps_internal("/proc/curproc/map"); +#elif defined(_WIN32) + mfd = -1; // Not implemented +#else + int pid = prof_getpid(); + + mfd = prof_open_maps_internal("/proc/%d/task/%d/maps", pid, pid); + if (mfd == -1) { + mfd = prof_open_maps_internal("/proc/%d/maps", pid); + } +#endif + return mfd; +} +prof_dump_open_maps_t *JET_MUTABLE prof_dump_open_maps = + prof_dump_open_maps_impl; + +static ssize_t +prof_dump_read_maps_cb(void *read_cbopaque, void *buf, size_t limit) { + int mfd = *(int *)read_cbopaque; + assert(mfd != -1); + return malloc_read_fd(mfd, buf, limit); +} + +static void +prof_dump_maps(buf_writer_t *buf_writer) { + int mfd = prof_dump_open_maps(); + if (mfd == -1) { + return; + } + + buf_writer_cb(buf_writer, "\nMAPPED_LIBRARIES:\n"); + buf_writer_pipe(buf_writer, prof_dump_read_maps_cb, &mfd); + close(mfd); +} + +static bool +prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, + bool leakcheck) { + cassert(config_prof); + assert(tsd_reentrancy_level_get(tsd) == 0); + + prof_tdata_t * tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return true; + } + + prof_dump_arg_t arg = {/* handle_error_locally */ !propagate_err, + /* error */ false, /* prof_dump_fd */ -1}; + + pre_reentrancy(tsd, NULL); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); + + prof_dump_open(&arg, filename); + buf_writer_t buf_writer; + bool err = buf_writer_init(tsd_tsdn(tsd), &buf_writer, prof_dump_flush, + &arg, prof_dump_buf, PROF_DUMP_BUFSIZE); + assert(!err); + prof_dump_impl(tsd, buf_writer_cb, &buf_writer, tdata, leakcheck); + prof_dump_maps(&buf_writer); + 
buf_writer_terminate(tsd_tsdn(tsd), &buf_writer); + prof_dump_close(&arg); + + prof_dump_hook_t dump_hook = prof_dump_hook_get(); + if (dump_hook != NULL) { + dump_hook(filename); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); + post_reentrancy(tsd); + + return arg.error; +} + +/* + * If profiling is off, then PROF_DUMP_FILENAME_LEN is 1, so we'll end up + * calling strncpy with a size of 0, which triggers a -Wstringop-truncation + * warning (strncpy can never actually be called in this case, since we bail out + * much earlier when config_prof is false). This function works around the + * warning to let us leave the warning on. + */ +static inline void +prof_strncpy(char *UNUSED dest, const char *UNUSED src, size_t UNUSED size) { + cassert(config_prof); +#ifdef JEMALLOC_PROF + strncpy(dest, src, size); +#endif +} + +static const char * +prof_prefix_get(tsdn_t* tsdn) { + malloc_mutex_assert_owner(tsdn, &prof_dump_filename_mtx); + + return prof_prefix == NULL ? opt_prof_prefix : prof_prefix; +} + +static bool +prof_prefix_is_empty(tsdn_t *tsdn) { + malloc_mutex_lock(tsdn, &prof_dump_filename_mtx); + bool ret = (prof_prefix_get(tsdn)[0] == '\0'); + malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx); + return ret; +} + +#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) +#define VSEQ_INVALID UINT64_C(0xffffffffffffffff) +static void +prof_dump_filename(tsd_t *tsd, char *filename, char v, uint64_t vseq) { + cassert(config_prof); + + assert(tsd_reentrancy_level_get(tsd) == 0); + const char *prefix = prof_prefix_get(tsd_tsdn(tsd)); + + if (vseq != VSEQ_INVALID) { + /* "...v.heap" */ + malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, + "%s.%d.%"FMTu64".%c%"FMTu64".heap", prefix, prof_getpid(), + prof_dump_seq, v, vseq); + } else { + /* "....heap" */ + malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, + "%s.%d.%"FMTu64".%c.heap", prefix, prof_getpid(), + prof_dump_seq, v); + } + prof_dump_seq++; +} + +void +prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind) { + malloc_mutex_lock(tsdn, &prof_dump_filename_mtx); + malloc_snprintf(filename, PROF_DUMP_FILENAME_LEN, + "%s.%d.%"FMTu64".json", prof_prefix_get(tsdn), prof_getpid(), ind); + malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx); +} + +void +prof_fdump_impl(tsd_t *tsd) { + char filename[DUMP_FILENAME_BUFSIZE]; + + assert(!prof_prefix_is_empty(tsd_tsdn(tsd))); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_filename_mtx); + prof_dump_filename(tsd, filename, 'f', VSEQ_INVALID); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx); + prof_dump(tsd, false, filename, opt_prof_leak); +} + +bool +prof_prefix_set(tsdn_t *tsdn, const char *prefix) { + cassert(config_prof); + ctl_mtx_assert_held(tsdn); + malloc_mutex_lock(tsdn, &prof_dump_filename_mtx); + if (prof_prefix == NULL) { + malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx); + /* Everything is still guarded by ctl_mtx. 
*/ + char *buffer = base_alloc(tsdn, prof_base, + PROF_DUMP_FILENAME_LEN, QUANTUM); + if (buffer == NULL) { + return true; + } + malloc_mutex_lock(tsdn, &prof_dump_filename_mtx); + prof_prefix = buffer; + } + assert(prof_prefix != NULL); + + prof_strncpy(prof_prefix, prefix, PROF_DUMP_FILENAME_LEN - 1); + prof_prefix[PROF_DUMP_FILENAME_LEN - 1] = '\0'; + malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx); + + return false; +} + +void +prof_idump_impl(tsd_t *tsd) { + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_filename_mtx); + if (prof_prefix_get(tsd_tsdn(tsd))[0] == '\0') { + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx); + return; + } + char filename[PATH_MAX + 1]; + prof_dump_filename(tsd, filename, 'i', prof_dump_iseq); + prof_dump_iseq++; + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx); + prof_dump(tsd, false, filename, false); +} + +bool +prof_mdump_impl(tsd_t *tsd, const char *filename) { + char filename_buf[DUMP_FILENAME_BUFSIZE]; + if (filename == NULL) { + /* No filename specified, so automatically generate one. */ + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_filename_mtx); + if (prof_prefix_get(tsd_tsdn(tsd))[0] == '\0') { + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx); + return true; + } + prof_dump_filename(tsd, filename_buf, 'm', prof_dump_mseq); + prof_dump_mseq++; + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx); + filename = filename_buf; + } + return prof_dump(tsd, true, filename, false); +} + +void +prof_gdump_impl(tsd_t *tsd) { + tsdn_t *tsdn = tsd_tsdn(tsd); + malloc_mutex_lock(tsdn, &prof_dump_filename_mtx); + if (prof_prefix_get(tsdn)[0] == '\0') { + malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx); + return; + } + char filename[DUMP_FILENAME_BUFSIZE]; + prof_dump_filename(tsd, filename, 'u', prof_dump_useq); + prof_dump_useq++; + malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx); + prof_dump(tsd, false, filename, false); +} diff --git a/contrib/jemalloc/src/psset.c b/contrib/jemalloc/src/psset.c new file mode 100644 index 00000000000000..9a8f054f111c96 --- /dev/null +++ b/contrib/jemalloc/src/psset.c @@ -0,0 +1,385 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/psset.h" + +#include "jemalloc/internal/fb.h" + +void +psset_init(psset_t *psset) { + for (unsigned i = 0; i < PSSET_NPSIZES; i++) { + hpdata_age_heap_new(&psset->pageslabs[i]); + } + fb_init(psset->pageslab_bitmap, PSSET_NPSIZES); + memset(&psset->merged_stats, 0, sizeof(psset->merged_stats)); + memset(&psset->stats, 0, sizeof(psset->stats)); + hpdata_empty_list_init(&psset->empty); + for (int i = 0; i < PSSET_NPURGE_LISTS; i++) { + hpdata_purge_list_init(&psset->to_purge[i]); + } + fb_init(psset->purge_bitmap, PSSET_NPURGE_LISTS); + hpdata_hugify_list_init(&psset->to_hugify); +} + +static void +psset_bin_stats_accum(psset_bin_stats_t *dst, psset_bin_stats_t *src) { + dst->npageslabs += src->npageslabs; + dst->nactive += src->nactive; + dst->ndirty += src->ndirty; +} + +void +psset_stats_accum(psset_stats_t *dst, psset_stats_t *src) { + psset_bin_stats_accum(&dst->full_slabs[0], &src->full_slabs[0]); + psset_bin_stats_accum(&dst->full_slabs[1], &src->full_slabs[1]); + psset_bin_stats_accum(&dst->empty_slabs[0], &src->empty_slabs[0]); + psset_bin_stats_accum(&dst->empty_slabs[1], &src->empty_slabs[1]); + for (pszind_t i = 0; i < PSSET_NPSIZES; i++) { + psset_bin_stats_accum(&dst->nonfull_slabs[i][0], + &src->nonfull_slabs[i][0]); + 
psset_bin_stats_accum(&dst->nonfull_slabs[i][1], + &src->nonfull_slabs[i][1]); + } +} + +/* + * The stats maintenance strategy is to remove a pageslab's contribution to the + * stats when we call psset_update_begin, and re-add it (to a potentially new + * bin) when we call psset_update_end. + */ +JEMALLOC_ALWAYS_INLINE void +psset_bin_stats_insert_remove(psset_t *psset, psset_bin_stats_t *binstats, + hpdata_t *ps, bool insert) { + size_t mul = insert ? (size_t)1 : (size_t)-1; + size_t huge_idx = (size_t)hpdata_huge_get(ps); + + binstats[huge_idx].npageslabs += mul * 1; + binstats[huge_idx].nactive += mul * hpdata_nactive_get(ps); + binstats[huge_idx].ndirty += mul * hpdata_ndirty_get(ps); + + psset->merged_stats.npageslabs += mul * 1; + psset->merged_stats.nactive += mul * hpdata_nactive_get(ps); + psset->merged_stats.ndirty += mul * hpdata_ndirty_get(ps); + + if (config_debug) { + psset_bin_stats_t check_stats = {0}; + for (size_t huge = 0; huge <= 1; huge++) { + psset_bin_stats_accum(&check_stats, + &psset->stats.full_slabs[huge]); + psset_bin_stats_accum(&check_stats, + &psset->stats.empty_slabs[huge]); + for (pszind_t pind = 0; pind < PSSET_NPSIZES; pind++) { + psset_bin_stats_accum(&check_stats, + &psset->stats.nonfull_slabs[pind][huge]); + } + } + assert(psset->merged_stats.npageslabs + == check_stats.npageslabs); + assert(psset->merged_stats.nactive == check_stats.nactive); + assert(psset->merged_stats.ndirty == check_stats.ndirty); + } +} + +static void +psset_bin_stats_insert(psset_t *psset, psset_bin_stats_t *binstats, + hpdata_t *ps) { + psset_bin_stats_insert_remove(psset, binstats, ps, true); +} + +static void +psset_bin_stats_remove(psset_t *psset, psset_bin_stats_t *binstats, + hpdata_t *ps) { + psset_bin_stats_insert_remove(psset, binstats, ps, false); +} + +static void +psset_hpdata_heap_remove(psset_t *psset, pszind_t pind, hpdata_t *ps) { + hpdata_age_heap_remove(&psset->pageslabs[pind], ps); + if (hpdata_age_heap_empty(&psset->pageslabs[pind])) { + fb_unset(psset->pageslab_bitmap, PSSET_NPSIZES, (size_t)pind); + } +} + +static void +psset_hpdata_heap_insert(psset_t *psset, pszind_t pind, hpdata_t *ps) { + if (hpdata_age_heap_empty(&psset->pageslabs[pind])) { + fb_set(psset->pageslab_bitmap, PSSET_NPSIZES, (size_t)pind); + } + hpdata_age_heap_insert(&psset->pageslabs[pind], ps); +} + +static void +psset_stats_insert(psset_t* psset, hpdata_t *ps) { + if (hpdata_empty(ps)) { + psset_bin_stats_insert(psset, psset->stats.empty_slabs, ps); + } else if (hpdata_full(ps)) { + psset_bin_stats_insert(psset, psset->stats.full_slabs, ps); + } else { + size_t longest_free_range = hpdata_longest_free_range_get(ps); + + pszind_t pind = sz_psz2ind(sz_psz_quantize_floor( + longest_free_range << LG_PAGE)); + assert(pind < PSSET_NPSIZES); + + psset_bin_stats_insert(psset, psset->stats.nonfull_slabs[pind], + ps); + } +} + +static void +psset_stats_remove(psset_t *psset, hpdata_t *ps) { + if (hpdata_empty(ps)) { + psset_bin_stats_remove(psset, psset->stats.empty_slabs, ps); + } else if (hpdata_full(ps)) { + psset_bin_stats_remove(psset, psset->stats.full_slabs, ps); + } else { + size_t longest_free_range = hpdata_longest_free_range_get(ps); + + pszind_t pind = sz_psz2ind(sz_psz_quantize_floor( + longest_free_range << LG_PAGE)); + assert(pind < PSSET_NPSIZES); + + psset_bin_stats_remove(psset, psset->stats.nonfull_slabs[pind], + ps); + } +} + +/* + * Put ps into some container so that it can be found during future allocation + * requests. 
+ */ +static void +psset_alloc_container_insert(psset_t *psset, hpdata_t *ps) { + assert(!hpdata_in_psset_alloc_container_get(ps)); + hpdata_in_psset_alloc_container_set(ps, true); + if (hpdata_empty(ps)) { + /* + * This prepend, paired with popping the head in psset_fit, + * means we implement LIFO ordering for the empty slabs set, + * which seems reasonable. + */ + hpdata_empty_list_prepend(&psset->empty, ps); + } else if (hpdata_full(ps)) { + /* + * We don't need to keep track of the full slabs; we're never + * going to return them from a psset_pick_alloc call. + */ + } else { + size_t longest_free_range = hpdata_longest_free_range_get(ps); + + pszind_t pind = sz_psz2ind(sz_psz_quantize_floor( + longest_free_range << LG_PAGE)); + assert(pind < PSSET_NPSIZES); + + psset_hpdata_heap_insert(psset, pind, ps); + } +} + +/* Remove ps from those collections. */ +static void +psset_alloc_container_remove(psset_t *psset, hpdata_t *ps) { + assert(hpdata_in_psset_alloc_container_get(ps)); + hpdata_in_psset_alloc_container_set(ps, false); + + if (hpdata_empty(ps)) { + hpdata_empty_list_remove(&psset->empty, ps); + } else if (hpdata_full(ps)) { + /* Same as above -- do nothing in this case. */ + } else { + size_t longest_free_range = hpdata_longest_free_range_get(ps); + + pszind_t pind = sz_psz2ind(sz_psz_quantize_floor( + longest_free_range << LG_PAGE)); + assert(pind < PSSET_NPSIZES); + + psset_hpdata_heap_remove(psset, pind, ps); + } +} + +static size_t +psset_purge_list_ind(hpdata_t *ps) { + size_t ndirty = hpdata_ndirty_get(ps); + /* Shouldn't have something with no dirty pages purgeable. */ + assert(ndirty > 0); + /* + * Higher indices correspond to lists we'd like to purge earlier; make + * the two highest indices correspond to empty lists, which we attempt + * to purge before purging any non-empty list. This has two advantages: + * - Empty page slabs are the least likely to get reused (we'll only + * pick them for an allocation if we have no other choice). + * - Empty page slabs can purge every dirty page they contain in a + * single call, which is not usually the case. + * + * We purge hugeified empty slabs before nonhugeified ones, on the basis + * that they are fully dirty, while nonhugified slabs might not be, so + * we free up more pages more easily. + */ + if (hpdata_nactive_get(ps) == 0) { + if (hpdata_huge_get(ps)) { + return PSSET_NPURGE_LISTS - 1; + } else { + return PSSET_NPURGE_LISTS - 2; + } + } + + pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(ndirty << LG_PAGE)); + /* + * For non-empty slabs, we may reuse them again. Prefer purging + * non-hugeified slabs before hugeified ones then, among pages of + * similar dirtiness. We still get some benefit from the hugification. + */ + return (size_t)pind * 2 + (hpdata_huge_get(ps) ? 0 : 1); +} + +static void +psset_maybe_remove_purge_list(psset_t *psset, hpdata_t *ps) { + /* + * Remove the hpdata from its purge list (if it's in one). Even if it's + * going to stay in the same one, by appending it during + * psset_update_end, we move it to the end of its queue, so that we + * purge LRU within a given dirtiness bucket. 
+ */ + if (hpdata_purge_allowed_get(ps)) { + size_t ind = psset_purge_list_ind(ps); + hpdata_purge_list_t *purge_list = &psset->to_purge[ind]; + hpdata_purge_list_remove(purge_list, ps); + if (hpdata_purge_list_empty(purge_list)) { + fb_unset(psset->purge_bitmap, PSSET_NPURGE_LISTS, ind); + } + } +} + +static void +psset_maybe_insert_purge_list(psset_t *psset, hpdata_t *ps) { + if (hpdata_purge_allowed_get(ps)) { + size_t ind = psset_purge_list_ind(ps); + hpdata_purge_list_t *purge_list = &psset->to_purge[ind]; + if (hpdata_purge_list_empty(purge_list)) { + fb_set(psset->purge_bitmap, PSSET_NPURGE_LISTS, ind); + } + hpdata_purge_list_append(purge_list, ps); + } + +} + +void +psset_update_begin(psset_t *psset, hpdata_t *ps) { + hpdata_assert_consistent(ps); + assert(hpdata_in_psset_get(ps)); + hpdata_updating_set(ps, true); + psset_stats_remove(psset, ps); + if (hpdata_in_psset_alloc_container_get(ps)) { + /* + * Some metadata updates can break alloc container invariants + * (e.g. the longest free range determines the hpdata_heap_t the + * pageslab lives in). + */ + assert(hpdata_alloc_allowed_get(ps)); + psset_alloc_container_remove(psset, ps); + } + psset_maybe_remove_purge_list(psset, ps); + /* + * We don't update presence in the hugify list; we try to keep it FIFO, + * even in the presence of other metadata updates. We'll update + * presence at the end of the metadata update if necessary. + */ +} + +void +psset_update_end(psset_t *psset, hpdata_t *ps) { + assert(hpdata_in_psset_get(ps)); + hpdata_updating_set(ps, false); + psset_stats_insert(psset, ps); + + /* + * The update begin should have removed ps from whatever alloc container + * it was in. + */ + assert(!hpdata_in_psset_alloc_container_get(ps)); + if (hpdata_alloc_allowed_get(ps)) { + psset_alloc_container_insert(psset, ps); + } + psset_maybe_insert_purge_list(psset, ps); + + if (hpdata_hugify_allowed_get(ps) + && !hpdata_in_psset_hugify_container_get(ps)) { + hpdata_in_psset_hugify_container_set(ps, true); + hpdata_hugify_list_append(&psset->to_hugify, ps); + } else if (!hpdata_hugify_allowed_get(ps) + && hpdata_in_psset_hugify_container_get(ps)) { + hpdata_in_psset_hugify_container_set(ps, false); + hpdata_hugify_list_remove(&psset->to_hugify, ps); + } + hpdata_assert_consistent(ps); +} + +hpdata_t * +psset_pick_alloc(psset_t *psset, size_t size) { + assert((size & PAGE_MASK) == 0); + assert(size <= HUGEPAGE); + + pszind_t min_pind = sz_psz2ind(sz_psz_quantize_ceil(size)); + pszind_t pind = (pszind_t)fb_ffs(psset->pageslab_bitmap, PSSET_NPSIZES, + (size_t)min_pind); + if (pind == PSSET_NPSIZES) { + return hpdata_empty_list_first(&psset->empty); + } + hpdata_t *ps = hpdata_age_heap_first(&psset->pageslabs[pind]); + if (ps == NULL) { + return NULL; + } + + hpdata_assert_consistent(ps); + + return ps; +} + +hpdata_t * +psset_pick_purge(psset_t *psset) { + ssize_t ind_ssz = fb_fls(psset->purge_bitmap, PSSET_NPURGE_LISTS, + PSSET_NPURGE_LISTS - 1); + if (ind_ssz < 0) { + return NULL; + } + pszind_t ind = (pszind_t)ind_ssz; + assert(ind < PSSET_NPURGE_LISTS); + hpdata_t *ps = hpdata_purge_list_first(&psset->to_purge[ind]); + assert(ps != NULL); + return ps; +} + +hpdata_t * +psset_pick_hugify(psset_t *psset) { + return hpdata_hugify_list_first(&psset->to_hugify); +} + +void +psset_insert(psset_t *psset, hpdata_t *ps) { + hpdata_in_psset_set(ps, true); + + psset_stats_insert(psset, ps); + if (hpdata_alloc_allowed_get(ps)) { + psset_alloc_container_insert(psset, ps); + } + psset_maybe_insert_purge_list(psset, ps); + + if 
(hpdata_hugify_allowed_get(ps)) { + hpdata_in_psset_hugify_container_set(ps, true); + hpdata_hugify_list_append(&psset->to_hugify, ps); + } +} + +void +psset_remove(psset_t *psset, hpdata_t *ps) { + hpdata_in_psset_set(ps, false); + + psset_stats_remove(psset, ps); + if (hpdata_in_psset_alloc_container_get(ps)) { + psset_alloc_container_remove(psset, ps); + } + psset_maybe_remove_purge_list(psset, ps); + if (hpdata_in_psset_hugify_container_get(ps)) { + hpdata_in_psset_hugify_container_set(ps, false); + hpdata_hugify_list_remove(&psset->to_hugify, ps); + } +} diff --git a/contrib/jemalloc/src/rtree.c b/contrib/jemalloc/src/rtree.c index 4ae41fe2fec93b..6496b5afdc4a0d 100644 --- a/contrib/jemalloc/src/rtree.c +++ b/contrib/jemalloc/src/rtree.c @@ -1,4 +1,3 @@ -#define JEMALLOC_RTREE_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" @@ -10,7 +9,7 @@ * used. */ bool -rtree_new(rtree_t *rtree, bool zeroed) { +rtree_new(rtree_t *rtree, base_t *base, bool zeroed) { #ifdef JEMALLOC_JET if (!zeroed) { memset(rtree, 0, sizeof(rtree_t)); /* Clear root. */ @@ -18,6 +17,7 @@ rtree_new(rtree_t *rtree, bool zeroed) { #else assert(zeroed); #endif + rtree->base = base; if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE, malloc_mutex_rank_exclusive)) { @@ -28,75 +28,16 @@ rtree_new(rtree_t *rtree, bool zeroed) { } static rtree_node_elm_t * -rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { - return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms * - sizeof(rtree_node_elm_t), CACHELINE); +rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { + return (rtree_node_elm_t *)base_alloc(tsdn, rtree->base, + nelms * sizeof(rtree_node_elm_t), CACHELINE); } -rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc = rtree_node_alloc_impl; - -static void -rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) { - /* Nodes are never deleted during normal operation. */ - not_reached(); -} -rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc = - rtree_node_dalloc_impl; static rtree_leaf_elm_t * -rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { - return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms * - sizeof(rtree_leaf_elm_t), CACHELINE); -} -rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc = rtree_leaf_alloc_impl; - -static void -rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) { - /* Leaves are never deleted during normal operation. 
*/ - not_reached(); +rtree_leaf_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { + return (rtree_leaf_elm_t *)base_alloc(tsdn, rtree->base, + nelms * sizeof(rtree_leaf_elm_t), CACHELINE); } -rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc = - rtree_leaf_dalloc_impl; - -#ifdef JEMALLOC_JET -# if RTREE_HEIGHT > 1 -static void -rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree, - unsigned level) { - size_t nchildren = ZU(1) << rtree_levels[level].bits; - if (level + 2 < RTREE_HEIGHT) { - for (size_t i = 0; i < nchildren; i++) { - rtree_node_elm_t *node = - (rtree_node_elm_t *)atomic_load_p(&subtree[i].child, - ATOMIC_RELAXED); - if (node != NULL) { - rtree_delete_subtree(tsdn, rtree, node, level + - 1); - } - } - } else { - for (size_t i = 0; i < nchildren; i++) { - rtree_leaf_elm_t *leaf = - (rtree_leaf_elm_t *)atomic_load_p(&subtree[i].child, - ATOMIC_RELAXED); - if (leaf != NULL) { - rtree_leaf_dalloc(tsdn, rtree, leaf); - } - } - } - - if (subtree != rtree->root) { - rtree_node_dalloc(tsdn, rtree, subtree); - } -} -# endif - -void -rtree_delete(tsdn_t *tsdn, rtree_t *rtree) { -# if RTREE_HEIGHT > 1 - rtree_delete_subtree(tsdn, rtree, rtree->root, 0); -# endif -} -#endif static rtree_node_elm_t * rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level, diff --git a/contrib/jemalloc/src/safety_check.c b/contrib/jemalloc/src/safety_check.c index 804155dcfc6a87..209fdda92b5cd4 100644 --- a/contrib/jemalloc/src/safety_check.c +++ b/contrib/jemalloc/src/safety_check.c @@ -1,9 +1,21 @@ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" -static void (*safety_check_abort)(const char *message); +static safety_check_abort_hook_t safety_check_abort; -void safety_check_set_abort(void (*abort_fn)(const char *)) { +void safety_check_fail_sized_dealloc(bool current_dealloc, const void *ptr, + size_t true_size, size_t input_size) { + char *src = current_dealloc ? "the current pointer being freed" : + "in thread cache, possibly from previous deallocations"; + + safety_check_fail(": size mismatch detected (true size %zu " + "vs input size %zu), likely caused by application sized " + "deallocation bugs (source address: %p, %s). Suggest building with " + "--enable-debug or address sanitizer for debugging. Abort.\n", + true_size, input_size, ptr, src); +} + +void safety_check_set_abort(safety_check_abort_hook_t abort_fn) { safety_check_abort = abort_fn; } diff --git a/contrib/jemalloc/src/san.c b/contrib/jemalloc/src/san.c new file mode 100644 index 00000000000000..6e51291135c75d --- /dev/null +++ b/contrib/jemalloc/src/san.c @@ -0,0 +1,208 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/ehooks.h" +#include "jemalloc/internal/san.h" +#include "jemalloc/internal/tsd.h" + +/* The sanitizer options. */ +size_t opt_san_guard_large = SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT; +size_t opt_san_guard_small = SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT; + +/* Aligned (-1 is off) ptrs will be junked & stashed on dealloc. */ +ssize_t opt_lg_san_uaf_align = SAN_LG_UAF_ALIGN_DEFAULT; + +/* + * Initialized in san_init(). When disabled, the mask is set to (uintptr_t)-1 + * to always fail the nonfast_align check. 
+ */ +uintptr_t san_cache_bin_nonfast_mask = SAN_CACHE_BIN_NONFAST_MASK_DEFAULT; + +static inline void +san_find_guarded_addr(edata_t *edata, uintptr_t *guard1, uintptr_t *guard2, + uintptr_t *addr, size_t size, bool left, bool right) { + assert(!edata_guarded_get(edata)); + assert(size % PAGE == 0); + *addr = (uintptr_t)edata_base_get(edata); + if (left) { + *guard1 = *addr; + *addr += SAN_PAGE_GUARD; + } else { + *guard1 = 0; + } + + if (right) { + *guard2 = *addr + size; + } else { + *guard2 = 0; + } +} + +static inline void +san_find_unguarded_addr(edata_t *edata, uintptr_t *guard1, uintptr_t *guard2, + uintptr_t *addr, size_t size, bool left, bool right) { + assert(edata_guarded_get(edata)); + assert(size % PAGE == 0); + *addr = (uintptr_t)edata_base_get(edata); + if (right) { + *guard2 = *addr + size; + } else { + *guard2 = 0; + } + + if (left) { + *guard1 = *addr - SAN_PAGE_GUARD; + assert(*guard1 != 0); + *addr = *guard1; + } else { + *guard1 = 0; + } +} + +void +san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap, + bool left, bool right, bool remap) { + assert(left || right); + if (remap) { + emap_deregister_boundary(tsdn, emap, edata); + } + + size_t size_with_guards = edata_size_get(edata); + size_t usize = (left && right) + ? san_two_side_unguarded_sz(size_with_guards) + : san_one_side_unguarded_sz(size_with_guards); + + uintptr_t guard1, guard2, addr; + san_find_guarded_addr(edata, &guard1, &guard2, &addr, usize, left, + right); + + assert(edata_state_get(edata) == extent_state_active); + ehooks_guard(tsdn, ehooks, (void *)guard1, (void *)guard2); + + /* Update the guarded addr and usable size of the edata. */ + edata_size_set(edata, usize); + edata_addr_set(edata, (void *)addr); + edata_guarded_set(edata, true); + + if (remap) { + emap_register_boundary(tsdn, emap, edata, SC_NSIZES, + /* slab */ false); + } +} + +static void +san_unguard_pages_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + emap_t *emap, bool left, bool right, bool remap) { + assert(left || right); + /* Remove the inner boundary which no longer exists. */ + if (remap) { + assert(edata_state_get(edata) == extent_state_active); + emap_deregister_boundary(tsdn, emap, edata); + } else { + assert(edata_state_get(edata) == extent_state_retained); + } + + size_t size = edata_size_get(edata); + size_t size_with_guards = (left && right) + ? san_two_side_guarded_sz(size) + : san_one_side_guarded_sz(size); + + uintptr_t guard1, guard2, addr; + san_find_unguarded_addr(edata, &guard1, &guard2, &addr, size, left, + right); + + ehooks_unguard(tsdn, ehooks, (void *)guard1, (void *)guard2); + + /* Update the true addr and usable size of the edata. */ + edata_size_set(edata, size_with_guards); + edata_addr_set(edata, (void *)addr); + edata_guarded_set(edata, false); + + /* + * Then re-register the outer boundary including the guards, if + * requested. + */ + if (remap) { + emap_register_boundary(tsdn, emap, edata, SC_NSIZES, + /* slab */ false); + } +} + +void +san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + emap_t *emap, bool left, bool right) { + san_unguard_pages_impl(tsdn, ehooks, edata, emap, left, right, + /* remap */ true); +} + +void +san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + emap_t *emap) { + emap_assert_not_mapped(tsdn, emap, edata); + /* + * We don't want to touch the emap of about to be destroyed extents, as + * they have been unmapped upon eviction from the retained ecache. 
Also, + * we unguard the extents to the right, because retained extents only + * own their right guard page per san_bump_alloc's logic. + */ + san_unguard_pages_impl(tsdn, ehooks, edata, emap, /* left */ false, + /* right */ true, /* remap */ false); +} + +static bool +san_stashed_corrupted(void *ptr, size_t size) { + if (san_junk_ptr_should_slow()) { + for (size_t i = 0; i < size; i++) { + if (((char *)ptr)[i] != (char)uaf_detect_junk) { + return true; + } + } + return false; + } + + void *first, *mid, *last; + san_junk_ptr_locations(ptr, size, &first, &mid, &last); + if (*(uintptr_t *)first != uaf_detect_junk || + *(uintptr_t *)mid != uaf_detect_junk || + *(uintptr_t *)last != uaf_detect_junk) { + return true; + } + + return false; +} + +void +san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize) { + /* + * Verify that the junked-filled & stashed pointers remain unchanged, to + * detect write-after-free. + */ + for (size_t n = 0; n < nstashed; n++) { + void *stashed = ptrs[n]; + assert(stashed != NULL); + assert(cache_bin_nonfast_aligned(stashed)); + if (unlikely(san_stashed_corrupted(stashed, usize))) { + safety_check_fail(": Write-after-free " + "detected on deallocated pointer %p (size %zu).\n", + stashed, usize); + } + } +} + +void +tsd_san_init(tsd_t *tsd) { + *tsd_san_extents_until_guard_smallp_get(tsd) = opt_san_guard_small; + *tsd_san_extents_until_guard_largep_get(tsd) = opt_san_guard_large; +} + +void +san_init(ssize_t lg_san_uaf_align) { + assert(lg_san_uaf_align == -1 || lg_san_uaf_align >= LG_PAGE); + if (lg_san_uaf_align == -1) { + san_cache_bin_nonfast_mask = (uintptr_t)-1; + return; + } + + san_cache_bin_nonfast_mask = ((uintptr_t)1 << lg_san_uaf_align) - 1; +} diff --git a/contrib/jemalloc/src/san_bump.c b/contrib/jemalloc/src/san_bump.c new file mode 100644 index 00000000000000..888974555f2859 --- /dev/null +++ b/contrib/jemalloc/src/san_bump.c @@ -0,0 +1,104 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/san_bump.h" +#include "jemalloc/internal/pac.h" +#include "jemalloc/internal/san.h" +#include "jemalloc/internal/ehooks.h" +#include "jemalloc/internal/edata_cache.h" + +static bool +san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac, + ehooks_t *ehooks, size_t size); + +edata_t * +san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac, + ehooks_t *ehooks, size_t size, bool zero) { + assert(san_bump_enabled()); + + edata_t* to_destroy; + size_t guarded_size = san_one_side_guarded_sz(size); + + malloc_mutex_lock(tsdn, &sba->mtx); + + if (sba->curr_reg == NULL || + edata_size_get(sba->curr_reg) < guarded_size) { + /* + * If the current region can't accommodate the allocation, + * try replacing it with a larger one and destroy current if the + * replacement succeeds. 
+ */ + to_destroy = sba->curr_reg; + bool err = san_bump_grow_locked(tsdn, sba, pac, ehooks, + guarded_size); + if (err) { + goto label_err; + } + } else { + to_destroy = NULL; + } + assert(guarded_size <= edata_size_get(sba->curr_reg)); + size_t trail_size = edata_size_get(sba->curr_reg) - guarded_size; + + edata_t* edata; + if (trail_size != 0) { + edata_t* curr_reg_trail = extent_split_wrapper(tsdn, pac, + ehooks, sba->curr_reg, guarded_size, trail_size, + /* holding_core_locks */ true); + if (curr_reg_trail == NULL) { + goto label_err; + } + edata = sba->curr_reg; + sba->curr_reg = curr_reg_trail; + } else { + edata = sba->curr_reg; + sba->curr_reg = NULL; + } + + malloc_mutex_unlock(tsdn, &sba->mtx); + + assert(!edata_guarded_get(edata)); + assert(sba->curr_reg == NULL || !edata_guarded_get(sba->curr_reg)); + assert(to_destroy == NULL || !edata_guarded_get(to_destroy)); + + if (to_destroy != NULL) { + extent_destroy_wrapper(tsdn, pac, ehooks, to_destroy); + } + + san_guard_pages(tsdn, ehooks, edata, pac->emap, /* left */ false, + /* right */ true, /* remap */ true); + + if (extent_commit_zero(tsdn, ehooks, edata, /* commit */ true, zero, + /* growing_retained */ false)) { + extent_record(tsdn, pac, ehooks, &pac->ecache_retained, + edata); + return NULL; + } + + if (config_prof) { + extent_gdump_add(tsdn, edata); + } + + return edata; +label_err: + malloc_mutex_unlock(tsdn, &sba->mtx); + return NULL; +} + +static bool +san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac, + ehooks_t *ehooks, size_t size) { + malloc_mutex_assert_owner(tsdn, &sba->mtx); + + bool committed = false, zeroed = false; + size_t alloc_size = size > SBA_RETAINED_ALLOC_SIZE ? size : + SBA_RETAINED_ALLOC_SIZE; + assert((alloc_size & PAGE_MASK) == 0); + sba->curr_reg = extent_alloc_wrapper(tsdn, pac, ehooks, NULL, + alloc_size, PAGE, zeroed, &committed, + /* growing_retained */ true); + if (sba->curr_reg == NULL) { + return true; + } + return false; +} diff --git a/contrib/jemalloc/src/sc.c b/contrib/jemalloc/src/sc.c index 89ddb6ba6a9129..e4a94d89f24546 100644 --- a/contrib/jemalloc/src/sc.c +++ b/contrib/jemalloc/src/sc.c @@ -13,9 +13,7 @@ * at least the damage is compartmentalized to this file. */ -sc_data_t sc_data_global; - -static size_t +size_t reg_size_compute(int lg_base, int lg_delta, int ndelta) { return (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta); } @@ -64,9 +62,8 @@ size_class( sc->lg_base = lg_base; sc->lg_delta = lg_delta; sc->ndelta = ndelta; - sc->psz = (reg_size_compute(lg_base, lg_delta, ndelta) - % (ZU(1) << lg_page) == 0); - size_t size = (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta); + size_t size = reg_size_compute(lg_base, lg_delta, ndelta); + sc->psz = (size % (ZU(1) << lg_page) == 0); if (index == 0) { assert(!sc->psz); } @@ -245,7 +242,7 @@ size_classes( assert(sc_data->lg_large_minclass == SC_LG_LARGE_MINCLASS); assert(sc_data->large_maxclass == SC_LARGE_MAXCLASS); - /* + /* * In the allocation fastpath, we want to assume that we can * unconditionally subtract the requested allocation size from * a ssize_t, and detect passing through 0 correctly. 
This @@ -257,12 +254,8 @@ size_classes( void sc_data_init(sc_data_t *sc_data) { - assert(!sc_data->initialized); - - int lg_max_lookup = 12; - size_classes(sc_data, LG_SIZEOF_PTR, LG_QUANTUM, SC_LG_TINY_MIN, - lg_max_lookup, LG_PAGE, 2); + SC_LG_MAX_LOOKUP, LG_PAGE, SC_LG_NGROUP); sc_data->initialized = true; } diff --git a/contrib/jemalloc/src/sec.c b/contrib/jemalloc/src/sec.c new file mode 100644 index 00000000000000..df6755904951ec --- /dev/null +++ b/contrib/jemalloc/src/sec.c @@ -0,0 +1,422 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/sec.h" + +static edata_t *sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, + size_t alignment, bool zero, bool guarded, bool frequent_reuse, + bool *deferred_work_generated); +static bool sec_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, + size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated); +static bool sec_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, + size_t old_size, size_t new_size, bool *deferred_work_generated); +static void sec_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata, + bool *deferred_work_generated); + +static void +sec_bin_init(sec_bin_t *bin) { + bin->being_batch_filled = false; + bin->bytes_cur = 0; + edata_list_active_init(&bin->freelist); +} + +bool +sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback, + const sec_opts_t *opts) { + assert(opts->max_alloc >= PAGE); + + size_t max_alloc = PAGE_FLOOR(opts->max_alloc); + pszind_t npsizes = sz_psz2ind(max_alloc) + 1; + + size_t sz_shards = opts->nshards * sizeof(sec_shard_t); + size_t sz_bins = opts->nshards * (size_t)npsizes * sizeof(sec_bin_t); + size_t sz_alloc = sz_shards + sz_bins; + void *dynalloc = base_alloc(tsdn, base, sz_alloc, CACHELINE); + if (dynalloc == NULL) { + return true; + } + sec_shard_t *shard_cur = (sec_shard_t *)dynalloc; + sec->shards = shard_cur; + sec_bin_t *bin_cur = (sec_bin_t *)&shard_cur[opts->nshards]; + /* Just for asserts, below. */ + sec_bin_t *bin_start = bin_cur; + + for (size_t i = 0; i < opts->nshards; i++) { + sec_shard_t *shard = shard_cur; + shard_cur++; + bool err = malloc_mutex_init(&shard->mtx, "sec_shard", + WITNESS_RANK_SEC_SHARD, malloc_mutex_rank_exclusive); + if (err) { + return true; + } + shard->enabled = true; + shard->bins = bin_cur; + for (pszind_t j = 0; j < npsizes; j++) { + sec_bin_init(&shard->bins[j]); + bin_cur++; + } + shard->bytes_cur = 0; + shard->to_flush_next = 0; + } + /* + * Should have exactly matched the bin_start to the first unused byte + * after the shards. + */ + assert((void *)shard_cur == (void *)bin_start); + /* And the last bin to use up the last bytes of the allocation. */ + assert((char *)bin_cur == ((char *)dynalloc + sz_alloc)); + sec->fallback = fallback; + + + sec->opts = *opts; + sec->npsizes = npsizes; + + /* + * Initialize these last so that an improper use of an SEC whose + * initialization failed will segfault in an easy-to-spot way. + */ + sec->pai.alloc = &sec_alloc; + sec->pai.alloc_batch = &pai_alloc_batch_default; + sec->pai.expand = &sec_expand; + sec->pai.shrink = &sec_shrink; + sec->pai.dalloc = &sec_dalloc; + sec->pai.dalloc_batch = &pai_dalloc_batch_default; + + return false; +} + +static sec_shard_t * +sec_shard_pick(tsdn_t *tsdn, sec_t *sec) { + /* + * Eventually, we should implement affinity, tracking source shard using + * the edata_t's newly freed up fields. For now, just randomly + * distribute across all shards. 
+ */ + if (tsdn_null(tsdn)) { + return &sec->shards[0]; + } + tsd_t *tsd = tsdn_tsd(tsdn); + uint8_t *idxp = tsd_sec_shardp_get(tsd); + if (*idxp == (uint8_t)-1) { + /* + * First use; initialize using the trick from Daniel Lemire's + * "A fast alternative to the modulo reduction. Use a 64 bit + * number to store 32 bits, since we'll deliberately overflow + * when we multiply by the number of shards. + */ + uint64_t rand32 = prng_lg_range_u64(tsd_prng_statep_get(tsd), 32); + uint32_t idx = + (uint32_t)((rand32 * (uint64_t)sec->opts.nshards) >> 32); + assert(idx < (uint32_t)sec->opts.nshards); + *idxp = (uint8_t)idx; + } + return &sec->shards[*idxp]; +} + +/* + * Perhaps surprisingly, this can be called on the alloc pathways; if we hit an + * empty cache, we'll try to fill it, which can push the shard over it's limit. + */ +static void +sec_flush_some_and_unlock(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard) { + malloc_mutex_assert_owner(tsdn, &shard->mtx); + edata_list_active_t to_flush; + edata_list_active_init(&to_flush); + while (shard->bytes_cur > sec->opts.bytes_after_flush) { + /* Pick a victim. */ + sec_bin_t *bin = &shard->bins[shard->to_flush_next]; + + /* Update our victim-picking state. */ + shard->to_flush_next++; + if (shard->to_flush_next == sec->npsizes) { + shard->to_flush_next = 0; + } + + assert(shard->bytes_cur >= bin->bytes_cur); + if (bin->bytes_cur != 0) { + shard->bytes_cur -= bin->bytes_cur; + bin->bytes_cur = 0; + edata_list_active_concat(&to_flush, &bin->freelist); + } + /* + * Either bin->bytes_cur was 0, in which case we didn't touch + * the bin list but it should be empty anyways (or else we + * missed a bytes_cur update on a list modification), or it + * *was* 0 and we emptied it ourselves. Either way, it should + * be empty now. + */ + assert(edata_list_active_empty(&bin->freelist)); + } + + malloc_mutex_unlock(tsdn, &shard->mtx); + bool deferred_work_generated = false; + pai_dalloc_batch(tsdn, sec->fallback, &to_flush, + &deferred_work_generated); +} + +static edata_t * +sec_shard_alloc_locked(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard, + sec_bin_t *bin) { + malloc_mutex_assert_owner(tsdn, &shard->mtx); + if (!shard->enabled) { + return NULL; + } + edata_t *edata = edata_list_active_first(&bin->freelist); + if (edata != NULL) { + edata_list_active_remove(&bin->freelist, edata); + assert(edata_size_get(edata) <= bin->bytes_cur); + bin->bytes_cur -= edata_size_get(edata); + assert(edata_size_get(edata) <= shard->bytes_cur); + shard->bytes_cur -= edata_size_get(edata); + } + return edata; +} + +static edata_t * +sec_batch_fill_and_alloc(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard, + sec_bin_t *bin, size_t size) { + malloc_mutex_assert_not_owner(tsdn, &shard->mtx); + + edata_list_active_t result; + edata_list_active_init(&result); + bool deferred_work_generated = false; + size_t nalloc = pai_alloc_batch(tsdn, sec->fallback, size, + 1 + sec->opts.batch_fill_extra, &result, &deferred_work_generated); + + edata_t *ret = edata_list_active_first(&result); + if (ret != NULL) { + edata_list_active_remove(&result, ret); + } + + malloc_mutex_lock(tsdn, &shard->mtx); + bin->being_batch_filled = false; + /* + * Handle the easy case first: nothing to cache. Note that this can + * only happen in case of OOM, since sec_alloc checks the expected + * number of allocs, and doesn't bother going down the batch_fill + * pathway if there won't be anything left to cache. So to be in this + * code path, we must have asked for > 1 alloc, but only gotten 1 back. 
+ */ + if (nalloc <= 1) { + malloc_mutex_unlock(tsdn, &shard->mtx); + return ret; + } + + size_t new_cached_bytes = (nalloc - 1) * size; + + edata_list_active_concat(&bin->freelist, &result); + bin->bytes_cur += new_cached_bytes; + shard->bytes_cur += new_cached_bytes; + + if (shard->bytes_cur > sec->opts.max_bytes) { + sec_flush_some_and_unlock(tsdn, sec, shard); + } else { + malloc_mutex_unlock(tsdn, &shard->mtx); + } + + return ret; +} + +static edata_t * +sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero, + bool guarded, bool frequent_reuse, bool *deferred_work_generated) { + assert((size & PAGE_MASK) == 0); + assert(!guarded); + + sec_t *sec = (sec_t *)self; + + if (zero || alignment > PAGE || sec->opts.nshards == 0 + || size > sec->opts.max_alloc) { + return pai_alloc(tsdn, sec->fallback, size, alignment, zero, + /* guarded */ false, frequent_reuse, + deferred_work_generated); + } + pszind_t pszind = sz_psz2ind(size); + assert(pszind < sec->npsizes); + + sec_shard_t *shard = sec_shard_pick(tsdn, sec); + sec_bin_t *bin = &shard->bins[pszind]; + bool do_batch_fill = false; + + malloc_mutex_lock(tsdn, &shard->mtx); + edata_t *edata = sec_shard_alloc_locked(tsdn, sec, shard, bin); + if (edata == NULL) { + if (!bin->being_batch_filled + && sec->opts.batch_fill_extra > 0) { + bin->being_batch_filled = true; + do_batch_fill = true; + } + } + malloc_mutex_unlock(tsdn, &shard->mtx); + if (edata == NULL) { + if (do_batch_fill) { + edata = sec_batch_fill_and_alloc(tsdn, sec, shard, bin, + size); + } else { + edata = pai_alloc(tsdn, sec->fallback, size, alignment, + zero, /* guarded */ false, frequent_reuse, + deferred_work_generated); + } + } + return edata; +} + +static bool +sec_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, + size_t new_size, bool zero, bool *deferred_work_generated) { + sec_t *sec = (sec_t *)self; + return pai_expand(tsdn, sec->fallback, edata, old_size, new_size, zero, + deferred_work_generated); +} + +static bool +sec_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, + size_t new_size, bool *deferred_work_generated) { + sec_t *sec = (sec_t *)self; + return pai_shrink(tsdn, sec->fallback, edata, old_size, new_size, + deferred_work_generated); +} + +static void +sec_flush_all_locked(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard) { + malloc_mutex_assert_owner(tsdn, &shard->mtx); + shard->bytes_cur = 0; + edata_list_active_t to_flush; + edata_list_active_init(&to_flush); + for (pszind_t i = 0; i < sec->npsizes; i++) { + sec_bin_t *bin = &shard->bins[i]; + bin->bytes_cur = 0; + edata_list_active_concat(&to_flush, &bin->freelist); + } + + /* + * Ordinarily we would try to avoid doing the batch deallocation while + * holding the shard mutex, but the flush_all pathways only happen when + * we're disabling the HPA or resetting the arena, both of which are + * rare pathways. + */ + bool deferred_work_generated = false; + pai_dalloc_batch(tsdn, sec->fallback, &to_flush, + &deferred_work_generated); +} + +static void +sec_shard_dalloc_and_unlock(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard, + edata_t *edata) { + malloc_mutex_assert_owner(tsdn, &shard->mtx); + assert(shard->bytes_cur <= sec->opts.max_bytes); + size_t size = edata_size_get(edata); + pszind_t pszind = sz_psz2ind(size); + assert(pszind < sec->npsizes); + /* + * Prepending here results in LIFO allocation per bin, which seems + * reasonable. 
+ */ + sec_bin_t *bin = &shard->bins[pszind]; + edata_list_active_prepend(&bin->freelist, edata); + bin->bytes_cur += size; + shard->bytes_cur += size; + if (shard->bytes_cur > sec->opts.max_bytes) { + /* + * We've exceeded the shard limit. We make two nods in the + * direction of fragmentation avoidance: we flush everything in + * the shard, rather than one particular bin, and we hold the + * lock while flushing (in case one of the extents we flush is + * highly preferred from a fragmentation-avoidance perspective + * in the backing allocator). This has the extra advantage of + * not requiring advanced cache balancing strategies. + */ + sec_flush_some_and_unlock(tsdn, sec, shard); + malloc_mutex_assert_not_owner(tsdn, &shard->mtx); + } else { + malloc_mutex_unlock(tsdn, &shard->mtx); + } +} + +static void +sec_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata, + bool *deferred_work_generated) { + sec_t *sec = (sec_t *)self; + if (sec->opts.nshards == 0 + || edata_size_get(edata) > sec->opts.max_alloc) { + pai_dalloc(tsdn, sec->fallback, edata, + deferred_work_generated); + return; + } + sec_shard_t *shard = sec_shard_pick(tsdn, sec); + malloc_mutex_lock(tsdn, &shard->mtx); + if (shard->enabled) { + sec_shard_dalloc_and_unlock(tsdn, sec, shard, edata); + } else { + malloc_mutex_unlock(tsdn, &shard->mtx); + pai_dalloc(tsdn, sec->fallback, edata, + deferred_work_generated); + } +} + +void +sec_flush(tsdn_t *tsdn, sec_t *sec) { + for (size_t i = 0; i < sec->opts.nshards; i++) { + malloc_mutex_lock(tsdn, &sec->shards[i].mtx); + sec_flush_all_locked(tsdn, sec, &sec->shards[i]); + malloc_mutex_unlock(tsdn, &sec->shards[i].mtx); + } +} + +void +sec_disable(tsdn_t *tsdn, sec_t *sec) { + for (size_t i = 0; i < sec->opts.nshards; i++) { + malloc_mutex_lock(tsdn, &sec->shards[i].mtx); + sec->shards[i].enabled = false; + sec_flush_all_locked(tsdn, sec, &sec->shards[i]); + malloc_mutex_unlock(tsdn, &sec->shards[i].mtx); + } +} + +void +sec_stats_merge(tsdn_t *tsdn, sec_t *sec, sec_stats_t *stats) { + size_t sum = 0; + for (size_t i = 0; i < sec->opts.nshards; i++) { + /* + * We could save these lock acquisitions by making bytes_cur + * atomic, but stats collection is rare anyways and we expect + * the number and type of stats to get more interesting. 
+ */ + malloc_mutex_lock(tsdn, &sec->shards[i].mtx); + sum += sec->shards[i].bytes_cur; + malloc_mutex_unlock(tsdn, &sec->shards[i].mtx); + } + stats->bytes += sum; +} + +void +sec_mutex_stats_read(tsdn_t *tsdn, sec_t *sec, + mutex_prof_data_t *mutex_prof_data) { + for (size_t i = 0; i < sec->opts.nshards; i++) { + malloc_mutex_lock(tsdn, &sec->shards[i].mtx); + malloc_mutex_prof_accum(tsdn, mutex_prof_data, + &sec->shards[i].mtx); + malloc_mutex_unlock(tsdn, &sec->shards[i].mtx); + } +} + +void +sec_prefork2(tsdn_t *tsdn, sec_t *sec) { + for (size_t i = 0; i < sec->opts.nshards; i++) { + malloc_mutex_prefork(tsdn, &sec->shards[i].mtx); + } +} + +void +sec_postfork_parent(tsdn_t *tsdn, sec_t *sec) { + for (size_t i = 0; i < sec->opts.nshards; i++) { + malloc_mutex_postfork_parent(tsdn, &sec->shards[i].mtx); + } +} + +void +sec_postfork_child(tsdn_t *tsdn, sec_t *sec) { + for (size_t i = 0; i < sec->opts.nshards; i++) { + malloc_mutex_postfork_child(tsdn, &sec->shards[i].mtx); + } +} diff --git a/contrib/jemalloc/src/stats.c b/contrib/jemalloc/src/stats.c index 118e05d2911a6b..efc70fd3c8b2b6 100644 --- a/contrib/jemalloc/src/stats.c +++ b/contrib/jemalloc/src/stats.c @@ -1,12 +1,13 @@ -#define JEMALLOC_STATS_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/emitter.h" +#include "jemalloc/internal/fxp.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex_prof.h" +#include "jemalloc/internal/prof_stats.h" const char *global_mutex_names[mutex_prof_num_global_mutexes] = { #define OP(mtx) #mtx, @@ -25,22 +26,28 @@ const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = { xmallctl(n, (void *)v, &sz, NULL, 0); \ } while (0) -#define CTL_M2_GET(n, i, v, t) do { \ - size_t mib[CTL_MAX_DEPTH]; \ - size_t miblen = sizeof(mib) / sizeof(size_t); \ +#define CTL_LEAF_PREPARE(mib, miblen, name) do { \ + assert(miblen < CTL_MAX_DEPTH); \ + size_t miblen_new = CTL_MAX_DEPTH; \ + xmallctlmibnametomib(mib, miblen, name, &miblen_new); \ + assert(miblen_new > miblen); \ +} while (0) + +#define CTL_LEAF(mib, miblen, leaf, v, t) do { \ + assert(miblen < CTL_MAX_DEPTH); \ + size_t miblen_new = CTL_MAX_DEPTH; \ size_t sz = sizeof(t); \ - xmallctlnametomib(n, mib, &miblen); \ - mib[2] = (i); \ - xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ + xmallctlbymibname(mib, miblen, leaf, &miblen_new, (void *)v, \ + &sz, NULL, 0); \ + assert(miblen_new == miblen + 1); \ } while (0) -#define CTL_M2_M4_GET(n, i, j, v, t) do { \ +#define CTL_M2_GET(n, i, v, t) do { \ size_t mib[CTL_MAX_DEPTH]; \ size_t miblen = sizeof(mib) / sizeof(size_t); \ size_t sz = sizeof(t); \ xmallctlnametomib(n, mib, &miblen); \ mib[2] = (i); \ - mib[4] = (j); \ xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ } while (0) @@ -50,6 +57,13 @@ const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = { bool opt_stats_print = false; char opt_stats_print_opts[stats_print_tot_num_options+1] = ""; +int64_t opt_stats_interval = STATS_INTERVAL_DEFAULT; +char opt_stats_interval_opts[stats_print_tot_num_options+1] = ""; + +static counter_accum_t stats_interval_accumulated; +/* Per thread batch accum size for stats_interval. 
*/ +static uint64_t stats_interval_accum_batch; + /******************************************************************************/ static uint64_t @@ -91,13 +105,6 @@ get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) { return false; } -#define MUTEX_CTL_STR_MAX_LENGTH 128 -static void -gen_mutex_ctl_str(char *str, size_t buf_len, const char *prefix, - const char *mutex, const char *counter) { - malloc_snprintf(str, buf_len, "stats.%s.%s.%s", prefix, mutex, counter); -} - static void mutex_stats_init_cols(emitter_row_t *row, const char *table_name, emitter_col_t *name, @@ -118,7 +125,7 @@ mutex_stats_init_cols(emitter_row_t *row, const char *table_name, #define WIDTH_uint32_t 12 #define WIDTH_uint64_t 16 -#define OP(counter, counter_type, human, derived, base_counter) \ +#define OP(counter, counter_type, human, derived, base_counter) \ col = &col_##counter_type[k_##counter_type]; \ ++k_##counter_type; \ emitter_col_init(col, row); \ @@ -134,27 +141,31 @@ mutex_stats_init_cols(emitter_row_t *row, const char *table_name, } static void -mutex_stats_read_global(const char *name, emitter_col_t *col_name, +mutex_stats_read_global(size_t mib[], size_t miblen, const char *name, + emitter_col_t *col_name, emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters], uint64_t uptime) { - char cmd[MUTEX_CTL_STR_MAX_LENGTH]; + CTL_LEAF_PREPARE(mib, miblen, name); + size_t miblen_name = miblen + 1; col_name->str_val = name; emitter_col_t *dst; #define EMITTER_TYPE_uint32_t emitter_type_uint32 #define EMITTER_TYPE_uint64_t emitter_type_uint64 -#define OP(counter, counter_type, human, derived, base_counter) \ +#define OP(counter, counter_type, human, derived, base_counter) \ dst = &col_##counter_type[mutex_counter_##counter]; \ dst->type = EMITTER_TYPE_##counter_type; \ if (!derived) { \ - gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ - "mutexes", name, #counter); \ - CTL_GET(cmd, (counter_type *)&dst->bool_val, counter_type); \ - } else { \ - emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \ - dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \ + CTL_LEAF(mib, miblen_name, #counter, \ + (counter_type *)&dst->bool_val, counter_type); \ + } else { \ + emitter_col_t *base = \ + &col_##counter_type[mutex_counter_##base_counter]; \ + dst->counter_type##_val = \ + (counter_type)rate_per_second( \ + base->counter_type##_val, uptime); \ } MUTEX_PROF_COUNTERS #undef OP @@ -163,28 +174,31 @@ mutex_stats_read_global(const char *name, emitter_col_t *col_name, } static void -mutex_stats_read_arena(unsigned arena_ind, mutex_prof_arena_ind_t mutex_ind, - const char *name, emitter_col_t *col_name, +mutex_stats_read_arena(size_t mib[], size_t miblen, const char *name, + emitter_col_t *col_name, emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters], uint64_t uptime) { - char cmd[MUTEX_CTL_STR_MAX_LENGTH]; + CTL_LEAF_PREPARE(mib, miblen, name); + size_t miblen_name = miblen + 1; col_name->str_val = name; emitter_col_t *dst; #define EMITTER_TYPE_uint32_t emitter_type_uint32 #define EMITTER_TYPE_uint64_t emitter_type_uint64 -#define OP(counter, counter_type, human, derived, base_counter) \ +#define OP(counter, counter_type, human, derived, base_counter) \ dst = &col_##counter_type[mutex_counter_##counter]; \ dst->type = EMITTER_TYPE_##counter_type; \ - if (!derived) { \ - gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ - 
"arenas.0.mutexes", arena_mutex_names[mutex_ind], #counter);\ - CTL_M2_GET(cmd, arena_ind, (counter_type *)&dst->bool_val, counter_type); \ - } else { \ - emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \ - dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \ + if (!derived) { \ + CTL_LEAF(mib, miblen_name, #counter, \ + (counter_type *)&dst->bool_val, counter_type); \ + } else { \ + emitter_col_t *base = \ + &col_##counter_type[mutex_counter_##base_counter]; \ + dst->counter_type##_val = \ + (counter_type)rate_per_second( \ + base->counter_type##_val, uptime); \ } MUTEX_PROF_COUNTERS #undef OP @@ -193,26 +207,29 @@ mutex_stats_read_arena(unsigned arena_ind, mutex_prof_arena_ind_t mutex_ind, } static void -mutex_stats_read_arena_bin(unsigned arena_ind, unsigned bin_ind, +mutex_stats_read_arena_bin(size_t mib[], size_t miblen, emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters], uint64_t uptime) { - char cmd[MUTEX_CTL_STR_MAX_LENGTH]; + CTL_LEAF_PREPARE(mib, miblen, "mutex"); + size_t miblen_mutex = miblen + 1; + emitter_col_t *dst; #define EMITTER_TYPE_uint32_t emitter_type_uint32 #define EMITTER_TYPE_uint64_t emitter_type_uint64 -#define OP(counter, counter_type, human, derived, base_counter) \ +#define OP(counter, counter_type, human, derived, base_counter) \ dst = &col_##counter_type[mutex_counter_##counter]; \ dst->type = EMITTER_TYPE_##counter_type; \ - if (!derived) { \ - gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ - "arenas.0.bins.0","mutex", #counter); \ - CTL_M2_M4_GET(cmd, arena_ind, bin_ind, \ - (counter_type *)&dst->bool_val, counter_type); \ - } else { \ - emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \ - dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \ + if (!derived) { \ + CTL_LEAF(mib, miblen_mutex, #counter, \ + (counter_type *)&dst->bool_val, counter_type); \ + } else { \ + emitter_col_t *base = \ + &col_##counter_type[mutex_counter_##base_counter]; \ + dst->counter_type##_val = \ + (counter_type)rate_per_second( \ + base->counter_type##_val, uptime); \ } MUTEX_PROF_COUNTERS #undef OP @@ -249,25 +266,42 @@ mutex_stats_emit(emitter_t *emitter, emitter_row_t *row, #undef EMITTER_TYPE_uint64_t } -#define COL(row_name, column_name, left_or_right, col_width, etype) \ - emitter_col_t col_##column_name; \ - emitter_col_init(&col_##column_name, &row_name); \ - col_##column_name.justify = emitter_justify_##left_or_right; \ - col_##column_name.width = col_width; \ +#define COL_DECLARE(column_name) \ + emitter_col_t col_##column_name; + +#define COL_INIT(row_name, column_name, left_or_right, col_width, etype)\ + emitter_col_init(&col_##column_name, &row_name); \ + col_##column_name.justify = emitter_justify_##left_or_right; \ + col_##column_name.width = col_width; \ col_##column_name.type = emitter_type_##etype; -#define COL_HDR(row_name, column_name, human, left_or_right, col_width, etype) \ - COL(row_name, column_name, left_or_right, col_width, etype) \ - emitter_col_t header_##column_name; \ - emitter_col_init(&header_##column_name, &header_##row_name); \ - header_##column_name.justify = emitter_justify_##left_or_right; \ - header_##column_name.width = col_width; \ - header_##column_name.type = emitter_type_title; \ +#define COL(row_name, column_name, left_or_right, col_width, etype) \ + COL_DECLARE(column_name); \ + COL_INIT(row_name, column_name, left_or_right, col_width, etype) + +#define 
COL_HDR_DECLARE(column_name) \ + COL_DECLARE(column_name); \ + emitter_col_t header_##column_name; + +#define COL_HDR_INIT(row_name, column_name, human, left_or_right, \ + col_width, etype) \ + COL_INIT(row_name, column_name, left_or_right, col_width, etype)\ + emitter_col_init(&header_##column_name, &header_##row_name); \ + header_##column_name.justify = emitter_justify_##left_or_right; \ + header_##column_name.width = col_width; \ + header_##column_name.type = emitter_type_title; \ header_##column_name.str_val = human ? human : #column_name; +#define COL_HDR(row_name, column_name, human, left_or_right, col_width, \ + etype) \ + COL_HDR_DECLARE(column_name) \ + COL_HDR_INIT(row_name, column_name, human, left_or_right, \ + col_width, etype) +JEMALLOC_COLD static void -stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t uptime) { +stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, + uint64_t uptime) { size_t page; bool in_gap, in_gap_prev; unsigned nbins, j; @@ -282,6 +316,9 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti emitter_row_t row; emitter_row_init(&row); + bool prof_stats_on = config_prof && opt_prof && opt_prof_stats + && i == MALLCTL_ARENAS_ALL; + COL_HDR(row, size, NULL, right, 20, size) COL_HDR(row, ind, NULL, right, 4, unsigned) COL_HDR(row, allocated, NULL, right, 13, uint64) @@ -291,6 +328,16 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti COL_HDR(row, ndalloc_ps, "(#/sec)", right, 8, uint64) COL_HDR(row, nrequests, NULL, right, 13, uint64) COL_HDR(row, nrequests_ps, "(#/sec)", right, 10, uint64) + COL_HDR_DECLARE(prof_live_requested); + COL_HDR_DECLARE(prof_live_count); + COL_HDR_DECLARE(prof_accum_requested); + COL_HDR_DECLARE(prof_accum_count); + if (prof_stats_on) { + COL_HDR_INIT(row, prof_live_requested, NULL, right, 21, uint64) + COL_HDR_INIT(row, prof_live_count, NULL, right, 17, uint64) + COL_HDR_INIT(row, prof_accum_requested, NULL, right, 21, uint64) + COL_HDR_INIT(row, prof_accum_count, NULL, right, 17, uint64) + } COL_HDR(row, nshards, NULL, right, 9, unsigned) COL_HDR(row, curregs, NULL, right, 13, size) COL_HDR(row, curslabs, NULL, right, 13, size) @@ -334,6 +381,19 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti emitter_table_row(emitter, &header_row); emitter_json_array_kv_begin(emitter, "bins"); + size_t stats_arenas_mib[CTL_MAX_DEPTH]; + CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas"); + stats_arenas_mib[2] = i; + CTL_LEAF_PREPARE(stats_arenas_mib, 3, "bins"); + + size_t arenas_bin_mib[CTL_MAX_DEPTH]; + CTL_LEAF_PREPARE(arenas_bin_mib, 0, "arenas.bin"); + + size_t prof_stats_mib[CTL_MAX_DEPTH]; + if (prof_stats_on) { + CTL_LEAF_PREPARE(prof_stats_mib, 0, "prof.stats.bins"); + } + for (j = 0, in_gap = false; j < nbins; j++) { uint64_t nslabs; size_t reg_size, slab_size, curregs; @@ -342,44 +402,57 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti uint32_t nregs, nshards; uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; uint64_t nreslabs; + prof_stats_t prof_live; + prof_stats_t prof_accum; + + stats_arenas_mib[4] = j; + arenas_bin_mib[2] = j; + + CTL_LEAF(stats_arenas_mib, 5, "nslabs", &nslabs, uint64_t); + + if (prof_stats_on) { + prof_stats_mib[3] = j; + CTL_LEAF(prof_stats_mib, 4, "live", &prof_live, + prof_stats_t); + CTL_LEAF(prof_stats_mib, 4, "accum", &prof_accum, + prof_stats_t); + } - CTL_M2_M4_GET("stats.arenas.0.bins.0.nslabs", i, j, &nslabs, - 
uint64_t); in_gap_prev = in_gap; - in_gap = (nslabs == 0); + if (prof_stats_on) { + in_gap = (nslabs == 0 && prof_accum.count == 0); + } else { + in_gap = (nslabs == 0); + } if (in_gap_prev && !in_gap) { emitter_table_printf(emitter, " ---\n"); } - CTL_M2_GET("arenas.bin.0.size", j, ®_size, size_t); - CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t); - CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size, size_t); - CTL_M2_GET("arenas.bin.0.nshards", j, &nshards, uint32_t); + if (in_gap && !emitter_outputs_json(emitter)) { + continue; + } - CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc, - uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc, - uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs, - size_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j, - &nrequests, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, &nfills, + CTL_LEAF(arenas_bin_mib, 3, "size", ®_size, size_t); + CTL_LEAF(arenas_bin_mib, 3, "nregs", &nregs, uint32_t); + CTL_LEAF(arenas_bin_mib, 3, "slab_size", &slab_size, size_t); + CTL_LEAF(arenas_bin_mib, 3, "nshards", &nshards, uint32_t); + CTL_LEAF(stats_arenas_mib, 5, "nmalloc", &nmalloc, uint64_t); + CTL_LEAF(stats_arenas_mib, 5, "ndalloc", &ndalloc, uint64_t); + CTL_LEAF(stats_arenas_mib, 5, "curregs", &curregs, size_t); + CTL_LEAF(stats_arenas_mib, 5, "nrequests", &nrequests, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, &nflushes, - uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j, &nreslabs, - uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs, - size_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.nonfull_slabs", i, j, &nonfull_slabs, + CTL_LEAF(stats_arenas_mib, 5, "nfills", &nfills, uint64_t); + CTL_LEAF(stats_arenas_mib, 5, "nflushes", &nflushes, uint64_t); + CTL_LEAF(stats_arenas_mib, 5, "nreslabs", &nreslabs, uint64_t); + CTL_LEAF(stats_arenas_mib, 5, "curslabs", &curslabs, size_t); + CTL_LEAF(stats_arenas_mib, 5, "nonfull_slabs", &nonfull_slabs, size_t); if (mutex) { - mutex_stats_read_arena_bin(i, j, col_mutex64, - col_mutex32, uptime); + mutex_stats_read_arena_bin(stats_arenas_mib, 5, + col_mutex64, col_mutex32, uptime); } emitter_json_object_begin(emitter); @@ -391,6 +464,16 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti &curregs); emitter_json_kv(emitter, "nrequests", emitter_type_uint64, &nrequests); + if (prof_stats_on) { + emitter_json_kv(emitter, "prof_live_requested", + emitter_type_uint64, &prof_live.req_sum); + emitter_json_kv(emitter, "prof_live_count", + emitter_type_uint64, &prof_live.count); + emitter_json_kv(emitter, "prof_accum_requested", + emitter_type_uint64, &prof_accum.req_sum); + emitter_json_kv(emitter, "prof_accum_count", + emitter_type_uint64, &prof_accum.count); + } emitter_json_kv(emitter, "nfills", emitter_type_uint64, &nfills); emitter_json_kv(emitter, "nflushes", emitter_type_uint64, @@ -437,6 +520,13 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime); col_nrequests.uint64_val = nrequests; col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime); + if (prof_stats_on) { + col_prof_live_requested.uint64_val = prof_live.req_sum; + col_prof_live_count.uint64_val = prof_live.count; + col_prof_accum_requested.uint64_val = + prof_accum.req_sum; + col_prof_accum_count.uint64_val = prof_accum.count; + } col_nshards.unsigned_val = nshards; 
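The hunks above replace the per-iteration string lookups (`CTL_M2_GET`/`CTL_M2_M4_GET`) with MIBs that are resolved once via `CTL_LEAF_PREPARE` and then reused, so only the trailing index/leaf varies on each bin. A minimal sketch of the same pattern against the public mallctl interface, assuming FreeBSD's `<malloc_np.h>` prototypes (the `print_bin_sizes` helper name is illustrative, not part of the patch):

```c
#include <stdio.h>
#include <malloc_np.h>	/* mallctl, mallctlnametomib, mallctlbymib */

static void
print_bin_sizes(void) {
	unsigned nbins;
	size_t sz = sizeof(nbins);
	if (mallctl("arenas.nbins", &nbins, &sz, NULL, 0) != 0) {
		return;
	}

	/* Resolve "arenas.bin.0.size" to a MIB once... */
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(mib[0]);
	if (mallctlnametomib("arenas.bin.0.size", mib, &miblen) != 0) {
		return;
	}

	/* ...then patch only the bin index on each iteration. */
	for (unsigned i = 0; i < nbins; i++) {
		size_t bin_size;
		size_t bsz = sizeof(bin_size);
		mib[2] = (size_t)i;
		if (mallctlbymib(mib, miblen, &bin_size, &bsz, NULL, 0) == 0) {
			printf("bin %u: %zu bytes\n", i, bin_size);
		}
	}
}
```

The internal `CTL_LEAF` macro goes one step further than this sketch by resolving the final leaf name relative to the cached prefix (via the new `xmallctlbymibname` helper), which is what lets the stats code walk `stats.arenas.<i>.bins.<j>.*` without rebuilding the full name each time.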
col_curregs.size_val = curregs; col_curslabs.size_val = curslabs; @@ -466,6 +556,7 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti } } +JEMALLOC_COLD static void stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) { unsigned nbins, nlextents, j; @@ -479,6 +570,9 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) { emitter_row_t row; emitter_row_init(&row); + bool prof_stats_on = config_prof && opt_prof && opt_prof_stats + && i == MALLCTL_ARENAS_ALL; + COL_HDR(row, size, NULL, right, 20, size) COL_HDR(row, ind, NULL, right, 4, unsigned) COL_HDR(row, allocated, NULL, right, 13, size) @@ -488,6 +582,16 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) { COL_HDR(row, ndalloc_ps, "(#/sec)", right, 8, uint64) COL_HDR(row, nrequests, NULL, right, 13, uint64) COL_HDR(row, nrequests_ps, "(#/sec)", right, 8, uint64) + COL_HDR_DECLARE(prof_live_requested) + COL_HDR_DECLARE(prof_live_count) + COL_HDR_DECLARE(prof_accum_requested) + COL_HDR_DECLARE(prof_accum_count) + if (prof_stats_on) { + COL_HDR_INIT(row, prof_live_requested, NULL, right, 21, uint64) + COL_HDR_INIT(row, prof_live_count, NULL, right, 17, uint64) + COL_HDR_INIT(row, prof_accum_requested, NULL, right, 21, uint64) + COL_HDR_INIT(row, prof_accum_count, NULL, right, 17, uint64) + } COL_HDR(row, curlextents, NULL, right, 13, size) /* As with bins, we label the large extents table. */ @@ -496,16 +600,33 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) { emitter_table_row(emitter, &header_row); emitter_json_array_kv_begin(emitter, "lextents"); + size_t stats_arenas_mib[CTL_MAX_DEPTH]; + CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas"); + stats_arenas_mib[2] = i; + CTL_LEAF_PREPARE(stats_arenas_mib, 3, "lextents"); + + size_t arenas_lextent_mib[CTL_MAX_DEPTH]; + CTL_LEAF_PREPARE(arenas_lextent_mib, 0, "arenas.lextent"); + + size_t prof_stats_mib[CTL_MAX_DEPTH]; + if (prof_stats_on) { + CTL_LEAF_PREPARE(prof_stats_mib, 0, "prof.stats.lextents"); + } + for (j = 0, in_gap = false; j < nlextents; j++) { uint64_t nmalloc, ndalloc, nrequests; size_t lextent_size, curlextents; + prof_stats_t prof_live; + prof_stats_t prof_accum; + + stats_arenas_mib[4] = j; + arenas_lextent_mib[2] = j; + + CTL_LEAF(stats_arenas_mib, 5, "nmalloc", &nmalloc, uint64_t); + CTL_LEAF(stats_arenas_mib, 5, "ndalloc", &ndalloc, uint64_t); + CTL_LEAF(stats_arenas_mib, 5, "nrequests", &nrequests, + uint64_t); - CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j, - &nmalloc, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j, - &ndalloc, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.lextents.0.nrequests", i, j, - &nrequests, uint64_t); in_gap_prev = in_gap; in_gap = (nrequests == 0); @@ -514,11 +635,29 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) { " ---\n"); } - CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size, size_t); - CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents", i, j, - &curlextents, size_t); + CTL_LEAF(arenas_lextent_mib, 3, "size", &lextent_size, size_t); + CTL_LEAF(stats_arenas_mib, 5, "curlextents", &curlextents, + size_t); + + if (prof_stats_on) { + prof_stats_mib[3] = j; + CTL_LEAF(prof_stats_mib, 4, "live", &prof_live, + prof_stats_t); + CTL_LEAF(prof_stats_mib, 4, "accum", &prof_accum, + prof_stats_t); + } emitter_json_object_begin(emitter); + if (prof_stats_on) { + emitter_json_kv(emitter, "prof_live_requested", + emitter_type_uint64, 
&prof_live.req_sum); + emitter_json_kv(emitter, "prof_live_count", + emitter_type_uint64, &prof_live.count); + emitter_json_kv(emitter, "prof_accum_requested", + emitter_type_uint64, &prof_accum.req_sum); + emitter_json_kv(emitter, "prof_accum_count", + emitter_type_uint64, &prof_accum.count); + } emitter_json_kv(emitter, "curlextents", emitter_type_size, &curlextents); emitter_json_object_end(emitter); @@ -532,6 +671,13 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) { col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime); col_nrequests.uint64_val = nrequests; col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime); + if (prof_stats_on) { + col_prof_live_requested.uint64_val = prof_live.req_sum; + col_prof_live_count.uint64_val = prof_live.count; + col_prof_accum_requested.uint64_val = + prof_accum.req_sum; + col_prof_accum_count.uint64_val = prof_accum.count; + } col_curlextents.size_val = curlextents; if (!in_gap) { @@ -544,6 +690,7 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) { } } +JEMALLOC_COLD static void stats_arena_extents_print(emitter_t *emitter, unsigned i) { unsigned j; @@ -570,22 +717,27 @@ stats_arena_extents_print(emitter_t *emitter, unsigned i) { emitter_table_row(emitter, &header_row); emitter_json_array_kv_begin(emitter, "extents"); + size_t stats_arenas_mib[CTL_MAX_DEPTH]; + CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas"); + stats_arenas_mib[2] = i; + CTL_LEAF_PREPARE(stats_arenas_mib, 3, "extents"); + in_gap = false; for (j = 0; j < SC_NPSIZES; j++) { size_t ndirty, nmuzzy, nretained, total, dirty_bytes, muzzy_bytes, retained_bytes, total_bytes; - CTL_M2_M4_GET("stats.arenas.0.extents.0.ndirty", i, j, - &ndirty, size_t); - CTL_M2_M4_GET("stats.arenas.0.extents.0.nmuzzy", i, j, - &nmuzzy, size_t); - CTL_M2_M4_GET("stats.arenas.0.extents.0.nretained", i, j, - &nretained, size_t); - CTL_M2_M4_GET("stats.arenas.0.extents.0.dirty_bytes", i, j, - &dirty_bytes, size_t); - CTL_M2_M4_GET("stats.arenas.0.extents.0.muzzy_bytes", i, j, - &muzzy_bytes, size_t); - CTL_M2_M4_GET("stats.arenas.0.extents.0.retained_bytes", i, j, + stats_arenas_mib[4] = j; + + CTL_LEAF(stats_arenas_mib, 5, "ndirty", &ndirty, size_t); + CTL_LEAF(stats_arenas_mib, 5, "nmuzzy", &nmuzzy, size_t); + CTL_LEAF(stats_arenas_mib, 5, "nretained", &nretained, size_t); + CTL_LEAF(stats_arenas_mib, 5, "dirty_bytes", &dirty_bytes, + size_t); + CTL_LEAF(stats_arenas_mib, 5, "muzzy_bytes", &muzzy_bytes, + size_t); + CTL_LEAF(stats_arenas_mib, 5, "retained_bytes", &retained_bytes, size_t); + total = ndirty + nmuzzy + nretained; total_bytes = dirty_bytes + muzzy_bytes + retained_bytes; @@ -632,6 +784,230 @@ stats_arena_extents_print(emitter_t *emitter, unsigned i) { } } +static void +stats_arena_hpa_shard_print(emitter_t *emitter, unsigned i, uint64_t uptime) { + emitter_row_t header_row; + emitter_row_init(&header_row); + emitter_row_t row; + emitter_row_init(&row); + + uint64_t npurge_passes; + uint64_t npurges; + uint64_t nhugifies; + uint64_t ndehugifies; + + CTL_M2_GET("stats.arenas.0.hpa_shard.npurge_passes", + i, &npurge_passes, uint64_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.npurges", + i, &npurges, uint64_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.nhugifies", + i, &nhugifies, uint64_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.ndehugifies", + i, &ndehugifies, uint64_t); + + size_t npageslabs_huge; + size_t nactive_huge; + size_t ndirty_huge; + + size_t npageslabs_nonhuge; + size_t nactive_nonhuge; + size_t ndirty_nonhuge; + 
size_t nretained_nonhuge; + + size_t sec_bytes; + CTL_M2_GET("stats.arenas.0.hpa_sec_bytes", i, &sec_bytes, size_t); + emitter_kv(emitter, "sec_bytes", "Bytes in small extent cache", + emitter_type_size, &sec_bytes); + + /* First, global stats. */ + emitter_table_printf(emitter, + "HPA shard stats:\n" + " Purge passes: %" FMTu64 " (%" FMTu64 " / sec)\n" + " Purges: %" FMTu64 " (%" FMTu64 " / sec)\n" + " Hugeifies: %" FMTu64 " (%" FMTu64 " / sec)\n" + " Dehugifies: %" FMTu64 " (%" FMTu64 " / sec)\n" + "\n", + npurge_passes, rate_per_second(npurge_passes, uptime), + npurges, rate_per_second(npurges, uptime), + nhugifies, rate_per_second(nhugifies, uptime), + ndehugifies, rate_per_second(ndehugifies, uptime)); + + emitter_json_object_kv_begin(emitter, "hpa_shard"); + emitter_json_kv(emitter, "npurge_passes", emitter_type_uint64, + &npurge_passes); + emitter_json_kv(emitter, "npurges", emitter_type_uint64, + &npurges); + emitter_json_kv(emitter, "nhugifies", emitter_type_uint64, + &nhugifies); + emitter_json_kv(emitter, "ndehugifies", emitter_type_uint64, + &ndehugifies); + + /* Next, full slab stats. */ + CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.npageslabs_huge", + i, &npageslabs_huge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.nactive_huge", + i, &nactive_huge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.ndirty_huge", + i, &ndirty_huge, size_t); + + CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.npageslabs_nonhuge", + i, &npageslabs_nonhuge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.nactive_nonhuge", + i, &nactive_nonhuge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.ndirty_nonhuge", + i, &ndirty_nonhuge, size_t); + nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES + - nactive_nonhuge - ndirty_nonhuge; + + emitter_table_printf(emitter, + " In full slabs:\n" + " npageslabs: %zu huge, %zu nonhuge\n" + " nactive: %zu huge, %zu nonhuge \n" + " ndirty: %zu huge, %zu nonhuge \n" + " nretained: 0 huge, %zu nonhuge \n", + npageslabs_huge, npageslabs_nonhuge, + nactive_huge, nactive_nonhuge, + ndirty_huge, ndirty_nonhuge, + nretained_nonhuge); + + emitter_json_object_kv_begin(emitter, "full_slabs"); + emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size, + &npageslabs_huge); + emitter_json_kv(emitter, "nactive_huge", emitter_type_size, + &nactive_huge); + emitter_json_kv(emitter, "nactive_huge", emitter_type_size, + &nactive_huge); + emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size, + &npageslabs_nonhuge); + emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size, + &nactive_nonhuge); + emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size, + &ndirty_nonhuge); + emitter_json_object_end(emitter); /* End "full_slabs" */ + + /* Next, empty slab stats. 
*/ + CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.npageslabs_huge", + i, &npageslabs_huge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.nactive_huge", + i, &nactive_huge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.ndirty_huge", + i, &ndirty_huge, size_t); + + CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.npageslabs_nonhuge", + i, &npageslabs_nonhuge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.nactive_nonhuge", + i, &nactive_nonhuge, size_t); + CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.ndirty_nonhuge", + i, &ndirty_nonhuge, size_t); + nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES + - nactive_nonhuge - ndirty_nonhuge; + + emitter_table_printf(emitter, + " In empty slabs:\n" + " npageslabs: %zu huge, %zu nonhuge\n" + " nactive: %zu huge, %zu nonhuge \n" + " ndirty: %zu huge, %zu nonhuge \n" + " nretained: 0 huge, %zu nonhuge \n" + "\n", + npageslabs_huge, npageslabs_nonhuge, + nactive_huge, nactive_nonhuge, + ndirty_huge, ndirty_nonhuge, + nretained_nonhuge); + + emitter_json_object_kv_begin(emitter, "empty_slabs"); + emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size, + &npageslabs_huge); + emitter_json_kv(emitter, "nactive_huge", emitter_type_size, + &nactive_huge); + emitter_json_kv(emitter, "nactive_huge", emitter_type_size, + &nactive_huge); + emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size, + &npageslabs_nonhuge); + emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size, + &nactive_nonhuge); + emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size, + &ndirty_nonhuge); + emitter_json_object_end(emitter); /* End "empty_slabs" */ + + COL_HDR(row, size, NULL, right, 20, size) + COL_HDR(row, ind, NULL, right, 4, unsigned) + COL_HDR(row, npageslabs_huge, NULL, right, 16, size) + COL_HDR(row, nactive_huge, NULL, right, 16, size) + COL_HDR(row, ndirty_huge, NULL, right, 16, size) + COL_HDR(row, npageslabs_nonhuge, NULL, right, 20, size) + COL_HDR(row, nactive_nonhuge, NULL, right, 20, size) + COL_HDR(row, ndirty_nonhuge, NULL, right, 20, size) + COL_HDR(row, nretained_nonhuge, NULL, right, 20, size) + + size_t stats_arenas_mib[CTL_MAX_DEPTH]; + CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas"); + stats_arenas_mib[2] = i; + CTL_LEAF_PREPARE(stats_arenas_mib, 3, "hpa_shard.nonfull_slabs"); + + emitter_table_row(emitter, &header_row); + emitter_json_array_kv_begin(emitter, "nonfull_slabs"); + bool in_gap = false; + for (pszind_t j = 0; j < PSSET_NPSIZES && j < SC_NPSIZES; j++) { + stats_arenas_mib[5] = j; + + CTL_LEAF(stats_arenas_mib, 6, "npageslabs_huge", + &npageslabs_huge, size_t); + CTL_LEAF(stats_arenas_mib, 6, "nactive_huge", + &nactive_huge, size_t); + CTL_LEAF(stats_arenas_mib, 6, "ndirty_huge", + &ndirty_huge, size_t); + + CTL_LEAF(stats_arenas_mib, 6, "npageslabs_nonhuge", + &npageslabs_nonhuge, size_t); + CTL_LEAF(stats_arenas_mib, 6, "nactive_nonhuge", + &nactive_nonhuge, size_t); + CTL_LEAF(stats_arenas_mib, 6, "ndirty_nonhuge", + &ndirty_nonhuge, size_t); + nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES + - nactive_nonhuge - ndirty_nonhuge; + + bool in_gap_prev = in_gap; + in_gap = (npageslabs_huge == 0 && npageslabs_nonhuge == 0); + if (in_gap_prev && !in_gap) { + emitter_table_printf(emitter, + " ---\n"); + } + + col_size.size_val = sz_pind2sz(j); + col_ind.size_val = j; + col_npageslabs_huge.size_val = npageslabs_huge; + col_nactive_huge.size_val = nactive_huge; + col_ndirty_huge.size_val = ndirty_huge; + col_npageslabs_nonhuge.size_val = 
npageslabs_nonhuge; + col_nactive_nonhuge.size_val = nactive_nonhuge; + col_ndirty_nonhuge.size_val = ndirty_nonhuge; + col_nretained_nonhuge.size_val = nretained_nonhuge; + if (!in_gap) { + emitter_table_row(emitter, &row); + } + + emitter_json_object_begin(emitter); + emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size, + &npageslabs_huge); + emitter_json_kv(emitter, "nactive_huge", emitter_type_size, + &nactive_huge); + emitter_json_kv(emitter, "ndirty_huge", emitter_type_size, + &ndirty_huge); + emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size, + &npageslabs_nonhuge); + emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size, + &nactive_nonhuge); + emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size, + &ndirty_nonhuge); + emitter_json_object_end(emitter); + } + emitter_json_array_end(emitter); /* End "nonfull_slabs" */ + emitter_json_object_end(emitter); /* End "hpa_shard" */ + if (in_gap) { + emitter_table_printf(emitter, " ---\n"); + } +} + static void stats_arena_mutexes_print(emitter_t *emitter, unsigned arena_ind, uint64_t uptime) { emitter_row_t row; @@ -645,21 +1021,27 @@ stats_arena_mutexes_print(emitter_t *emitter, unsigned arena_ind, uint64_t uptim emitter_json_object_kv_begin(emitter, "mutexes"); emitter_table_row(emitter, &row); + size_t stats_arenas_mib[CTL_MAX_DEPTH]; + CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas"); + stats_arenas_mib[2] = arena_ind; + CTL_LEAF_PREPARE(stats_arenas_mib, 3, "mutexes"); + for (mutex_prof_arena_ind_t i = 0; i < mutex_prof_num_arena_mutexes; i++) { const char *name = arena_mutex_names[i]; emitter_json_object_kv_begin(emitter, name); - mutex_stats_read_arena(arena_ind, i, name, &col_name, col64, - col32, uptime); + mutex_stats_read_arena(stats_arenas_mib, 4, name, &col_name, + col64, col32, uptime); mutex_stats_emit(emitter, &row, col64, col32); emitter_json_object_end(emitter); /* Close the mutex dict. */ } emitter_json_object_end(emitter); /* End "mutexes". 
*/ } +JEMALLOC_COLD static void stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, - bool mutex, bool extents) { + bool mutex, bool extents, bool hpa) { unsigned nthreads; const char *dss; ssize_t dirty_decay_ms, muzzy_decay_ms; @@ -673,7 +1055,7 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, size_t large_allocated; uint64_t large_nmalloc, large_ndalloc, large_nrequests, large_nfills, large_nflushes; - size_t tcache_bytes, abandoned_vm; + size_t tcache_bytes, tcache_stashed_bytes, abandoned_vm; uint64_t uptime; CTL_GET("arenas.page", &page, size_t); @@ -817,12 +1199,12 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, COL(alloc_count_row, count_nmalloc, right, 16, title); col_count_nmalloc.str_val = "nmalloc"; - COL(alloc_count_row, count_nmalloc_ps, right, 8, title); + COL(alloc_count_row, count_nmalloc_ps, right, 10, title); col_count_nmalloc_ps.str_val = "(#/sec)"; COL(alloc_count_row, count_ndalloc, right, 16, title); col_count_ndalloc.str_val = "ndalloc"; - COL(alloc_count_row, count_ndalloc_ps, right, 8, title); + COL(alloc_count_row, count_ndalloc_ps, right, 10, title); col_count_ndalloc_ps.str_val = "(#/sec)"; COL(alloc_count_row, count_nrequests, right, 16, title); @@ -962,6 +1344,7 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, GET_AND_EMIT_MEM_STAT(internal) GET_AND_EMIT_MEM_STAT(metadata_thp) GET_AND_EMIT_MEM_STAT(tcache_bytes) + GET_AND_EMIT_MEM_STAT(tcache_stashed_bytes) GET_AND_EMIT_MEM_STAT(resident) GET_AND_EMIT_MEM_STAT(abandoned_vm) GET_AND_EMIT_MEM_STAT(extent_avail) @@ -979,8 +1362,12 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, if (extents) { stats_arena_extents_print(emitter, i); } + if (hpa) { + stats_arena_hpa_shard_print(emitter, i, uptime); + } } +JEMALLOC_COLD static void stats_general_print(emitter_t *emitter) { const char *cpv; @@ -988,14 +1375,18 @@ stats_general_print(emitter_t *emitter) { unsigned uv; uint32_t u32v; uint64_t u64v; + int64_t i64v; ssize_t ssv, ssv2; - size_t sv, bsz, usz, ssz, sssz, cpsz; + size_t sv, bsz, usz, u32sz, u64sz, i64sz, ssz, sssz, cpsz; bsz = sizeof(bool); usz = sizeof(unsigned); ssz = sizeof(size_t); sssz = sizeof(ssize_t); cpsz = sizeof(const char *); + u32sz = sizeof(uint32_t); + i64sz = sizeof(int64_t); + u64sz = sizeof(uint64_t); CTL_GET("version", &cpv, const char *); emitter_kv(emitter, "version", "Version", emitter_type_string, &cpv); @@ -1051,6 +1442,11 @@ stats_general_print(emitter_t *emitter) { #define OPT_WRITE_UNSIGNED(name) \ OPT_WRITE(name, uv, usz, emitter_type_unsigned) +#define OPT_WRITE_INT64(name) \ + OPT_WRITE(name, i64v, i64sz, emitter_type_int64) +#define OPT_WRITE_UINT64(name) \ + OPT_WRITE(name, u64v, u64sz, emitter_type_uint64) + #define OPT_WRITE_SIZE_T(name) \ OPT_WRITE(name, sv, ssz, emitter_type_size) #define OPT_WRITE_SSIZE_T(name) \ @@ -1066,13 +1462,43 @@ stats_general_print(emitter_t *emitter) { OPT_WRITE_BOOL("abort") OPT_WRITE_BOOL("abort_conf") + OPT_WRITE_BOOL("cache_oblivious") OPT_WRITE_BOOL("confirm_conf") OPT_WRITE_BOOL("retain") OPT_WRITE_CHAR_P("dss") OPT_WRITE_UNSIGNED("narenas") OPT_WRITE_CHAR_P("percpu_arena") OPT_WRITE_SIZE_T("oversize_threshold") + OPT_WRITE_BOOL("hpa") + OPT_WRITE_SIZE_T("hpa_slab_max_alloc") + OPT_WRITE_SIZE_T("hpa_hugification_threshold") + OPT_WRITE_UINT64("hpa_hugify_delay_ms") + OPT_WRITE_UINT64("hpa_min_purge_interval_ms") + if (je_mallctl("opt.hpa_dirty_mult", (void *)&u32v, &u32sz, NULL, 0) + == 0) { + /* + * We cheat a 
little and "know" the secret meaning of this + * representation. + */ + if (u32v == (uint32_t)-1) { + const char *neg1 = "-1"; + emitter_kv(emitter, "hpa_dirty_mult", + "opt.hpa_dirty_mult", emitter_type_string, &neg1); + } else { + char buf[FXP_BUF_SIZE]; + fxp_print(u32v, buf); + const char *bufp = buf; + emitter_kv(emitter, "hpa_dirty_mult", + "opt.hpa_dirty_mult", emitter_type_string, &bufp); + } + } + OPT_WRITE_SIZE_T("hpa_sec_nshards") + OPT_WRITE_SIZE_T("hpa_sec_max_alloc") + OPT_WRITE_SIZE_T("hpa_sec_max_bytes") + OPT_WRITE_SIZE_T("hpa_sec_bytes_after_flush") + OPT_WRITE_SIZE_T("hpa_sec_batch_fill_extra") OPT_WRITE_CHAR_P("metadata_thp") + OPT_WRITE_INT64("mutex_max_spin") OPT_WRITE_BOOL_MUTABLE("background_thread", "background_thread") OPT_WRITE_SSIZE_T_MUTABLE("dirty_decay_ms", "arenas.dirty_decay_ms") OPT_WRITE_SSIZE_T_MUTABLE("muzzy_decay_ms", "arenas.muzzy_decay_ms") @@ -1081,8 +1507,17 @@ stats_general_print(emitter_t *emitter) { OPT_WRITE_BOOL("zero") OPT_WRITE_BOOL("utrace") OPT_WRITE_BOOL("xmalloc") + OPT_WRITE_BOOL("experimental_infallible_new") OPT_WRITE_BOOL("tcache") - OPT_WRITE_SSIZE_T("lg_tcache_max") + OPT_WRITE_SIZE_T("tcache_max") + OPT_WRITE_UNSIGNED("tcache_nslots_small_min") + OPT_WRITE_UNSIGNED("tcache_nslots_small_max") + OPT_WRITE_UNSIGNED("tcache_nslots_large") + OPT_WRITE_SSIZE_T("lg_tcache_nslots_mul") + OPT_WRITE_SIZE_T("tcache_gc_incr_bytes") + OPT_WRITE_SIZE_T("tcache_gc_delay_bytes") + OPT_WRITE_UNSIGNED("lg_tcache_flush_small_div") + OPT_WRITE_UNSIGNED("lg_tcache_flush_large_div") OPT_WRITE_CHAR_P("thp") OPT_WRITE_BOOL("prof") OPT_WRITE_CHAR_P("prof_prefix") @@ -1095,8 +1530,14 @@ stats_general_print(emitter_t *emitter) { OPT_WRITE_BOOL("prof_gdump") OPT_WRITE_BOOL("prof_final") OPT_WRITE_BOOL("prof_leak") + OPT_WRITE_BOOL("prof_leak_error") OPT_WRITE_BOOL("stats_print") OPT_WRITE_CHAR_P("stats_print_opts") + OPT_WRITE_BOOL("stats_print") + OPT_WRITE_CHAR_P("stats_print_opts") + OPT_WRITE_INT64("stats_interval") + OPT_WRITE_CHAR_P("stats_interval_opts") + OPT_WRITE_CHAR_P("zero_realloc") emitter_dict_end(emitter); @@ -1167,38 +1608,41 @@ stats_general_print(emitter_t *emitter) { "Maximum thread-cached size class", emitter_type_size, &sv); } - unsigned nbins; - CTL_GET("arenas.nbins", &nbins, unsigned); + unsigned arenas_nbins; + CTL_GET("arenas.nbins", &arenas_nbins, unsigned); emitter_kv(emitter, "nbins", "Number of bin size classes", - emitter_type_unsigned, &nbins); + emitter_type_unsigned, &arenas_nbins); - unsigned nhbins; - CTL_GET("arenas.nhbins", &nhbins, unsigned); + unsigned arenas_nhbins; + CTL_GET("arenas.nhbins", &arenas_nhbins, unsigned); emitter_kv(emitter, "nhbins", "Number of thread-cache bin size classes", - emitter_type_unsigned, &nhbins); + emitter_type_unsigned, &arenas_nhbins); /* * We do enough mallctls in a loop that we actually want to omit them * (not just omit the printing). 
*/ - if (emitter->output == emitter_output_json) { + if (emitter_outputs_json(emitter)) { emitter_json_array_kv_begin(emitter, "bin"); - for (unsigned i = 0; i < nbins; i++) { + size_t arenas_bin_mib[CTL_MAX_DEPTH]; + CTL_LEAF_PREPARE(arenas_bin_mib, 0, "arenas.bin"); + for (unsigned i = 0; i < arenas_nbins; i++) { + arenas_bin_mib[2] = i; emitter_json_object_begin(emitter); - CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t); + CTL_LEAF(arenas_bin_mib, 3, "size", &sv, size_t); emitter_json_kv(emitter, "size", emitter_type_size, &sv); - CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t); + CTL_LEAF(arenas_bin_mib, 3, "nregs", &u32v, uint32_t); emitter_json_kv(emitter, "nregs", emitter_type_uint32, &u32v); - CTL_M2_GET("arenas.bin.0.slab_size", i, &sv, size_t); + CTL_LEAF(arenas_bin_mib, 3, "slab_size", &sv, size_t); emitter_json_kv(emitter, "slab_size", emitter_type_size, &sv); - CTL_M2_GET("arenas.bin.0.nshards", i, &u32v, uint32_t); + CTL_LEAF(arenas_bin_mib, 3, "nshards", &u32v, uint32_t); emitter_json_kv(emitter, "nshards", emitter_type_uint32, &u32v); @@ -1212,12 +1656,15 @@ stats_general_print(emitter_t *emitter) { emitter_kv(emitter, "nlextents", "Number of large size classes", emitter_type_unsigned, &nlextents); - if (emitter->output == emitter_output_json) { + if (emitter_outputs_json(emitter)) { emitter_json_array_kv_begin(emitter, "lextent"); + size_t arenas_lextent_mib[CTL_MAX_DEPTH]; + CTL_LEAF_PREPARE(arenas_lextent_mib, 0, "arenas.lextent"); for (unsigned i = 0; i < nlextents; i++) { + arenas_lextent_mib[2] = i; emitter_json_object_begin(emitter); - CTL_M2_GET("arenas.lextent.0.size", i, &sv, size_t); + CTL_LEAF(arenas_lextent_mib, 3, "size", &sv, size_t); emitter_json_kv(emitter, "size", emitter_type_size, &sv); @@ -1229,9 +1676,10 @@ stats_general_print(emitter_t *emitter) { emitter_json_object_end(emitter); /* Close "arenas" */ } +JEMALLOC_COLD static void stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, - bool unmerged, bool bins, bool large, bool mutex, bool extents) { + bool unmerged, bool bins, bool large, bool mutex, bool extents, bool hpa) { /* * These should be deleted. We keep them around for a while, to aid in * the transition to the emitter code. 
@@ -1239,6 +1687,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, size_t allocated, active, metadata, metadata_thp, resident, mapped, retained; size_t num_background_threads; + size_t zero_reallocs; uint64_t background_thread_num_runs, background_thread_run_interval; CTL_GET("stats.allocated", &allocated, size_t); @@ -1249,6 +1698,8 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, CTL_GET("stats.mapped", &mapped, size_t); CTL_GET("stats.retained", &retained, size_t); + CTL_GET("stats.zero_reallocs", &zero_reallocs, size_t); + if (have_background_thread) { CTL_GET("stats.background_thread.num_threads", &num_background_threads, size_t); @@ -1272,12 +1723,18 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, emitter_json_kv(emitter, "resident", emitter_type_size, &resident); emitter_json_kv(emitter, "mapped", emitter_type_size, &mapped); emitter_json_kv(emitter, "retained", emitter_type_size, &retained); + emitter_json_kv(emitter, "zero_reallocs", emitter_type_size, + &zero_reallocs); emitter_table_printf(emitter, "Allocated: %zu, active: %zu, " "metadata: %zu (n_thp %zu), resident: %zu, mapped: %zu, " "retained: %zu\n", allocated, active, metadata, metadata_thp, resident, mapped, retained); + /* Strange behaviors */ + emitter_table_printf(emitter, + "Count of realloc(non-null-ptr, 0) calls: %zu\n", zero_reallocs); + /* Background thread stats. */ emitter_json_object_kv_begin(emitter, "background_thread"); emitter_json_kv(emitter, "num_threads", emitter_type_size, @@ -1308,9 +1765,11 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, CTL_M2_GET("stats.arenas.0.uptime", 0, &uptime, uint64_t); + size_t stats_mutexes_mib[CTL_MAX_DEPTH]; + CTL_LEAF_PREPARE(stats_mutexes_mib, 0, "stats.mutexes"); for (int i = 0; i < mutex_prof_num_global_mutexes; i++) { - mutex_stats_read_global(global_mutex_names[i], &name, - col64, col32, uptime); + mutex_stats_read_global(stats_mutexes_mib, 2, + global_mutex_names[i], &name, col64, col32, uptime); emitter_json_object_kv_begin(emitter, global_mutex_names[i]); mutex_stats_emit(emitter, &row, col64, col32); emitter_json_object_end(emitter); @@ -1355,7 +1814,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, emitter_table_printf(emitter, "Merged arenas stats:\n"); emitter_json_object_kv_begin(emitter, "merged"); stats_arena_print(emitter, MALLCTL_ARENAS_ALL, bins, - large, mutex, extents); + large, mutex, extents, hpa); emitter_json_object_end(emitter); /* Close "merged". */ } @@ -1366,7 +1825,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, "Destroyed arenas stats:\n"); emitter_json_object_kv_begin(emitter, "destroyed"); stats_arena_print(emitter, MALLCTL_ARENAS_DESTROYED, - bins, large, mutex, extents); + bins, large, mutex, extents, hpa); emitter_json_object_end(emitter); /* Close "destroyed". */ } @@ -1382,7 +1841,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, emitter_table_printf(emitter, "arenas[%s]:\n", arena_ind_str); stats_arena_print(emitter, i, bins, - large, mutex, extents); + large, mutex, extents, hpa); /* Close "". 
*/ emitter_json_object_end(emitter); } @@ -1393,8 +1852,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, } void -stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts) { +stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts) { int err; uint64_t epoch; size_t u64sz; @@ -1437,8 +1895,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque, emitter_t emitter; emitter_init(&emitter, - json ? emitter_output_json : emitter_output_table, write_cb, - cbopaque); + json ? emitter_output_json_compact : emitter_output_table, + write_cb, cbopaque); emitter_begin(&emitter); emitter_table_printf(&emitter, "___ Begin jemalloc statistics ___\n"); emitter_json_object_kv_begin(&emitter, "jemalloc"); @@ -1448,10 +1906,68 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque, } if (config_stats) { stats_print_helper(&emitter, merged, destroyed, unmerged, - bins, large, mutex, extents); + bins, large, mutex, extents, hpa); } emitter_json_object_end(&emitter); /* Closes the "jemalloc" dict. */ emitter_table_printf(&emitter, "--- End jemalloc statistics ---\n"); emitter_end(&emitter); } + +uint64_t +stats_interval_new_event_wait(tsd_t *tsd) { + return stats_interval_accum_batch; +} + +uint64_t +stats_interval_postponed_event_wait(tsd_t *tsd) { + return TE_MIN_START_WAIT; +} + +void +stats_interval_event_handler(tsd_t *tsd, uint64_t elapsed) { + assert(elapsed > 0 && elapsed != TE_INVALID_ELAPSED); + if (counter_accum(tsd_tsdn(tsd), &stats_interval_accumulated, + elapsed)) { + je_malloc_stats_print(NULL, NULL, opt_stats_interval_opts); + } +} + +bool +stats_boot(void) { + uint64_t stats_interval; + if (opt_stats_interval < 0) { + assert(opt_stats_interval == -1); + stats_interval = 0; + stats_interval_accum_batch = 0; + } else{ + /* See comments in stats.h */ + stats_interval = (opt_stats_interval > 0) ? + opt_stats_interval : 1; + uint64_t batch = stats_interval >> + STATS_INTERVAL_ACCUM_LG_BATCH_SIZE; + if (batch > STATS_INTERVAL_ACCUM_BATCH_MAX) { + batch = STATS_INTERVAL_ACCUM_BATCH_MAX; + } else if (batch == 0) { + batch = 1; + } + stats_interval_accum_batch = batch; + } + + return counter_accum_init(&stats_interval_accumulated, stats_interval); +} + +void +stats_prefork(tsdn_t *tsdn) { + counter_prefork(tsdn, &stats_interval_accumulated); +} + +void +stats_postfork_parent(tsdn_t *tsdn) { + counter_postfork_parent(tsdn, &stats_interval_accumulated); +} + +void +stats_postfork_child(tsdn_t *tsdn) { + counter_postfork_child(tsdn, &stats_interval_accumulated); +} diff --git a/contrib/jemalloc/src/sz.c b/contrib/jemalloc/src/sz.c index 8633fb05005e3c..d3115dda7c96de 100644 --- a/contrib/jemalloc/src/sz.c +++ b/contrib/jemalloc/src/sz.c @@ -1,8 +1,57 @@ #include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/sz.h" JEMALLOC_ALIGNED(CACHELINE) size_t sz_pind2sz_tab[SC_NPSIZES+1]; +size_t sz_large_pad; + +size_t +sz_psz_quantize_floor(size_t size) { + size_t ret; + pszind_t pind; + + assert(size > 0); + assert((size & PAGE_MASK) == 0); + + pind = sz_psz2ind(size - sz_large_pad + 1); + if (pind == 0) { + /* + * Avoid underflow. This short-circuit would also do the right + * thing for all sizes in the range for which there are + * PAGE-spaced size classes, but it's simplest to just handle + * the one case that would cause erroneous results. 
+ */ + return size; + } + ret = sz_pind2sz(pind - 1) + sz_large_pad; + assert(ret <= size); + return ret; +} + +size_t +sz_psz_quantize_ceil(size_t size) { + size_t ret; + + assert(size > 0); + assert(size - sz_large_pad <= SC_LARGE_MAXCLASS); + assert((size & PAGE_MASK) == 0); + + ret = sz_psz_quantize_floor(size); + if (ret < size) { + /* + * Skip a quantization that may have an adequately large extent, + * because under-sized extents may be mixed in. This only + * happens when an unusual size is requested, i.e. for aligned + * allocation, and is just one of several places where linear + * search would potentially find sufficiently aligned available + * memory somewhere lower. + */ + ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) + + sz_large_pad; + } + return ret; +} static void sz_boot_pind2sz_tab(const sc_data_t *sc_data) { @@ -57,7 +106,8 @@ sz_boot_size2index_tab(const sc_data_t *sc_data) { } void -sz_boot(const sc_data_t *sc_data) { +sz_boot(const sc_data_t *sc_data, bool cache_oblivious) { + sz_large_pad = cache_oblivious ? PAGE : 0; sz_boot_pind2sz_tab(sc_data); sz_boot_index2size_tab(sc_data); sz_boot_size2index_tab(sc_data); diff --git a/contrib/jemalloc/src/tcache.c b/contrib/jemalloc/src/tcache.c index 50099a9f2cdcda..fa16732e4abfaa 100644 --- a/contrib/jemalloc/src/tcache.c +++ b/contrib/jemalloc/src/tcache.c @@ -1,22 +1,71 @@ -#define JEMALLOC_TCACHE_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/safety_check.h" +#include "jemalloc/internal/san.h" #include "jemalloc/internal/sc.h" /******************************************************************************/ /* Data. */ -bool opt_tcache = true; -ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT; +bool opt_tcache = true; + +/* tcache_maxclass is set to 32KB by default. */ +size_t opt_tcache_max = ((size_t)1) << 15; + +/* Reasonable defaults for min and max values. */ +unsigned opt_tcache_nslots_small_min = 20; +unsigned opt_tcache_nslots_small_max = 200; +unsigned opt_tcache_nslots_large = 20; + +/* + * We attempt to make the number of slots in a tcache bin for a given size class + * equal to the number of objects in a slab times some multiplier. By default, + * the multiplier is 2 (i.e. we set the maximum number of objects in the tcache + * to twice the number of objects in a slab). + * This is bounded by some other constraints as well, like the fact that it + * must be even, must be less than opt_tcache_nslots_small_max, etc.. + */ +ssize_t opt_lg_tcache_nslots_mul = 1; + +/* + * Number of allocation bytes between tcache incremental GCs. Again, this + * default just seems to work well; more tuning is possible. + */ +size_t opt_tcache_gc_incr_bytes = 65536; + +/* + * With default settings, we may end up flushing small bins frequently with + * small flush amounts. To limit this tendency, we can set a number of bytes to + * "delay" by. If we try to flush N M-byte items, we decrease that size-class's + * delay by N * M. So, if delay is 1024 and we're looking at the 64-byte size + * class, we won't do any flushing until we've been asked to flush 1024/64 == 16 + * items. This can happen in any configuration (i.e. being asked to flush 16 + * items once, or 4 items 4 times). + * + * Practically, this is stored as a count of items in a uint8_t, so the + * effective maximum value for a size class is 255 * sz. 
+ */ +size_t opt_tcache_gc_delay_bytes = 0; + +/* + * When a cache bin is flushed because it's full, how much of it do we flush? + * By default, we flush half the maximum number of items. + */ +unsigned opt_lg_tcache_flush_small_div = 1; +unsigned opt_lg_tcache_flush_large_div = 1; cache_bin_info_t *tcache_bin_info; -static unsigned stack_nelms; /* Total stack elms per tcache. */ +/* Total stack size required (per tcache). Include the padding above. */ +static size_t tcache_bin_alloc_size; +static size_t tcache_bin_alloc_alignment; + +/* Number of cache bins enabled, including both large and small. */ unsigned nhbins; +/* Max size class to be cached (can be small or large). */ size_t tcache_maxclass; tcaches_t *tcaches; @@ -37,358 +86,551 @@ tcache_salloc(tsdn_t *tsdn, const void *ptr) { return arena_salloc(tsdn, ptr); } -void -tcache_event_hard(tsd_t *tsd, tcache_t *tcache) { - szind_t binind = tcache->next_gc_bin; +uint64_t +tcache_gc_new_event_wait(tsd_t *tsd) { + return opt_tcache_gc_incr_bytes; +} + +uint64_t +tcache_gc_postponed_event_wait(tsd_t *tsd) { + return TE_MIN_START_WAIT; +} + +uint64_t +tcache_gc_dalloc_new_event_wait(tsd_t *tsd) { + return opt_tcache_gc_incr_bytes; +} - cache_bin_t *tbin; - if (binind < SC_NBINS) { - tbin = tcache_small_bin_get(tcache, binind); +uint64_t +tcache_gc_dalloc_postponed_event_wait(tsd_t *tsd) { + return TE_MIN_START_WAIT; +} + +static uint8_t +tcache_gc_item_delay_compute(szind_t szind) { + assert(szind < SC_NBINS); + size_t sz = sz_index2size(szind); + size_t item_delay = opt_tcache_gc_delay_bytes / sz; + size_t delay_max = ZU(1) + << (sizeof(((tcache_slow_t *)NULL)->bin_flush_delay_items[0]) * 8); + if (item_delay >= delay_max) { + item_delay = delay_max - 1; + } + return (uint8_t)item_delay; +} + +static void +tcache_gc_small(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache, + szind_t szind) { + /* Aim to flush 3/4 of items below low-water. */ + assert(szind < SC_NBINS); + + cache_bin_t *cache_bin = &tcache->bins[szind]; + cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin, + &tcache_bin_info[szind]); + cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin, + &tcache_bin_info[szind]); + assert(!tcache_slow->bin_refilled[szind]); + + size_t nflush = low_water - (low_water >> 2); + if (nflush < tcache_slow->bin_flush_delay_items[szind]) { + /* Workaround for a conversion warning. */ + uint8_t nflush_uint8 = (uint8_t)nflush; + assert(sizeof(tcache_slow->bin_flush_delay_items[0]) == + sizeof(nflush_uint8)); + tcache_slow->bin_flush_delay_items[szind] -= nflush_uint8; + return; } else { - tbin = tcache_large_bin_get(tcache, binind); + tcache_slow->bin_flush_delay_items[szind] + = tcache_gc_item_delay_compute(szind); } - if (tbin->low_water > 0) { - /* - * Flush (ceiling) 3/4 of the objects below the low water mark. - */ - if (binind < SC_NBINS) { - tcache_bin_flush_small(tsd, tcache, tbin, binind, - tbin->ncached - tbin->low_water + (tbin->low_water - >> 2)); - /* - * Reduce fill count by 2X. Limit lg_fill_div such that - * the fill count is always at least 1. - */ - cache_bin_info_t *tbin_info = &tcache_bin_info[binind]; - if ((tbin_info->ncached_max >> - (tcache->lg_fill_div[binind] + 1)) >= 1) { - tcache->lg_fill_div[binind]++; - } + + tcache_bin_flush_small(tsd, tcache, cache_bin, szind, + (unsigned)(ncached - nflush)); + + /* + * Reduce fill count by 2X. Limit lg_fill_div such that + * the fill count is always at least 1. 
+ */ + if ((cache_bin_info_ncached_max(&tcache_bin_info[szind]) + >> (tcache_slow->lg_fill_div[szind] + 1)) >= 1) { + tcache_slow->lg_fill_div[szind]++; + } +} + +static void +tcache_gc_large(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache, + szind_t szind) { + /* Like the small GC; flush 3/4 of untouched items. */ + assert(szind >= SC_NBINS); + cache_bin_t *cache_bin = &tcache->bins[szind]; + cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin, + &tcache_bin_info[szind]); + cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin, + &tcache_bin_info[szind]); + tcache_bin_flush_large(tsd, tcache, cache_bin, szind, + (unsigned)(ncached - low_water + (low_water >> 2))); +} + +static void +tcache_event(tsd_t *tsd) { + tcache_t *tcache = tcache_get(tsd); + if (tcache == NULL) { + return; + } + + tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd); + szind_t szind = tcache_slow->next_gc_bin; + bool is_small = (szind < SC_NBINS); + cache_bin_t *cache_bin = &tcache->bins[szind]; + + tcache_bin_flush_stashed(tsd, tcache, cache_bin, szind, is_small); + + cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin, + &tcache_bin_info[szind]); + if (low_water > 0) { + if (is_small) { + tcache_gc_small(tsd, tcache_slow, tcache, szind); } else { - tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached - - tbin->low_water + (tbin->low_water >> 2), tcache); + tcache_gc_large(tsd, tcache_slow, tcache, szind); } - } else if (tbin->low_water < 0) { + } else if (is_small && tcache_slow->bin_refilled[szind]) { + assert(low_water == 0); /* * Increase fill count by 2X for small bins. Make sure * lg_fill_div stays greater than 0. */ - if (binind < SC_NBINS && tcache->lg_fill_div[binind] > 1) { - tcache->lg_fill_div[binind]--; + if (tcache_slow->lg_fill_div[szind] > 1) { + tcache_slow->lg_fill_div[szind]--; } + tcache_slow->bin_refilled[szind] = false; } - tbin->low_water = tbin->ncached; + cache_bin_low_water_set(cache_bin); - tcache->next_gc_bin++; - if (tcache->next_gc_bin == nhbins) { - tcache->next_gc_bin = 0; + tcache_slow->next_gc_bin++; + if (tcache_slow->next_gc_bin == nhbins) { + tcache_slow->next_gc_bin = 0; } } +void +tcache_gc_event_handler(tsd_t *tsd, uint64_t elapsed) { + assert(elapsed == TE_INVALID_ELAPSED); + tcache_event(tsd); +} + +void +tcache_gc_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed) { + assert(elapsed == TE_INVALID_ELAPSED); + tcache_event(tsd); +} + void * -tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, - cache_bin_t *tbin, szind_t binind, bool *tcache_success) { +tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, + tcache_t *tcache, cache_bin_t *cache_bin, szind_t binind, + bool *tcache_success) { + tcache_slow_t *tcache_slow = tcache->tcache_slow; void *ret; - assert(tcache->arena != NULL); - arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind, - config_prof ? tcache->prof_accumbytes : 0); - if (config_prof) { - tcache->prof_accumbytes = 0; - } - ret = cache_bin_alloc_easy(tbin, tcache_success); + assert(tcache_slow->arena != NULL); + unsigned nfill = cache_bin_info_ncached_max(&tcache_bin_info[binind]) + >> tcache_slow->lg_fill_div[binind]; + arena_cache_bin_fill_small(tsdn, arena, cache_bin, + &tcache_bin_info[binind], binind, nfill); + tcache_slow->bin_refilled[binind] = true; + ret = cache_bin_alloc(cache_bin, tcache_success); return ret; } -/* Enabled with --enable-extra-size-check. 
*/ +static const void * +tcache_bin_flush_ptr_getter(void *arr_ctx, size_t ind) { + cache_bin_ptr_array_t *arr = (cache_bin_ptr_array_t *)arr_ctx; + return arr->ptr[ind]; +} + static void -tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind, - size_t nflush, extent_t **extents){ - rtree_ctx_t rtree_ctx_fallback; - rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); +tcache_bin_flush_metadata_visitor(void *szind_sum_ctx, + emap_full_alloc_ctx_t *alloc_ctx) { + size_t *szind_sum = (size_t *)szind_sum_ctx; + *szind_sum -= alloc_ctx->szind; + util_prefetch_write_range(alloc_ctx->edata, sizeof(edata_t)); +} - /* - * Verify that the items in the tcache all have the correct size; this - * is useful for catching sized deallocation bugs, also to fail early - * instead of corrupting metadata. Since this can be turned on for opt - * builds, avoid the branch in the loop. - */ - szind_t szind; - size_t sz_sum = binind * nflush; - for (unsigned i = 0 ; i < nflush; i++) { - rtree_extent_szind_read(tsdn, &extents_rtree, - rtree_ctx, (uintptr_t)*(tbin->avail - 1 - i), true, - &extents[i], &szind); - sz_sum -= szind; - } - if (sz_sum != 0) { - safety_check_fail(": size mismatch in thread cache " - "detected, likely caused by sized deallocation bugs by " - "application. Abort.\n"); - abort(); +JEMALLOC_NOINLINE static void +tcache_bin_flush_size_check_fail(cache_bin_ptr_array_t *arr, szind_t szind, + size_t nptrs, emap_batch_lookup_result_t *edatas) { + bool found_mismatch = false; + for (size_t i = 0; i < nptrs; i++) { + szind_t true_szind = edata_szind_get(edatas[i].edata); + if (true_szind != szind) { + found_mismatch = true; + safety_check_fail_sized_dealloc( + /* current_dealloc */ false, + /* ptr */ tcache_bin_flush_ptr_getter(arr, i), + /* true_size */ sz_index2size(true_szind), + /* input_size */ sz_index2size(szind)); + } } + assert(found_mismatch); } -void -tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, - szind_t binind, unsigned rem) { - bool merged_stats = false; - - assert(binind < SC_NBINS); - assert((cache_bin_sz_t)rem <= tbin->ncached); +static void +tcache_bin_flush_edatas_lookup(tsd_t *tsd, cache_bin_ptr_array_t *arr, + szind_t binind, size_t nflush, emap_batch_lookup_result_t *edatas) { - arena_t *arena = tcache->arena; - assert(arena != NULL); - unsigned nflush = tbin->ncached - rem; - VARIABLE_ARRAY(extent_t *, item_extent, nflush); + /* + * This gets compiled away when config_opt_safety_checks is false. + * Checks for sized deallocation bugs, failing early rather than + * corrupting metadata. + */ + size_t szind_sum = binind * nflush; + emap_edata_lookup_batch(tsd, &arena_emap_global, nflush, + &tcache_bin_flush_ptr_getter, (void *)arr, + &tcache_bin_flush_metadata_visitor, (void *)&szind_sum, + edatas); + if (config_opt_safety_checks && unlikely(szind_sum != 0)) { + tcache_bin_flush_size_check_fail(arr, binind, nflush, edatas); + } +} - /* Look up extent once per item. 
*/ - if (config_opt_safety_checks) { - tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind, - nflush, item_extent); +JEMALLOC_ALWAYS_INLINE bool +tcache_bin_flush_match(edata_t *edata, unsigned cur_arena_ind, + unsigned cur_binshard, bool small) { + if (small) { + return edata_arena_ind_get(edata) == cur_arena_ind + && edata_binshard_get(edata) == cur_binshard; } else { - for (unsigned i = 0 ; i < nflush; i++) { - item_extent[i] = iealloc(tsd_tsdn(tsd), - *(tbin->avail - 1 - i)); - } + return edata_arena_ind_get(edata) == cur_arena_ind; } - while (nflush > 0) { - /* Lock the arena bin associated with the first object. */ - extent_t *extent = item_extent[0]; - unsigned bin_arena_ind = extent_arena_ind_get(extent); - arena_t *bin_arena = arena_get(tsd_tsdn(tsd), bin_arena_ind, - false); - unsigned binshard = extent_binshard_get(extent); - assert(binshard < bin_infos[binind].n_shards); - bin_t *bin = &bin_arena->bins[binind].bin_shards[binshard]; - - if (config_prof && bin_arena == arena) { - if (arena_prof_accum(tsd_tsdn(tsd), arena, - tcache->prof_accumbytes)) { - prof_idump(tsd_tsdn(tsd)); - } - tcache->prof_accumbytes = 0; - } +} - malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); - if (config_stats && bin_arena == arena && !merged_stats) { - merged_stats = true; - bin->stats.nflushes++; - bin->stats.nrequests += tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - } - unsigned ndeferred = 0; - for (unsigned i = 0; i < nflush; i++) { - void *ptr = *(tbin->avail - 1 - i); - extent = item_extent[i]; - assert(ptr != NULL && extent != NULL); - - if (extent_arena_ind_get(extent) == bin_arena_ind - && extent_binshard_get(extent) == binshard) { - arena_dalloc_bin_junked_locked(tsd_tsdn(tsd), - bin_arena, bin, binind, extent, ptr); - } else { - /* - * This object was allocated via a different - * arena bin than the one that is currently - * locked. Stash the object, so that it can be - * handled in a future pass. - */ - *(tbin->avail - 1 - ndeferred) = ptr; - item_extent[ndeferred] = extent; - ndeferred++; - } - } - malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); - arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred); - nflush = ndeferred; - } - if (config_stats && !merged_stats) { - /* - * The flush loop didn't happen to flush to this thread's - * arena, so the stats didn't get merged. Manually do so now. - */ - unsigned binshard; - bin_t *bin = arena_bin_choose_lock(tsd_tsdn(tsd), arena, binind, - &binshard); - bin->stats.nflushes++; - bin->stats.nrequests += tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); - } +JEMALLOC_ALWAYS_INLINE void +tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin, + szind_t binind, cache_bin_ptr_array_t *ptrs, unsigned nflush, bool small) { + tcache_slow_t *tcache_slow = tcache->tcache_slow; + /* + * A couple lookup calls take tsdn; declare it once for convenience + * instead of calling tsd_tsdn(tsd) all the time. 
+ */ + tsdn_t *tsdn = tsd_tsdn(tsd); - memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * - sizeof(void *)); - tbin->ncached = rem; - if (tbin->ncached < tbin->low_water) { - tbin->low_water = tbin->ncached; + if (small) { + assert(binind < SC_NBINS); + } else { + assert(binind < nhbins); } -} + arena_t *tcache_arena = tcache_slow->arena; + assert(tcache_arena != NULL); -void -tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind, - unsigned rem, tcache_t *tcache) { - bool merged_stats = false; + /* + * Variable length array must have > 0 length; the last element is never + * touched (it's just included to satisfy the no-zero-length rule). + */ + VARIABLE_ARRAY(emap_batch_lookup_result_t, item_edata, nflush + 1); + tcache_bin_flush_edatas_lookup(tsd, ptrs, binind, nflush, item_edata); - assert(binind < nhbins); - assert((cache_bin_sz_t)rem <= tbin->ncached); + /* + * The slabs where we freed the last remaining object in the slab (and + * so need to free the slab itself). + * Used only if small == true. + */ + unsigned dalloc_count = 0; + VARIABLE_ARRAY(edata_t *, dalloc_slabs, nflush + 1); - arena_t *tcache_arena = tcache->arena; - assert(tcache_arena != NULL); - unsigned nflush = tbin->ncached - rem; - VARIABLE_ARRAY(extent_t *, item_extent, nflush); - -#ifndef JEMALLOC_EXTRA_SIZE_CHECK - /* Look up extent once per item. */ - for (unsigned i = 0 ; i < nflush; i++) { - item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i)); - } -#else - tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind, nflush, - item_extent); -#endif + /* + * We're about to grab a bunch of locks. If one of them happens to be + * the one guarding the arena-level stats counters we flush our + * thread-local ones to, we do so under one critical section. + */ + bool merged_stats = false; while (nflush > 0) { - /* Lock the arena associated with the first object. */ - extent_t *extent = item_extent[0]; - unsigned locked_arena_ind = extent_arena_ind_get(extent); - arena_t *locked_arena = arena_get(tsd_tsdn(tsd), - locked_arena_ind, false); - bool idump; - - if (config_prof) { - idump = false; + /* Lock the arena, or bin, associated with the first object. */ + edata_t *edata = item_edata[0].edata; + unsigned cur_arena_ind = edata_arena_ind_get(edata); + arena_t *cur_arena = arena_get(tsdn, cur_arena_ind, false); + + /* + * These assignments are always overwritten when small is true, + * and their values are always ignored when small is false, but + * to avoid the technical UB when we pass them as parameters, we + * need to intialize them. + */ + unsigned cur_binshard = 0; + bin_t *cur_bin = NULL; + if (small) { + cur_binshard = edata_binshard_get(edata); + cur_bin = arena_get_bin(cur_arena, binind, + cur_binshard); + assert(cur_binshard < bin_infos[binind].n_shards); + /* + * If you're looking at profiles, you might think this + * is a good place to prefetch the bin stats, which are + * often a cache miss. This turns out not to be + * helpful on the workloads we've looked at, with moving + * the bin stats next to the lock seeming to do better. 
+ */ } - bool lock_large = !arena_is_auto(locked_arena); - if (lock_large) { - malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx); + if (small) { + malloc_mutex_lock(tsdn, &cur_bin->lock); } - for (unsigned i = 0; i < nflush; i++) { - void *ptr = *(tbin->avail - 1 - i); - assert(ptr != NULL); - extent = item_extent[i]; - if (extent_arena_ind_get(extent) == locked_arena_ind) { - large_dalloc_prep_junked_locked(tsd_tsdn(tsd), - extent); - } + if (!small && !arena_is_auto(cur_arena)) { + malloc_mutex_lock(tsdn, &cur_arena->large_mtx); } - if ((config_prof || config_stats) && - (locked_arena == tcache_arena)) { - if (config_prof) { - idump = arena_prof_accum(tsd_tsdn(tsd), - tcache_arena, tcache->prof_accumbytes); - tcache->prof_accumbytes = 0; + + /* + * If we acquired the right lock and have some stats to flush, + * flush them. + */ + if (config_stats && tcache_arena == cur_arena + && !merged_stats) { + merged_stats = true; + if (small) { + cur_bin->stats.nflushes++; + cur_bin->stats.nrequests += + cache_bin->tstats.nrequests; + cache_bin->tstats.nrequests = 0; + } else { + arena_stats_large_flush_nrequests_add(tsdn, + &tcache_arena->stats, binind, + cache_bin->tstats.nrequests); + cache_bin->tstats.nrequests = 0; } - if (config_stats) { - merged_stats = true; - arena_stats_large_flush_nrequests_add( - tsd_tsdn(tsd), &tcache_arena->stats, binind, - tbin->tstats.nrequests); - tbin->tstats.nrequests = 0; + } + + /* + * Large allocations need special prep done. Afterwards, we can + * drop the large lock. + */ + if (!small) { + for (unsigned i = 0; i < nflush; i++) { + void *ptr = ptrs->ptr[i]; + edata = item_edata[i].edata; + assert(ptr != NULL && edata != NULL); + + if (tcache_bin_flush_match(edata, cur_arena_ind, + cur_binshard, small)) { + large_dalloc_prep_locked(tsdn, + edata); + } } } - if (lock_large) { - malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx); + if (!small && !arena_is_auto(cur_arena)) { + malloc_mutex_unlock(tsdn, &cur_arena->large_mtx); } + /* Deallocate whatever we can. */ unsigned ndeferred = 0; + /* Init only to avoid used-uninitialized warning. */ + arena_dalloc_bin_locked_info_t dalloc_bin_info = {0}; + if (small) { + arena_dalloc_bin_locked_begin(&dalloc_bin_info, binind); + } for (unsigned i = 0; i < nflush; i++) { - void *ptr = *(tbin->avail - 1 - i); - extent = item_extent[i]; - assert(ptr != NULL && extent != NULL); - - if (extent_arena_ind_get(extent) == locked_arena_ind) { - large_dalloc_finish(tsd_tsdn(tsd), extent); - } else { + void *ptr = ptrs->ptr[i]; + edata = item_edata[i].edata; + assert(ptr != NULL && edata != NULL); + if (!tcache_bin_flush_match(edata, cur_arena_ind, + cur_binshard, small)) { /* - * This object was allocated via a different - * arena than the one that is currently locked. - * Stash the object, so that it can be handled - * in a future pass. + * The object was allocated either via a + * different arena, or a different bin in this + * arena. Either way, stash the object so that + * it can be handled in a future pass. */ - *(tbin->avail - 1 - ndeferred) = ptr; - item_extent[ndeferred] = extent; + ptrs->ptr[ndeferred] = ptr; + item_edata[ndeferred].edata = edata; ndeferred++; + continue; + } + if (small) { + if (arena_dalloc_bin_locked_step(tsdn, + cur_arena, cur_bin, &dalloc_bin_info, + binind, edata, ptr)) { + dalloc_slabs[dalloc_count] = edata; + dalloc_count++; + } + } else { + if (large_dalloc_safety_checks(edata, ptr, + binind)) { + /* See the comment in isfree. 
*/ + continue; + } + large_dalloc_finish(tsdn, edata); } } - if (config_prof && idump) { - prof_idump(tsd_tsdn(tsd)); + + if (small) { + arena_dalloc_bin_locked_finish(tsdn, cur_arena, cur_bin, + &dalloc_bin_info); + malloc_mutex_unlock(tsdn, &cur_bin->lock); } - arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush - - ndeferred); + arena_decay_ticks(tsdn, cur_arena, nflush - ndeferred); nflush = ndeferred; } + + /* Handle all deferred slab dalloc. */ + assert(small || dalloc_count == 0); + for (unsigned i = 0; i < dalloc_count; i++) { + edata_t *slab = dalloc_slabs[i]; + arena_slab_dalloc(tsdn, arena_get_from_edata(slab), slab); + + } + if (config_stats && !merged_stats) { - /* - * The flush loop didn't happen to flush to this thread's - * arena, so the stats didn't get merged. Manually do so now. - */ - arena_stats_large_flush_nrequests_add(tsd_tsdn(tsd), - &tcache_arena->stats, binind, tbin->tstats.nrequests); - tbin->tstats.nrequests = 0; + if (small) { + /* + * The flush loop didn't happen to flush to this + * thread's arena, so the stats didn't get merged. + * Manually do so now. + */ + bin_t *bin = arena_bin_choose(tsdn, tcache_arena, + binind, NULL); + malloc_mutex_lock(tsdn, &bin->lock); + bin->stats.nflushes++; + bin->stats.nrequests += cache_bin->tstats.nrequests; + cache_bin->tstats.nrequests = 0; + malloc_mutex_unlock(tsdn, &bin->lock); + } else { + arena_stats_large_flush_nrequests_add(tsdn, + &tcache_arena->stats, binind, + cache_bin->tstats.nrequests); + cache_bin->tstats.nrequests = 0; + } } - memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * - sizeof(void *)); - tbin->ncached = rem; - if (tbin->ncached < tbin->low_water) { - tbin->low_water = tbin->ncached; +} + +JEMALLOC_ALWAYS_INLINE void +tcache_bin_flush_bottom(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin, + szind_t binind, unsigned rem, bool small) { + tcache_bin_flush_stashed(tsd, tcache, cache_bin, binind, small); + + cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin, + &tcache_bin_info[binind]); + assert((cache_bin_sz_t)rem <= ncached); + unsigned nflush = ncached - rem; + + CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nflush); + cache_bin_init_ptr_array_for_flush(cache_bin, &tcache_bin_info[binind], + &ptrs, nflush); + + tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, &ptrs, nflush, + small); + + cache_bin_finish_flush(cache_bin, &tcache_bin_info[binind], &ptrs, + ncached - rem); +} + +void +tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin, + szind_t binind, unsigned rem) { + tcache_bin_flush_bottom(tsd, tcache, cache_bin, binind, rem, true); +} + +void +tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin, + szind_t binind, unsigned rem) { + tcache_bin_flush_bottom(tsd, tcache, cache_bin, binind, rem, false); +} + +/* + * Flushing stashed happens when 1) tcache fill, 2) tcache flush, or 3) tcache + * GC event. This makes sure that the stashed items do not hold memory for too + * long, and new buffers can only be allocated when nothing is stashed. + * + * The downside is, the time between stash and flush may be relatively short, + * especially when the request rate is high. It lowers the chance of detecting + * write-after-free -- however that is a delayed detection anyway, and is less + * of a focus than the memory overhead. 
+ */ +void +tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin, + szind_t binind, bool is_small) { + cache_bin_info_t *info = &tcache_bin_info[binind]; + /* + * The two below are for assertion only. The content of original cached + * items remain unchanged -- the stashed items reside on the other end + * of the stack. Checking the stack head and ncached to verify. + */ + void *head_content = *cache_bin->stack_head; + cache_bin_sz_t orig_cached = cache_bin_ncached_get_local(cache_bin, + info); + + cache_bin_sz_t nstashed = cache_bin_nstashed_get_local(cache_bin, info); + assert(orig_cached + nstashed <= cache_bin_info_ncached_max(info)); + if (nstashed == 0) { + return; } + + CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nstashed); + cache_bin_init_ptr_array_for_stashed(cache_bin, binind, info, &ptrs, + nstashed); + san_check_stashed_ptrs(ptrs.ptr, nstashed, sz_index2size(binind)); + tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, &ptrs, nstashed, + is_small); + cache_bin_finish_flush_stashed(cache_bin, info); + + assert(cache_bin_nstashed_get_local(cache_bin, info) == 0); + assert(cache_bin_ncached_get_local(cache_bin, info) == orig_cached); + assert(head_content == *cache_bin->stack_head); } void -tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { - assert(tcache->arena == NULL); - tcache->arena = arena; +tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow, + tcache_t *tcache, arena_t *arena) { + assert(tcache_slow->arena == NULL); + tcache_slow->arena = arena; if (config_stats) { /* Link into list of extant tcaches. */ malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); - ql_elm_new(tcache, link); - ql_tail_insert(&arena->tcache_ql, tcache, link); + ql_elm_new(tcache_slow, link); + ql_tail_insert(&arena->tcache_ql, tcache_slow, link); cache_bin_array_descriptor_init( - &tcache->cache_bin_array_descriptor, tcache->bins_small, - tcache->bins_large); + &tcache_slow->cache_bin_array_descriptor, tcache->bins); ql_tail_insert(&arena->cache_bin_array_descriptor_ql, - &tcache->cache_bin_array_descriptor, link); + &tcache_slow->cache_bin_array_descriptor, link); malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); } } static void -tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) { - arena_t *arena = tcache->arena; +tcache_arena_dissociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow, + tcache_t *tcache) { + arena_t *arena = tcache_slow->arena; assert(arena != NULL); if (config_stats) { /* Unlink from list of extant tcaches. 
*/ malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); if (config_debug) { bool in_ql = false; - tcache_t *iter; + tcache_slow_t *iter; ql_foreach(iter, &arena->tcache_ql, link) { - if (iter == tcache) { + if (iter == tcache_slow) { in_ql = true; break; } } assert(in_ql); } - ql_remove(&arena->tcache_ql, tcache, link); + ql_remove(&arena->tcache_ql, tcache_slow, link); ql_remove(&arena->cache_bin_array_descriptor_ql, - &tcache->cache_bin_array_descriptor, link); - tcache_stats_merge(tsdn, tcache, arena); + &tcache_slow->cache_bin_array_descriptor, link); + tcache_stats_merge(tsdn, tcache_slow->tcache, arena); malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); } - tcache->arena = NULL; + tcache_slow->arena = NULL; } void -tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { - tcache_arena_dissociate(tsdn, tcache); - tcache_arena_associate(tsdn, tcache, arena); +tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow, + tcache_t *tcache, arena_t *arena) { + tcache_arena_dissociate(tsdn, tcache_slow, tcache); + tcache_arena_associate(tsdn, tcache_slow, tcache, arena); } bool @@ -405,56 +647,80 @@ tsd_tcache_enabled_data_init(tsd_t *tsd) { return false; } -/* Initialize auto tcache (embedded in TSD). */ static void -tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) { - memset(&tcache->link, 0, sizeof(ql_elm(tcache_t))); - tcache->prof_accumbytes = 0; - tcache->next_gc_bin = 0; - tcache->arena = NULL; - - ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR); - - size_t stack_offset = 0; - assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); - memset(tcache->bins_small, 0, sizeof(cache_bin_t) * SC_NBINS); - memset(tcache->bins_large, 0, sizeof(cache_bin_t) * (nhbins - SC_NBINS)); - unsigned i = 0; - for (; i < SC_NBINS; i++) { - tcache->lg_fill_div[i] = 1; - stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); - /* - * avail points past the available space. Allocations will - * access the slots toward higher addresses (for the benefit of - * prefetch). - */ - tcache_small_bin_get(tcache, i)->avail = - (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset); +tcache_init(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache, + void *mem) { + tcache->tcache_slow = tcache_slow; + tcache_slow->tcache = tcache; + + memset(&tcache_slow->link, 0, sizeof(ql_elm(tcache_t))); + tcache_slow->next_gc_bin = 0; + tcache_slow->arena = NULL; + tcache_slow->dyn_alloc = mem; + + /* + * We reserve cache bins for all small size classes, even if some may + * not get used (i.e. bins higher than nhbins). This allows the fast + * and common paths to access cache bin metadata safely w/o worrying + * about which ones are disabled. + */ + unsigned n_reserved_bins = nhbins < SC_NBINS ? SC_NBINS : nhbins; + memset(tcache->bins, 0, sizeof(cache_bin_t) * n_reserved_bins); + + size_t cur_offset = 0; + cache_bin_preincrement(tcache_bin_info, nhbins, mem, + &cur_offset); + for (unsigned i = 0; i < nhbins; i++) { + if (i < SC_NBINS) { + tcache_slow->lg_fill_div[i] = 1; + tcache_slow->bin_refilled[i] = false; + tcache_slow->bin_flush_delay_items[i] + = tcache_gc_item_delay_compute(i); + } + cache_bin_t *cache_bin = &tcache->bins[i]; + cache_bin_init(cache_bin, &tcache_bin_info[i], mem, + &cur_offset); } - for (; i < nhbins; i++) { - stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); - tcache_large_bin_get(tcache, i)->avail = - (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset); + /* + * For small size classes beyond tcache_maxclass (i.e. 
nhbins < NBINS), + * their cache bins are initialized to a state to safely and efficiently + * fail all fastpath alloc / free, so that no additional check around + * nhbins is needed on fastpath. + */ + for (unsigned i = nhbins; i < SC_NBINS; i++) { + /* Disabled small bins. */ + cache_bin_t *cache_bin = &tcache->bins[i]; + void *fake_stack = mem; + size_t fake_offset = 0; + + cache_bin_init(cache_bin, &tcache_bin_info[i], fake_stack, + &fake_offset); + assert(tcache_small_bin_disabled(i, cache_bin)); } - assert(stack_offset == stack_nelms * sizeof(void *)); + + cache_bin_postincrement(tcache_bin_info, nhbins, mem, + &cur_offset); + /* Sanity check that the whole stack is used. */ + assert(cur_offset == tcache_bin_alloc_size); } /* Initialize auto tcache (embedded in TSD). */ bool tsd_tcache_data_init(tsd_t *tsd) { + tcache_slow_t *tcache_slow = tsd_tcache_slowp_get_unsafe(tsd); tcache_t *tcache = tsd_tcachep_get_unsafe(tsd); - assert(tcache_small_bin_get(tcache, 0)->avail == NULL); - size_t size = stack_nelms * sizeof(void *); - /* Avoid false cacheline sharing. */ - size = sz_sa2u(size, CACHELINE); - - void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, - NULL, true, arena_get(TSDN_NULL, 0, true)); - if (avail_array == NULL) { + + assert(cache_bin_still_zero_initialized(&tcache->bins[0])); + size_t alignment = tcache_bin_alloc_alignment; + size_t size = sz_sa2u(tcache_bin_alloc_size, alignment); + + void *mem = ipallocztm(tsd_tsdn(tsd), size, alignment, true, NULL, + true, arena_get(TSDN_NULL, 0, true)); + if (mem == NULL) { return true; } - tcache_init(tsd, tcache, avail_array); + tcache_init(tsd, tcache_slow, tcache, mem); /* * Initialization is a bit tricky here. After malloc init is done, all * threads can rely on arena_choose and associate tcache accordingly. @@ -463,20 +729,22 @@ tsd_tcache_data_init(tsd_t *tsd) { * associate its tcache to a0 temporarily, and later on * arena_choose_hard() will re-associate properly. */ - tcache->arena = NULL; + tcache_slow->arena = NULL; arena_t *arena; if (!malloc_initialized()) { /* If in initialization, assign to a0. */ arena = arena_get(tsd_tsdn(tsd), 0, false); - tcache_arena_associate(tsd_tsdn(tsd), tcache, arena); + tcache_arena_associate(tsd_tsdn(tsd), tcache_slow, tcache, + arena); } else { arena = arena_choose(tsd, NULL); /* This may happen if thread.tcache.enabled is used. */ - if (tcache->arena == NULL) { - tcache_arena_associate(tsd_tsdn(tsd), tcache, arena); + if (tcache_slow->arena == NULL) { + tcache_arena_associate(tsd_tsdn(tsd), tcache_slow, + tcache, arena); } } - assert(arena == tcache->arena); + assert(arena == tcache_slow->arena); return false; } @@ -484,56 +752,49 @@ tsd_tcache_data_init(tsd_t *tsd) { /* Created manual tcache for tcache.create mallctl. */ tcache_t * tcache_create_explicit(tsd_t *tsd) { - tcache_t *tcache; - size_t size, stack_offset; - - size = sizeof(tcache_t); + /* + * We place the cache bin stacks, then the tcache_t, then a pointer to + * the beginning of the whole allocation (for freeing). The makes sure + * the cache bins have the requested alignment. + */ + size_t size = tcache_bin_alloc_size + sizeof(tcache_t) + + sizeof(tcache_slow_t); /* Naturally align the pointer stacks. */ size = PTR_CEILING(size); - stack_offset = size; - size += stack_nelms * sizeof(void *); - /* Avoid false cacheline sharing. 
*/ - size = sz_sa2u(size, CACHELINE); + size = sz_sa2u(size, tcache_bin_alloc_alignment); - tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true, - arena_get(TSDN_NULL, 0, true)); - if (tcache == NULL) { + void *mem = ipallocztm(tsd_tsdn(tsd), size, tcache_bin_alloc_alignment, + true, NULL, true, arena_get(TSDN_NULL, 0, true)); + if (mem == NULL) { return NULL; } + tcache_t *tcache = (void *)((uintptr_t)mem + tcache_bin_alloc_size); + tcache_slow_t *tcache_slow = + (void *)((uintptr_t)mem + tcache_bin_alloc_size + sizeof(tcache_t)); + tcache_init(tsd, tcache_slow, tcache, mem); - tcache_init(tsd, tcache, - (void *)((uintptr_t)tcache + (uintptr_t)stack_offset)); - tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL)); + tcache_arena_associate(tsd_tsdn(tsd), tcache_slow, tcache, + arena_ichoose(tsd, NULL)); return tcache; } static void tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) { - assert(tcache->arena != NULL); - - for (unsigned i = 0; i < SC_NBINS; i++) { - cache_bin_t *tbin = tcache_small_bin_get(tcache, i); - tcache_bin_flush_small(tsd, tcache, tbin, i, 0); + tcache_slow_t *tcache_slow = tcache->tcache_slow; + assert(tcache_slow->arena != NULL); - if (config_stats) { - assert(tbin->tstats.nrequests == 0); + for (unsigned i = 0; i < nhbins; i++) { + cache_bin_t *cache_bin = &tcache->bins[i]; + if (i < SC_NBINS) { + tcache_bin_flush_small(tsd, tcache, cache_bin, i, 0); + } else { + tcache_bin_flush_large(tsd, tcache, cache_bin, i, 0); } - } - for (unsigned i = SC_NBINS; i < nhbins; i++) { - cache_bin_t *tbin = tcache_large_bin_get(tcache, i); - tcache_bin_flush_large(tsd, tbin, i, 0, tcache); - if (config_stats) { - assert(tbin->tstats.nrequests == 0); + assert(cache_bin->tstats.nrequests == 0); } } - - if (config_prof && tcache->prof_accumbytes > 0 && - arena_prof_accum(tsd_tsdn(tsd), tcache->arena, - tcache->prof_accumbytes)) { - prof_idump(tsd_tsdn(tsd)); - } } void @@ -544,20 +805,17 @@ tcache_flush(tsd_t *tsd) { static void tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) { + tcache_slow_t *tcache_slow = tcache->tcache_slow; tcache_flush_cache(tsd, tcache); - arena_t *arena = tcache->arena; - tcache_arena_dissociate(tsd_tsdn(tsd), tcache); + arena_t *arena = tcache_slow->arena; + tcache_arena_dissociate(tsd_tsdn(tsd), tcache_slow, tcache); if (tsd_tcache) { - /* Release the avail array for the TSD embedded auto tcache. */ - void *avail_array = - (void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail - - (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *)); - idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true); - } else { - /* Release both the tcache struct and avail array. */ - idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true); + cache_bin_t *cache_bin = &tcache->bins[0]; + cache_bin_assert_empty(cache_bin, &tcache_bin_info[0]); } + idalloctm(tsd_tsdn(tsd), tcache_slow->dyn_alloc, NULL, NULL, true, + true); /* * The deallocation and tcache flush above may not trigger decay since @@ -571,9 +829,11 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) { if (arena_nthreads_get(arena, false) == 0 && !background_thread_enabled()) { /* Force purging when no threads assigned to the arena anymore. 
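The explicit-tcache path above carves everything out of a single allocation: the strictly aligned cache-bin stacks sit at the base, with the tcache_t and tcache_slow_t packed in behind them, and freeing the base pointer (recorded as dyn_alloc) releases the lot. A rough sketch of that carving with placeholder sizes; BIN_STACK_BYTES and the two dummy structs stand in for tcache_bin_alloc_size, tcache_t and tcache_slow_t:

```
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BIN_STACK_BYTES 4096	/* placeholder for tcache_bin_alloc_size */
struct fast_part { int dummy; };	/* stands in for tcache_t */
struct slow_part { int dummy; };	/* stands in for tcache_slow_t */

int
main(void) {
	size_t total = BIN_STACK_BYTES + sizeof(struct fast_part) +
	    sizeof(struct slow_part);
	/* Round up so the C11 aligned_alloc size constraint is met. */
	void *mem = aligned_alloc(64, (total + 63) & ~(size_t)63);
	if (mem == NULL) {
		return 1;
	}
	/* Bin stacks live at the base; the bookkeeping structs follow. */
	struct fast_part *fast =
	    (void *)((uintptr_t)mem + BIN_STACK_BYTES);
	struct slow_part *slow =
	    (void *)((uintptr_t)fast + sizeof(struct fast_part));
	printf("stacks=%p fast=%p slow=%p\n", mem, (void *)fast, (void *)slow);
	free(mem);	/* freeing the base releases everything at once */
	return 0;
}
```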
*/ - arena_decay(tsd_tsdn(tsd), arena, false, true); + arena_decay(tsd_tsdn(tsd), arena, + /* is_background_thread */ false, /* all */ true); } else { - arena_decay(tsd_tsdn(tsd), arena, false, false); + arena_decay(tsd_tsdn(tsd), arena, + /* is_background_thread */ false, /* all */ false); } } @@ -583,53 +843,51 @@ tcache_cleanup(tsd_t *tsd) { tcache_t *tcache = tsd_tcachep_get(tsd); if (!tcache_available(tsd)) { assert(tsd_tcache_enabled_get(tsd) == false); - if (config_debug) { - assert(tcache_small_bin_get(tcache, 0)->avail == NULL); - } + assert(cache_bin_still_zero_initialized(&tcache->bins[0])); return; } assert(tsd_tcache_enabled_get(tsd)); - assert(tcache_small_bin_get(tcache, 0)->avail != NULL); + assert(!cache_bin_still_zero_initialized(&tcache->bins[0])); tcache_destroy(tsd, tcache, true); if (config_debug) { - tcache_small_bin_get(tcache, 0)->avail = NULL; + /* + * For debug testing only, we want to pretend we're still in the + * zero-initialized state. + */ + memset(tcache->bins, 0, sizeof(cache_bin_t) * nhbins); } } void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { - unsigned i; - cassert(config_stats); /* Merge and reset tcache stats. */ - for (i = 0; i < SC_NBINS; i++) { - cache_bin_t *tbin = tcache_small_bin_get(tcache, i); - unsigned binshard; - bin_t *bin = arena_bin_choose_lock(tsdn, arena, i, &binshard); - bin->stats.nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(tsdn, &bin->lock); - tbin->tstats.nrequests = 0; - } - - for (; i < nhbins; i++) { - cache_bin_t *tbin = tcache_large_bin_get(tcache, i); - arena_stats_large_flush_nrequests_add(tsdn, &arena->stats, i, - tbin->tstats.nrequests); - tbin->tstats.nrequests = 0; + for (unsigned i = 0; i < nhbins; i++) { + cache_bin_t *cache_bin = &tcache->bins[i]; + if (i < SC_NBINS) { + bin_t *bin = arena_bin_choose(tsdn, arena, i, NULL); + malloc_mutex_lock(tsdn, &bin->lock); + bin->stats.nrequests += cache_bin->tstats.nrequests; + malloc_mutex_unlock(tsdn, &bin->lock); + } else { + arena_stats_large_flush_nrequests_add(tsdn, + &arena->stats, i, cache_bin->tstats.nrequests); + } + cache_bin->tstats.nrequests = 0; } } static bool -tcaches_create_prep(tsd_t *tsd) { +tcaches_create_prep(tsd_t *tsd, base_t *base) { bool err; - malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx); if (tcaches == NULL) { - tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *) - * (MALLOCX_TCACHE_MAX+1), CACHELINE); + tcaches = base_alloc(tsd_tsdn(tsd), base, + sizeof(tcache_t *) * (MALLOCX_TCACHE_MAX+1), CACHELINE); if (tcaches == NULL) { err = true; goto label_return; @@ -643,17 +901,18 @@ tcaches_create_prep(tsd_t *tsd) { err = false; label_return: - malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); return err; } bool -tcaches_create(tsd_t *tsd, unsigned *r_ind) { +tcaches_create(tsd_t *tsd, base_t *base, unsigned *r_ind) { witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0); bool err; - if (tcaches_create_prep(tsd)) { + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + + if (tcaches_create_prep(tsd, base)) { err = true; goto label_return; } @@ -665,7 +924,6 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind) { } tcaches_t *elm; - malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); if (tcaches_avail != NULL) { elm = tcaches_avail; tcaches_avail = tcaches_avail->next; @@ -677,10 +935,10 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind) { *r_ind = tcaches_past; tcaches_past++; } - malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); err = false; label_return: 
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0); return err; } @@ -729,70 +987,115 @@ tcaches_destroy(tsd_t *tsd, unsigned ind) { } } -bool -tcache_boot(tsdn_t *tsdn) { - /* If necessary, clamp opt_lg_tcache_max. */ - if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < - SC_SMALL_MAXCLASS) { - tcache_maxclass = SC_SMALL_MAXCLASS; +static unsigned +tcache_ncached_max_compute(szind_t szind) { + if (szind >= SC_NBINS) { + assert(szind < nhbins); + return opt_tcache_nslots_large; + } + unsigned slab_nregs = bin_infos[szind].nregs; + + /* We may modify these values; start with the opt versions. */ + unsigned nslots_small_min = opt_tcache_nslots_small_min; + unsigned nslots_small_max = opt_tcache_nslots_small_max; + + /* + * Clamp values to meet our constraints -- even, nonzero, min < max, and + * suitable for a cache bin size. + */ + if (opt_tcache_nslots_small_max > CACHE_BIN_NCACHED_MAX) { + nslots_small_max = CACHE_BIN_NCACHED_MAX; + } + if (nslots_small_min % 2 != 0) { + nslots_small_min++; + } + if (nslots_small_max % 2 != 0) { + nslots_small_max--; + } + if (nslots_small_min < 2) { + nslots_small_min = 2; + } + if (nslots_small_max < 2) { + nslots_small_max = 2; + } + if (nslots_small_min > nslots_small_max) { + nslots_small_min = nslots_small_max; + } + + unsigned candidate; + if (opt_lg_tcache_nslots_mul < 0) { + candidate = slab_nregs >> (-opt_lg_tcache_nslots_mul); } else { - tcache_maxclass = (ZU(1) << opt_lg_tcache_max); + candidate = slab_nregs << opt_lg_tcache_nslots_mul; + } + if (candidate % 2 != 0) { + /* + * We need the candidate size to be even -- we assume that we + * can divide by two and get a positive number (e.g. when + * flushing). + */ + ++candidate; } + if (candidate <= nslots_small_min) { + return nslots_small_min; + } else if (candidate <= nslots_small_max) { + return candidate; + } else { + return nslots_small_max; + } +} + +bool +tcache_boot(tsdn_t *tsdn, base_t *base) { + tcache_maxclass = sz_s2u(opt_tcache_max); + assert(tcache_maxclass <= TCACHE_MAXCLASS_LIMIT); + nhbins = sz_size2index(tcache_maxclass) + 1; if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES, malloc_mutex_rank_exclusive)) { return true; } - nhbins = sz_size2index(tcache_maxclass) + 1; - - /* Initialize tcache_bin_info. */ - tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins - * sizeof(cache_bin_info_t), CACHELINE); + /* Initialize tcache_bin_info. See comments in tcache_init(). */ + unsigned n_reserved_bins = nhbins < SC_NBINS ? 
SC_NBINS : nhbins; + size_t size = n_reserved_bins * sizeof(cache_bin_info_t); + tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, base, size, + CACHELINE); if (tcache_bin_info == NULL) { return true; } - stack_nelms = 0; - unsigned i; - for (i = 0; i < SC_NBINS; i++) { - if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) { - tcache_bin_info[i].ncached_max = - TCACHE_NSLOTS_SMALL_MIN; - } else if ((bin_infos[i].nregs << 1) <= - TCACHE_NSLOTS_SMALL_MAX) { - tcache_bin_info[i].ncached_max = - (bin_infos[i].nregs << 1); - } else { - tcache_bin_info[i].ncached_max = - TCACHE_NSLOTS_SMALL_MAX; - } - stack_nelms += tcache_bin_info[i].ncached_max; + + for (szind_t i = 0; i < nhbins; i++) { + unsigned ncached_max = tcache_ncached_max_compute(i); + cache_bin_info_init(&tcache_bin_info[i], ncached_max); } - for (; i < nhbins; i++) { - tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE; - stack_nelms += tcache_bin_info[i].ncached_max; + for (szind_t i = nhbins; i < SC_NBINS; i++) { + /* Disabled small bins. */ + cache_bin_info_init(&tcache_bin_info[i], 0); + assert(tcache_small_bin_disabled(i, NULL)); } + cache_bin_info_compute_alloc(tcache_bin_info, nhbins, + &tcache_bin_alloc_size, &tcache_bin_alloc_alignment); + return false; } void tcache_prefork(tsdn_t *tsdn) { - if (!config_prof && opt_tcache) { - malloc_mutex_prefork(tsdn, &tcaches_mtx); - } + malloc_mutex_prefork(tsdn, &tcaches_mtx); } void tcache_postfork_parent(tsdn_t *tsdn) { - if (!config_prof && opt_tcache) { - malloc_mutex_postfork_parent(tsdn, &tcaches_mtx); - } + malloc_mutex_postfork_parent(tsdn, &tcaches_mtx); } void tcache_postfork_child(tsdn_t *tsdn) { - if (!config_prof && opt_tcache) { - malloc_mutex_postfork_child(tsdn, &tcaches_mtx); - } + malloc_mutex_postfork_child(tsdn, &tcaches_mtx); +} + +void tcache_assert_initialized(tcache_t *tcache) { + assert(!cache_bin_still_zero_initialized(&tcache->bins[0])); } diff --git a/contrib/jemalloc/src/thread_event.c b/contrib/jemalloc/src/thread_event.c new file mode 100644 index 00000000000000..37eb5827d3c93e --- /dev/null +++ b/contrib/jemalloc/src/thread_event.c @@ -0,0 +1,343 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/thread_event.h" + +/* + * Signatures for event specific functions. These functions should be defined + * by the modules owning each event. The signatures here verify that the + * definitions follow the right format. + * + * The first two are functions computing new / postponed event wait time. New + * event wait time is the time till the next event if an event is currently + * being triggered; postponed event wait time is the time till the next event + * if an event should be triggered but needs to be postponed, e.g. when the TSD + * is not nominal or during reentrancy. + * + * The third is the event handler function, which is called whenever an event + * is triggered. The parameter is the elapsed time since the last time an + * event of the same type was triggered. + */ +#define E(event, condition_unused, is_alloc_event_unused) \ +uint64_t event##_new_event_wait(tsd_t *tsd); \ +uint64_t event##_postponed_event_wait(tsd_t *tsd); \ +void event##_event_handler(tsd_t *tsd, uint64_t elapsed); + +ITERATE_OVER_ALL_EVENTS +#undef E + +/* Signatures for internal functions fetching elapsed time. 
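To make the macro plumbing above concrete: each E(...) invocation in the first block simply declares the three per-event hooks for one event name, which the owning module is then expected to define. Taking tcache_gc (one of the iterated events) as the example, the expansion looks like the following; the tsd_s typedef is only a stand-in so the snippet compiles on its own:

```
typedef struct tsd_s tsd_t;	/* stand-in for jemalloc's TSD type */

/* What E(tcache_gc, ...) expands to in the first block above. */
uint64_t tcache_gc_new_event_wait(tsd_t *tsd);       /* wait until next event */
uint64_t tcache_gc_postponed_event_wait(tsd_t *tsd); /* wait when postponed */
void tcache_gc_event_handler(tsd_t *tsd, uint64_t elapsed);
```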
*/ +#define E(event, condition_unused, is_alloc_event_unused) \ +static uint64_t event##_fetch_elapsed(tsd_t *tsd); + +ITERATE_OVER_ALL_EVENTS +#undef E + +static uint64_t +tcache_gc_fetch_elapsed(tsd_t *tsd) { + return TE_INVALID_ELAPSED; +} + +static uint64_t +tcache_gc_dalloc_fetch_elapsed(tsd_t *tsd) { + return TE_INVALID_ELAPSED; +} + +static uint64_t +prof_sample_fetch_elapsed(tsd_t *tsd) { + uint64_t last_event = thread_allocated_last_event_get(tsd); + uint64_t last_sample_event = prof_sample_last_event_get(tsd); + prof_sample_last_event_set(tsd, last_event); + return last_event - last_sample_event; +} + +static uint64_t +stats_interval_fetch_elapsed(tsd_t *tsd) { + uint64_t last_event = thread_allocated_last_event_get(tsd); + uint64_t last_stats_event = stats_interval_last_event_get(tsd); + stats_interval_last_event_set(tsd, last_event); + return last_event - last_stats_event; +} + +static uint64_t +peak_alloc_fetch_elapsed(tsd_t *tsd) { + return TE_INVALID_ELAPSED; +} + +static uint64_t +peak_dalloc_fetch_elapsed(tsd_t *tsd) { + return TE_INVALID_ELAPSED; +} + +/* Per event facilities done. */ + +static bool +te_ctx_has_active_events(te_ctx_t *ctx) { + assert(config_debug); +#define E(event, condition, alloc_event) \ + if (condition && alloc_event == ctx->is_alloc) { \ + return true; \ + } + ITERATE_OVER_ALL_EVENTS +#undef E + return false; +} + +static uint64_t +te_next_event_compute(tsd_t *tsd, bool is_alloc) { + uint64_t wait = TE_MAX_START_WAIT; +#define E(event, condition, alloc_event) \ + if (is_alloc == alloc_event && condition) { \ + uint64_t event_wait = \ + event##_event_wait_get(tsd); \ + assert(event_wait <= TE_MAX_START_WAIT); \ + if (event_wait > 0U && event_wait < wait) { \ + wait = event_wait; \ + } \ + } + + ITERATE_OVER_ALL_EVENTS +#undef E + assert(wait <= TE_MAX_START_WAIT); + return wait; +} + +static void +te_assert_invariants_impl(tsd_t *tsd, te_ctx_t *ctx) { + uint64_t current_bytes = te_ctx_current_bytes_get(ctx); + uint64_t last_event = te_ctx_last_event_get(ctx); + uint64_t next_event = te_ctx_next_event_get(ctx); + uint64_t next_event_fast = te_ctx_next_event_fast_get(ctx); + + assert(last_event != next_event); + if (next_event > TE_NEXT_EVENT_FAST_MAX || !tsd_fast(tsd)) { + assert(next_event_fast == 0U); + } else { + assert(next_event_fast == next_event); + } + + /* The subtraction is intentionally susceptible to underflow. */ + uint64_t interval = next_event - last_event; + + /* The subtraction is intentionally susceptible to underflow. */ + assert(current_bytes - last_event < interval); + uint64_t min_wait = te_next_event_compute(tsd, te_ctx_is_alloc(ctx)); + /* + * next_event should have been pushed up only except when no event is + * on and the TSD is just initialized. The last_event == 0U guard + * below is stronger than needed, but having an exactly accurate guard + * is more complicated to implement. + */ + assert((!te_ctx_has_active_events(ctx) && last_event == 0U) || + interval == min_wait || + (interval < min_wait && interval == TE_MAX_INTERVAL)); +} + +void +te_assert_invariants_debug(tsd_t *tsd) { + te_ctx_t ctx; + te_ctx_get(tsd, &ctx, true); + te_assert_invariants_impl(tsd, &ctx); + + te_ctx_get(tsd, &ctx, false); + te_assert_invariants_impl(tsd, &ctx); +} + +/* + * Synchronization around the fast threshold in tsd -- + * There are two threads to consider in the synchronization here: + * - The owner of the tsd being updated by a slow path change + * - The remote thread, doing that slow path change. 
+ * + * As a design constraint, we want to ensure that a slow-path transition cannot + * be ignored for arbitrarily long, and that if the remote thread causes a + * slow-path transition and then communicates with the owner thread that it has + * occurred, then the owner will go down the slow path on the next allocator + * operation (so that we don't want to just wait until the owner hits its slow + * path reset condition on its own). + * + * Here's our strategy to do that: + * + * The remote thread will update the slow-path stores to TSD variables, issue a + * SEQ_CST fence, and then update the TSD next_event_fast counter. The owner + * thread will update next_event_fast, issue an SEQ_CST fence, and then check + * its TSD to see if it's on the slow path. + + * This is fairly straightforward when 64-bit atomics are supported. Assume that + * the remote fence is sandwiched between two owner fences in the reset pathway. + * The case where there is no preceding or trailing owner fence (i.e. because + * the owner thread is near the beginning or end of its life) can be analyzed + * similarly. The owner store to next_event_fast preceding the earlier owner + * fence will be earlier in coherence order than the remote store to it, so that + * the owner thread will go down the slow path once the store becomes visible to + * it, which is no later than the time of the second fence. + + * The case where we don't support 64-bit atomics is trickier, since word + * tearing is possible. We'll repeat the same analysis, and look at the two + * owner fences sandwiching the remote fence. The next_event_fast stores done + * alongside the earlier owner fence cannot overwrite any of the remote stores + * (since they precede the earlier owner fence in sb, which precedes the remote + * fence in sc, which precedes the remote stores in sb). After the second owner + * fence there will be a re-check of the slow-path variables anyways, so the + * "owner will notice that it's on the slow path eventually" guarantee is + * satisfied. To make sure that the out-of-band-messaging constraint is as well, + * note that either the message passing is sequenced before the second owner + * fence (in which case the remote stores happen before the second set of owner + * stores, so malloc sees a value of zero for next_event_fast and goes down the + * slow path), or it is not (in which case the owner sees the tsd slow-path + * writes on its previous update). This leaves open the possibility that the + * remote thread will (at some arbitrary point in the future) zero out one half + * of the owner thread's next_event_fast, but that's always safe (it just sends + * it down the slow path earlier). + */ +static void +te_ctx_next_event_fast_update(te_ctx_t *ctx) { + uint64_t next_event = te_ctx_next_event_get(ctx); + uint64_t next_event_fast = (next_event <= TE_NEXT_EVENT_FAST_MAX) ? + next_event : 0U; + te_ctx_next_event_fast_set(ctx, next_event_fast); +} + +void +te_recompute_fast_threshold(tsd_t *tsd) { + if (tsd_state_get(tsd) != tsd_state_nominal) { + /* Check first because this is also called on purgatory. 
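Stripped of the TSD plumbing, the fence discipline described above can be sketched with plain C11 atomics. The names and the bare uint64_t threshold below are illustrative stand-ins, not jemalloc's actual fields; the point is only the store/fence/store versus store/fence/load ordering:

```
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the per-thread state discussed above. */
static _Atomic bool on_slow_path = false;        /* slow-path flag in TSD */
static _Atomic uint64_t next_event_fast = 1000;  /* fast-path threshold */

/* Remote thread: force the owner onto the slow path. */
static void
remote_force_slow_path(void) {
	atomic_store_explicit(&on_slow_path, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	/* Zeroing the threshold makes the owner's fast-path check fail. */
	atomic_store_explicit(&next_event_fast, 0, memory_order_relaxed);
}

/* Owner thread: recompute the threshold, then re-check the flag. */
static void
owner_recompute_fast_threshold(uint64_t next_event) {
	atomic_store_explicit(&next_event_fast, next_event,
	    memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load_explicit(&on_slow_path, memory_order_relaxed)) {
		/* Undo the optimistic store; stay on the slow path. */
		atomic_store_explicit(&next_event_fast, 0,
		    memory_order_relaxed);
	}
}
```

Either the remote fence orders before the owner's fence (so the owner's load sees the flag) or after it (so the remote zeroing of the threshold survives); in both cases the owner ends up on the slow path, which is the guarantee argued for above.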
*/ + te_next_event_fast_set_non_nominal(tsd); + return; + } + + te_ctx_t ctx; + te_ctx_get(tsd, &ctx, true); + te_ctx_next_event_fast_update(&ctx); + te_ctx_get(tsd, &ctx, false); + te_ctx_next_event_fast_update(&ctx); + + atomic_fence(ATOMIC_SEQ_CST); + if (tsd_state_get(tsd) != tsd_state_nominal) { + te_next_event_fast_set_non_nominal(tsd); + } +} + +static void +te_adjust_thresholds_helper(tsd_t *tsd, te_ctx_t *ctx, + uint64_t wait) { + /* + * The next threshold based on future events can only be adjusted after + * progressing the last_event counter (which is set to current). + */ + assert(te_ctx_current_bytes_get(ctx) == te_ctx_last_event_get(ctx)); + assert(wait <= TE_MAX_START_WAIT); + + uint64_t next_event = te_ctx_last_event_get(ctx) + (wait <= + TE_MAX_INTERVAL ? wait : TE_MAX_INTERVAL); + te_ctx_next_event_set(tsd, ctx, next_event); +} + +static uint64_t +te_clip_event_wait(uint64_t event_wait) { + assert(event_wait > 0U); + if (TE_MIN_START_WAIT > 1U && + unlikely(event_wait < TE_MIN_START_WAIT)) { + event_wait = TE_MIN_START_WAIT; + } + if (TE_MAX_START_WAIT < UINT64_MAX && + unlikely(event_wait > TE_MAX_START_WAIT)) { + event_wait = TE_MAX_START_WAIT; + } + return event_wait; +} + +void +te_event_trigger(tsd_t *tsd, te_ctx_t *ctx) { + /* usize has already been added to thread_allocated. */ + uint64_t bytes_after = te_ctx_current_bytes_get(ctx); + /* The subtraction is intentionally susceptible to underflow. */ + uint64_t accumbytes = bytes_after - te_ctx_last_event_get(ctx); + + te_ctx_last_event_set(ctx, bytes_after); + + bool allow_event_trigger = tsd_nominal(tsd) && + tsd_reentrancy_level_get(tsd) == 0; + bool is_alloc = ctx->is_alloc; + uint64_t wait = TE_MAX_START_WAIT; + +#define E(event, condition, alloc_event) \ + bool is_##event##_triggered = false; \ + if (is_alloc == alloc_event && condition) { \ + uint64_t event_wait = event##_event_wait_get(tsd); \ + assert(event_wait <= TE_MAX_START_WAIT); \ + if (event_wait > accumbytes) { \ + event_wait -= accumbytes; \ + } else if (!allow_event_trigger) { \ + event_wait = event##_postponed_event_wait(tsd); \ + } else { \ + is_##event##_triggered = true; \ + event_wait = event##_new_event_wait(tsd); \ + } \ + event_wait = te_clip_event_wait(event_wait); \ + event##_event_wait_set(tsd, event_wait); \ + if (event_wait < wait) { \ + wait = event_wait; \ + } \ + } + + ITERATE_OVER_ALL_EVENTS +#undef E + + assert(wait <= TE_MAX_START_WAIT); + te_adjust_thresholds_helper(tsd, ctx, wait); + te_assert_invariants(tsd); + +#define E(event, condition, alloc_event) \ + if (is_alloc == alloc_event && condition && \ + is_##event##_triggered) { \ + assert(allow_event_trigger); \ + uint64_t elapsed = event##_fetch_elapsed(tsd); \ + event##_event_handler(tsd, elapsed); \ + } + + ITERATE_OVER_ALL_EVENTS +#undef E + + te_assert_invariants(tsd); +} + +static void +te_init(tsd_t *tsd, bool is_alloc) { + te_ctx_t ctx; + te_ctx_get(tsd, &ctx, is_alloc); + /* + * Reset the last event to current, which starts the events from a clean + * state. This is necessary when re-init the tsd event counters. + * + * The event counters maintain a relationship with the current bytes: + * last_event <= current < next_event. When a reinit happens (e.g. + * reincarnated tsd), the last event needs progressing because all + * events start fresh from the current bytes. 
+ */ + te_ctx_last_event_set(&ctx, te_ctx_current_bytes_get(&ctx)); + + uint64_t wait = TE_MAX_START_WAIT; +#define E(event, condition, alloc_event) \ + if (is_alloc == alloc_event && condition) { \ + uint64_t event_wait = event##_new_event_wait(tsd); \ + event_wait = te_clip_event_wait(event_wait); \ + event##_event_wait_set(tsd, event_wait); \ + if (event_wait < wait) { \ + wait = event_wait; \ + } \ + } + + ITERATE_OVER_ALL_EVENTS +#undef E + te_adjust_thresholds_helper(tsd, &ctx, wait); +} + +void +tsd_te_init(tsd_t *tsd) { + /* Make sure no overflow for the bytes accumulated on event_trigger. */ + assert(TE_MAX_INTERVAL <= UINT64_MAX - SC_LARGE_MAXCLASS + 1); + te_init(tsd, true); + te_init(tsd, false); + te_assert_invariants(tsd); +} diff --git a/contrib/jemalloc/src/ticker.c b/contrib/jemalloc/src/ticker.c index d7b8cd26c06891..790b5c20079c6c 100644 --- a/contrib/jemalloc/src/ticker.c +++ b/contrib/jemalloc/src/ticker.c @@ -1,3 +1,32 @@ -#define JEMALLOC_TICKER_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" + +/* + * To avoid using floating point math down core paths (still necessary because + * versions of the glibc dynamic loader that did not preserve xmm registers are + * still somewhat common, requiring us to be compilable with -mno-sse), and also + * to avoid generally expensive library calls, we use a precomputed table of + * values. We want to sample U uniformly on [0, 1], and then compute + * ceil(log(u)/log(1-1/nticks)). We're mostly interested in the case where + * nticks is reasonably big, so 1/log(1-1/nticks) is well-approximated by + * -nticks. + * + * To compute log(u), we sample an integer in [1, 64] and divide, then just look + * up results in a table. As a space-compression mechanism, we store these as + * uint8_t by dividing the range (255) by the highest-magnitude value the log + * can take on, and using that as a multiplier. We then have to divide by that + * multiplier at the end of the computation. + * + * The values here are computed in src/ticker.py + */ + +const uint8_t ticker_geom_table[1 << TICKER_GEOM_NBITS] = { + 254, 211, 187, 169, 156, 144, 135, 127, + 120, 113, 107, 102, 97, 93, 89, 85, + 81, 77, 74, 71, 68, 65, 62, 60, + 57, 55, 53, 50, 48, 46, 44, 42, + 40, 39, 37, 35, 33, 32, 30, 29, + 27, 26, 24, 23, 21, 20, 19, 18, + 16, 15, 14, 13, 12, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0 +}; diff --git a/contrib/jemalloc/src/tsd.c b/contrib/jemalloc/src/tsd.c index a31f6b9698e5b3..e8e4f3a339592d 100644 --- a/contrib/jemalloc/src/tsd.c +++ b/contrib/jemalloc/src/tsd.c @@ -1,17 +1,14 @@ -#define JEMALLOC_TSD_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" +#include "jemalloc/internal/san.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/rtree.h" /******************************************************************************/ /* Data. */ -static unsigned ncleanups; -static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; - /* TSD_INITIALIZER triggers "-Wmissing-field-initializer" */ JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS @@ -74,7 +71,7 @@ tsd_in_nominal_list(tsd_t *tsd) { * out of it here. 
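The geometric-sampling table above can be regenerated with a few lines of code. The sketch below follows the recipe in the comment (take -log(i/64) for i in [1, 64] and scale it so the largest value fits in a byte); it is an approximation of what src/ticker.py computes, and the exact rounding there may differ:

```
#include <math.h>
#include <stdio.h>

int
main(void) {
	const int nslots = 64;	/* 1 << TICKER_GEOM_NBITS */
	/* Largest |log(i/nslots)| is at i == 1; scale it to fit in a byte. */
	double mul = floor(255.0 / -log(1.0 / nslots));
	for (int i = 1; i <= nslots; i++) {
		int v = (int)(mul * -log((double)i / nslots) + 0.5);
		printf("%d%s", v, i % 8 == 0 ? ",\n" : ", ");
	}
	return 0;
}
```

With 64 slots the multiplier works out to floor(255 / log 64) = 61, which matches the leading entries shown above (254, 211, 187, ...); as the comment notes, the consumer has to divide that multiplier back out at the end of the computation.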
*/ malloc_mutex_lock(TSDN_NULL, &tsd_nominal_tsds_lock); - ql_foreach(tsd_list, &tsd_nominal_tsds, TSD_MANGLE(tcache).tsd_link) { + ql_foreach(tsd_list, &tsd_nominal_tsds, TSD_MANGLE(tsd_link)) { if (tsd == tsd_list) { found = true; break; @@ -88,9 +85,9 @@ static void tsd_add_nominal(tsd_t *tsd) { assert(!tsd_in_nominal_list(tsd)); assert(tsd_state_get(tsd) <= tsd_state_nominal_max); - ql_elm_new(tsd, TSD_MANGLE(tcache).tsd_link); + ql_elm_new(tsd, TSD_MANGLE(tsd_link)); malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock); - ql_tail_insert(&tsd_nominal_tsds, tsd, TSD_MANGLE(tcache).tsd_link); + ql_tail_insert(&tsd_nominal_tsds, tsd, TSD_MANGLE(tsd_link)); malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock); } @@ -99,7 +96,7 @@ tsd_remove_nominal(tsd_t *tsd) { assert(tsd_in_nominal_list(tsd)); assert(tsd_state_get(tsd) <= tsd_state_nominal_max); malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock); - ql_remove(&tsd_nominal_tsds, tsd, TSD_MANGLE(tcache).tsd_link); + ql_remove(&tsd_nominal_tsds, tsd, TSD_MANGLE(tsd_link)); malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock); } @@ -112,11 +109,14 @@ tsd_force_recompute(tsdn_t *tsdn) { atomic_fence(ATOMIC_RELEASE); malloc_mutex_lock(tsdn, &tsd_nominal_tsds_lock); tsd_t *remote_tsd; - ql_foreach(remote_tsd, &tsd_nominal_tsds, TSD_MANGLE(tcache).tsd_link) { + ql_foreach(remote_tsd, &tsd_nominal_tsds, TSD_MANGLE(tsd_link)) { assert(tsd_atomic_load(&remote_tsd->state, ATOMIC_RELAXED) <= tsd_state_nominal_max); - tsd_atomic_store(&remote_tsd->state, tsd_state_nominal_recompute, - ATOMIC_RELAXED); + tsd_atomic_store(&remote_tsd->state, + tsd_state_nominal_recompute, ATOMIC_RELAXED); + /* See comments in te_recompute_fast_threshold(). */ + atomic_fence(ATOMIC_SEQ_CST); + te_next_event_fast_set_non_nominal(remote_tsd); } malloc_mutex_unlock(tsdn, &tsd_nominal_tsds_lock); } @@ -175,6 +175,8 @@ tsd_slow_update(tsd_t *tsd) { old_state = tsd_atomic_exchange(&tsd->state, new_state, ATOMIC_ACQUIRE); } while (old_state == tsd_state_nominal_recompute); + + te_recompute_fast_threshold(tsd); } void @@ -207,22 +209,17 @@ tsd_state_set(tsd_t *tsd, uint8_t new_state) { /* * This is the tricky case. We're transitioning from * one nominal state to another. The caller can't know - * about any races that are occuring at the same time, + * about any races that are occurring at the same time, * so we always have to recompute no matter what. */ tsd_slow_update(tsd); } } + te_recompute_fast_threshold(tsd); } -static bool -tsd_data_init(tsd_t *tsd) { - /* - * We initialize the rtree context first (before the tcache), since the - * tcache initialization depends on it. - */ - rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd)); - +static void +tsd_prng_state_init(tsd_t *tsd) { /* * A nondeterministic seed based on the address of tsd reduces * the likelihood of lockstep non-uniform cache index @@ -230,9 +227,20 @@ tsd_data_init(tsd_t *tsd) { * cost of test repeatability. For debug builds, instead use a * deterministic seed. */ - *tsd_offset_statep_get(tsd) = config_debug ? 0 : + *tsd_prng_statep_get(tsd) = config_debug ? 0 : (uint64_t)(uintptr_t)tsd; +} +static bool +tsd_data_init(tsd_t *tsd) { + /* + * We initialize the rtree context first (before the tcache), since the + * tcache initialization depends on it. + */ + rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd)); + tsd_prng_state_init(tsd); + tsd_te_init(tsd); /* event_init may use the prng state above. 
*/ + tsd_san_init(tsd); return tsd_tcache_enabled_data_init(tsd); } @@ -242,8 +250,6 @@ assert_tsd_data_cleanup_done(tsd_t *tsd) { assert(!tsd_in_nominal_list(tsd)); assert(*tsd_arenap_get_unsafe(tsd) == NULL); assert(*tsd_iarenap_get_unsafe(tsd) == NULL); - assert(*tsd_arenas_tdata_bypassp_get_unsafe(tsd) == true); - assert(*tsd_arenas_tdatap_get_unsafe(tsd) == NULL); assert(*tsd_tcache_enabledp_get_unsafe(tsd) == false); assert(*tsd_prof_tdatap_get_unsafe(tsd) == NULL); } @@ -258,9 +264,11 @@ tsd_data_init_nocleanup(tsd_t *tsd) { * We set up tsd in a way that no cleanup is needed. */ rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd)); - *tsd_arenas_tdata_bypassp_get(tsd) = true; *tsd_tcache_enabledp_get_unsafe(tsd) = false; *tsd_reentrancy_levelp_get(tsd) = 1; + tsd_prng_state_init(tsd); + tsd_te_init(tsd); /* event_init may use the prng state above. */ + tsd_san_init(tsd); assert_tsd_data_cleanup_done(tsd); return false; @@ -326,6 +334,9 @@ malloc_tsd_dalloc(void *wrapper) { } #if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) +static unsigned ncleanups; +static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; + #ifndef _WIN32 JEMALLOC_EXPORT #endif @@ -350,23 +361,27 @@ _malloc_thread_cleanup(void) { } } while (again); } -#endif +#ifndef _WIN32 +JEMALLOC_EXPORT +#endif void -malloc_tsd_cleanup_register(bool (*f)(void)) { +_malloc_tsd_cleanup_register(bool (*f)(void)) { assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); cleanups[ncleanups] = f; ncleanups++; } +#endif + static void tsd_do_data_cleanup(tsd_t *tsd) { prof_tdata_cleanup(tsd); iarena_cleanup(tsd); arena_cleanup(tsd); - arenas_tdata_cleanup(tsd); tcache_cleanup(tsd); witnesses_cleanup(tsd_witness_tsdp_get_unsafe(tsd)); + *tsd_reentrancy_levelp_get(tsd) = 1; } void @@ -387,7 +402,7 @@ tsd_cleanup(void *arg) { * is still called for testing and completeness. */ assert_tsd_data_cleanup_done(tsd); - /* Fall through. */ + JEMALLOC_FALLTHROUGH; case tsd_state_nominal: case tsd_state_nominal_slow: tsd_do_data_cleanup(tsd); @@ -418,7 +433,9 @@ tsd_t * malloc_tsd_boot0(void) { tsd_t *tsd; +#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) ncleanups = 0; +#endif if (malloc_mutex_init(&tsd_nominal_tsds_lock, "tsd_nominal_tsds_lock", WITNESS_RANK_OMIT, malloc_mutex_rank_exclusive)) { return NULL; @@ -427,7 +444,6 @@ malloc_tsd_boot0(void) { return NULL; } tsd = tsd_fetch(); - *tsd_arenas_tdata_bypassp_get(tsd) = true; return tsd; } @@ -437,7 +453,6 @@ malloc_tsd_boot1(void) { tsd_t *tsd = tsd_fetch(); /* malloc_slow has been set properly. Update tsd_slow. 
*/ tsd_slow_update(tsd); - *tsd_arenas_tdata_bypassp_get(tsd) = false; } #ifdef _WIN32 diff --git a/contrib/jemalloc/src/witness.c b/contrib/jemalloc/src/witness.c index f42b72ad1a2cd5..4474af04c8dcd4 100644 --- a/contrib/jemalloc/src/witness.c +++ b/contrib/jemalloc/src/witness.c @@ -1,4 +1,3 @@ -#define JEMALLOC_WITNESS_C_ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" @@ -15,14 +14,41 @@ witness_init(witness_t *witness, const char *name, witness_rank_t rank, } static void -witness_lock_error_impl(const witness_list_t *witnesses, - const witness_t *witness) { - witness_t *w; +witness_print_witness(witness_t *w, unsigned n) { + assert(n > 0); + if (n == 1) { + malloc_printf(" %s(%u)", w->name, w->rank); + } else { + malloc_printf(" %s(%u)X%u", w->name, w->rank, n); + } +} - malloc_printf(": Lock rank order reversal:"); +static void +witness_print_witnesses(const witness_list_t *witnesses) { + witness_t *w, *last = NULL; + unsigned n = 0; ql_foreach(w, witnesses, link) { - malloc_printf(" %s(%u)", w->name, w->rank); + if (last != NULL && w->rank > last->rank) { + assert(w->name != last->name); + witness_print_witness(last, n); + n = 0; + } else if (last != NULL) { + assert(w->rank == last->rank); + assert(w->name == last->name); + } + last = w; + ++n; } + if (last != NULL) { + witness_print_witness(last, n); + } +} + +static void +witness_lock_error_impl(const witness_list_t *witnesses, + const witness_t *witness) { + malloc_printf(": Lock rank order reversal:"); + witness_print_witnesses(witnesses); malloc_printf(" %s(%u)\n", witness->name, witness->rank); abort(); } @@ -49,13 +75,9 @@ witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error = static void witness_depth_error_impl(const witness_list_t *witnesses, witness_rank_t rank_inclusive, unsigned depth) { - witness_t *w; - malloc_printf(": Should own %u lock%s of rank >= %u:", depth, (depth != 1) ? "s" : "", rank_inclusive); - ql_foreach(w, witnesses, link) { - malloc_printf(" %s(%u)", w->name, w->rank); - } + witness_print_witnesses(witnesses); malloc_printf("\n"); abort(); } diff --git a/lib/libc/stdlib/malloc/jemalloc/Makefile.inc b/lib/libc/stdlib/malloc/jemalloc/Makefile.inc index d916d69f471f67..9bc30ce4b4e86b 100644 --- a/lib/libc/stdlib/malloc/jemalloc/Makefile.inc +++ b/lib/libc/stdlib/malloc/jemalloc/Makefile.inc @@ -1,8 +1,12 @@ -JEMALLOCSRCS:= jemalloc.c arena.c background_thread.c base.c bin.c bitmap.c \ - ckh.c ctl.c div.c extent.c extent_dss.c extent_mmap.c hash.c hook.c \ - large.c log.c malloc_io.c mutex.c mutex_pool.c nstime.c pages.c \ - prng.c prof.c rtree.c safety_check.c sc.c stats.c sz.c tcache.c \ - test_hooks.c ticker.c tsd.c witness.c +JEMALLOCSRCS:= jemalloc.c arena.c background_thread.c base.c bin.c bin_info.c \ + bitmap.c buf_writer.c cache_bin.c ckh.c counter.c ctl.c decay.c div.c \ + ecache.c edata.c edata_cache.c ehooks.c emap.c eset.c exp_grow.c \ + extent.c extent_dss.c extent_mmap.c fxp.c hook.c hpa.c hpa_hooks.c \ + hpdata.c inspect.c large.c log.c malloc_io.c mutex.c nstime.c pa.c \ + pa_extra.c pac.c pages.c pai.c peak_event.c prof.c prof_data.c \ + prof_log.c prof_recent.c prof_stats.c prof_sys.c psset.c rtree.c \ + safety_check.c san.c san_bump.c sc.c sec.c stats.c sz.c tcache.c \ + test_hooks.c thread_event.c ticker.c tsd.c witness.c CFLAGS+=-I${SRCTOP}/contrib/jemalloc/include .if ${MK_JEMALLOC_LG_VADDR_WIDE} != no