From 632421da3a6e0e769c6f09539d04007c627ac878 Mon Sep 17 00:00:00 2001
From: QuarticCat
Date: Wed, 19 Jun 2024 10:49:03 +0800
Subject: [PATCH] fix typos

---
 doc/mimalloc-doc.h               |  6 +--
 docker/alpine-arm32v7/Dockerfile |  4 +-
 include/mimalloc/atomic.h        |  2 +-
 include/mimalloc/internal.h      |  2 +-
 include/mimalloc/types.h         |  4 +-
 readme.md                        | 68 ++++++++++++++++----------------
 src/arena.c                      |  4 +-
 src/options.c                    | 10 ++---
 src/prim/unix/prim.c             |  2 +-
 src/prim/windows/prim.c          |  3 +-
 src/segment.c                    | 10 ++---
 test/test-stress.c               |  6 +--
 12 files changed, 59 insertions(+), 62 deletions(-)

diff --git a/doc/mimalloc-doc.h b/doc/mimalloc-doc.h
index e1c14b44..698c5dbb 100644
--- a/doc/mimalloc-doc.h
+++ b/doc/mimalloc-doc.h
@@ -589,7 +589,7 @@ void mi_subproc_add_current_thread(mi_subproc_id_t subproc);
 
 /// Allocate \a size bytes aligned by \a alignment.
 /// @param size number of bytes to allocate.
-/// @param alignment the minimal alignment of the allocated memory. 
+/// @param alignment the minimal alignment of the allocated memory.
 /// @returns pointer to the allocated memory or \a NULL if out of memory,
 /// or if the alignment is not a power of 2 (including 0). The \a size is unrestricted
 /// (and does not have to be an integral multiple of the \a alignment).
@@ -883,7 +883,7 @@ typedef bool (mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* a
 bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
 
 /// @brief Visit all areas and blocks in abandoned heaps.
-/// @param subproc_id The sub-process id associated with the abandonded heaps.
+/// @param subproc_id The sub-process id associated with the abandoned heaps.
 /// @param heap_tag Visit only abandoned memory with the specified heap tag, use -1 to visit all abandoned memory.
 /// @param visit_blocks If \a true visits all allocated blocks, otherwise
 /// \a visitor is only called for every heap area.
@@ -1139,7 +1139,7 @@ to link with the static library. See `test\CMakeLists.txt` for an example.
 ### C++
 
 For best performance in C++ programs, it is also recommended to override the
-global `new` and `delete` operators. For convience, mimalloc provides
+global `new` and `delete` operators. For convenience, mimalloc provides
 [`mimalloc-new-delete.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-new-delete.h) which does this for you -- just include it in a single(!) source file in your
 project. In C++, mimalloc also provides the `mi_stl_allocator` struct which implements the `std::allocator`
diff --git a/docker/alpine-arm32v7/Dockerfile b/docker/alpine-arm32v7/Dockerfile
index 56f071db..1d7fd48b 100644
--- a/docker/alpine-arm32v7/Dockerfile
+++ b/docker/alpine-arm32v7/Dockerfile
@@ -1,10 +1,10 @@
 # install from an image
-# download first an appropiate tar.gz image into the current directory
+# download first an appropriate tar.gz image into the current directory
 # from:
 
 FROM scratch
 
 # Substitute the image name that was downloaded
-ADD alpine-minirootfs-20240329-armv7.tar.gz / 
+ADD alpine-minirootfs-20240329-armv7.tar.gz /
 
 # Install tools
 RUN apk add build-base make cmake
diff --git a/include/mimalloc/atomic.h b/include/mimalloc/atomic.h
index 3a0d4892..530cca01 100644
--- a/include/mimalloc/atomic.h
+++ b/include/mimalloc/atomic.h
@@ -22,7 +22,7 @@ terms of the MIT license. A copy of the license can be found in the file
 // --------------------------------------------------------------------------------------------
 // Atomics
 // We need to be portable between C, C++, and MSVC.
-// We base the primitives on the C/C++ atomics and create a mimimal wrapper for MSVC in C compilation mode.
+// We base the primitives on the C/C++ atomics and create a minimal wrapper for MSVC in C compilation mode.
 // This is why we try to use only `uintptr_t` and `*` as atomic types.
 // To gain better insight in the range of used atomics, we use explicitly named memory order operations
 // instead of passing the memory order as a parameter.
diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h
index 6e87d5ae..94e394c2 100644
--- a/include/mimalloc/internal.h
+++ b/include/mimalloc/internal.h
@@ -10,7 +10,7 @@ terms of the MIT license. A copy of the license can be found in the file
 
 
 // --------------------------------------------------------------------------
-// This file contains the interal API's of mimalloc and various utility
+// This file contains the internal API's of mimalloc and various utility
 // functions and macros.
 // --------------------------------------------------------------------------
 
diff --git a/include/mimalloc/types.h b/include/mimalloc/types.h
index 31ed35f8..2545c6d2 100644
--- a/include/mimalloc/types.h
+++ b/include/mimalloc/types.h
@@ -231,7 +231,7 @@ typedef enum mi_delayed_e {
   MI_USE_DELAYED_FREE   = 0, // push on the owning heap thread delayed list
   MI_DELAYED_FREEING    = 1, // temporary: another thread is accessing the owning heap
   MI_NO_DELAYED_FREE    = 2, // optimize: push on page local thread free queue if another block is already in the heap thread delayed free list
-  MI_NEVER_DELAYED_FREE = 3  // sticky: used for abondoned pages without a owning heap; this only resets on page reclaim
+  MI_NEVER_DELAYED_FREE = 3  // sticky: used for abandoned pages without an owning heap; this only resets on page reclaim
 } mi_delayed_t;
 
 
@@ -338,7 +338,7 @@ typedef enum mi_page_kind_e {
   MI_PAGE_MEDIUM,  // medium blocks go into 512KiB pages inside a segment
   MI_PAGE_LARGE,   // larger blocks go into a single page spanning a whole segment
   MI_PAGE_HUGE     // a huge page is a single page in a segment of variable size (but still 2MiB aligned)
-                   // used for blocks `> MI_LARGE_OBJ_SIZE_MAX` or an aligment `> MI_BLOCK_ALIGNMENT_MAX`.
+                   // used for blocks `> MI_LARGE_OBJ_SIZE_MAX` or an alignment `> MI_BLOCK_ALIGNMENT_MAX`.
 } mi_page_kind_t;
 
diff --git a/readme.md b/readme.md
index a0296b43..44e4c261 100644
--- a/readme.md
+++ b/readme.md
@@ -12,7 +12,7 @@ is a general purpose allocator with excellent [performance](#performance) charac
 Initially developed by Daan Leijen for the runtime systems of the
 [Koka](https://koka-lang.github.io) and [Lean](https://github.com/leanprover/lean) languages.
 
-Latest release tag: `v2.1.7` (2024-05-21). 
+Latest release tag: `v2.1.7` (2024-05-21).
 Latest v1 tag: `v1.8.7` (2024-05-21).
 
 mimalloc is a drop-in replacement for `malloc` and can be used in other programs
@@ -77,7 +77,7 @@ Enjoy!
 
 ### Releases
 
-Note: the `v2.x` version has a different algorithm for managing internal mimalloc pages (as slices) that tends to use reduce 
+Note: the `v2.x` version has a different algorithm for managing internal mimalloc pages (as slices) that tends to reduce
 memory usage and fragmentation compared to mimalloc `v1.x` (especially for large workloads).
 Should otherwise have similar performance (see [below](#performance)); please report if you observe any significant performance regression.
 
@@ -87,18 +87,18 @@ memory usage
 * 2024-05-13, `v1.8.6`, `v2.1.6`: Fix build errors on various (older) platforms. Refactored aligned allocation.
 
 * 2024-04-22, `v1.8.4`, `v2.1.4`: Fixes various bugs and build issues. Add `MI_LIBC_MUSL` cmake flag for musl builds.
   Free-ing code is refactored into a separate module (`free.c`). Mimalloc page info is simplified with the block size
-  directly available (and new `block_size_shift` to improve aligned block free-ing). 
+  directly available (and new `block_size_shift` to improve aligned block free-ing).
   New approach to collection of abandoned segments: When a thread terminates the segments it owns are abandoned
   (containing still live objects) and these can be
-  reclaimed by other threads. We no longer use a list of abandoned segments but this is now done using bitmaps in arena's 
+  reclaimed by other threads. We no longer use a list of abandoned segments but this is now done using bitmaps in arena's
   which is more concurrent (and more aggressive). Abandoned memory can now also be reclaimed if a thread frees an object
   in an abandoned page (which can be disabled using `mi_option_abandoned_reclaim_on_free`). The option
   `mi_option_max_segment_reclaim` gives a maximum percentage of abandoned segments that can be reclaimed per try (=10%).
-* 2023-04-24, `v1.8.2`, `v2.1.2`: Fixes build issues on freeBSD, musl, and C17 (UE 5.1.1). Reduce code size/complexity 
+* 2023-04-24, `v1.8.2`, `v2.1.2`: Fixes build issues on freeBSD, musl, and C17 (UE 5.1.1). Reduce code size/complexity
   by removing regions and segment-cache's and only use arenas with improved memory purging -- this may improve memory
   usage as well for larger services. Renamed options for consistency. Improved Valgrind and ASAN checking.
- 
+
 * 2023-04-03, `v1.8.1`, `v2.1.1`: Fixes build issues on some platforms.
 
 * 2023-03-29, `v1.8.0`, `v2.1.0`: Improved support for dynamic overriding on Windows 11. Improved tracing precision
@@ -106,14 +106,14 @@ memory usage
   abstraction layer to make it easier to port and separate platform dependent code (in `src/prim`). Fixed C++ STL
   compilation on older Microsoft C++ compilers, and various small bug fixes.
 
 * 2022-12-23, `v1.7.9`, `v2.0.9`: Supports building with [asan](#asan) and improved [Valgrind](#valgrind) support.
-  Support arbitrary large alignments (in particular for `std::pmr` pools). 
-  Added C++ STL allocators attached to a specific heap (thanks @vmarkovtsev). 
-  Heap walks now visit all object (including huge objects). Support Windows nano server containers (by Johannes Schindelin,@dscho). 
+  Support arbitrarily large alignments (in particular for `std::pmr` pools).
+  Added C++ STL allocators attached to a specific heap (thanks @vmarkovtsev).
+  Heap walks now visit all objects (including huge objects). Support Windows nano server containers (by Johannes Schindelin, @dscho).
   Various small bug fixes.
 
 * 2022-11-03, `v1.7.7`, `v2.0.7`: Initial support for [Valgrind](#valgrind) for leak testing and heap block overflow detection. Initial
-  support for attaching heaps to a speficic memory area (only in v2). Fix `realloc` behavior for zero size blocks, remove restriction to integral multiple of the alignment in `alloc_align`, improved aligned allocation performance, reduced contention with many threads on few processors (thank you @dposluns!), vs2022 support, support `pkg-config`, .
+  support for attaching heaps to a specific memory area (only in v2). Fix `realloc` behavior for zero size blocks, remove restriction to integral multiple of the alignment in `alloc_align`, improved aligned allocation performance, reduced contention with many threads on few processors (thank you @dposluns!), vs2022 support, support `pkg-config`.
 
 * 2022-04-14, `v1.7.6`, `v2.0.6`: fix fallback path for aligned OS allocation on Windows, improve Windows aligned
   allocation even when compiling with older SDK's, fix dynamic overriding on macOS Monterey, fix MSVC C++ dynamic overriding, fix
@@ -295,14 +295,14 @@ You can set further options either programmatically (using [`mi_option_set`](htt
 
 Advanced options:
 
-- `MIMALLOC_ARENA_EAGER_COMMIT=2`: turns on eager commit for the large arenas (usually 1GiB) from which mimalloc 
-  allocates segments and pages. Set this to 2 (default) to 
-  only enable this on overcommit systems (e.g. Linux). Set this to 1 to enable explicitly on other systems 
-  as well (like Windows or macOS) which may improve performance (as the whole arena is committed at once). 
-  Note that eager commit only increases the commit but not the actual the peak resident set 
+- `MIMALLOC_ARENA_EAGER_COMMIT=2`: turns on eager commit for the large arenas (usually 1GiB) from which mimalloc
+  allocates segments and pages. Set this to 2 (default) to
+  only enable this on overcommit systems (e.g. Linux). Set this to 1 to enable explicitly on other systems
+  as well (like Windows or macOS) which may improve performance (as the whole arena is committed at once).
+  Note that eager commit only increases the commit but not the actual peak resident set
   (rss) so it is generally ok to enable this.
-- `MIMALLOC_PURGE_DELAY=N`: the delay in `N` milli-seconds (by default `10`) after which mimalloc will purge 
-  OS pages that are not in use. This signals to the OS that the underlying physical memory can be reused which 
+- `MIMALLOC_PURGE_DELAY=N`: the delay in `N` milli-seconds (by default `10`) after which mimalloc will purge
+  OS pages that are not in use. This signals to the OS that the underlying physical memory can be reused which
   can reduce memory fragmentation especially in long running (server) programs. Setting `N` to `0` purges immediately when
   a page becomes unused which can improve memory usage but also decreases performance. Setting `N` to a higher
   value like `100` can improve performance (sometimes by a lot) at the cost of potentially using more memory at times.
@@ -310,7 +310,7 @@ Advanced options:
 - `MIMALLOC_PURGE_DECOMMITS=1`: By default "purging" memory means unused memory is decommitted (`MEM_DECOMMIT` on Windows,
   `MADV_DONTNEED` (which decreases rss immediately) on `mmap` systems). Set this to 0 to instead "reset" unused memory on a purge
   (`MEM_RESET` on Windows, generally `MADV_FREE` (which does not decrease rss immediately) on `mmap` systems).
-  Mimalloc generally does not "free" OS memory but only "purges" OS memory, in other words, it tries to keep virtual 
+  Mimalloc generally does not "free" OS memory but only "purges" OS memory, in other words, it tries to keep virtual
   address ranges and decommits within those ranges (to make the underlying physical memory available to other processes).
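As the hunk above notes, these options can also be set programmatically via `mi_option_set`. A minimal sketch, assuming the standard option API from `mimalloc.h` (the `mi_option_*` enum values mirror the `MIMALLOC_*` environment variables):

```c
// Minimal sketch: configure the options discussed above from code rather than
// the environment. Assumes the mi_option_* enum values from <mimalloc.h>
// that mirror the MIMALLOC_* environment variables.
#include <mimalloc.h>

int main(void) {
  mi_option_set(mi_option_arena_eager_commit, 2);  // 2 = eager commit only on overcommit systems
  mi_option_set(mi_option_purge_delay, 100);       // purge unused OS pages after 100 milliseconds
  mi_option_set(mi_option_purge_decommits, 1);     // purge by decommitting rather than resetting
  void* p = mi_malloc(42);                         // allocations now follow the configured options
  mi_free(p);
  return 0;
}
```

Set such options before the first allocation, since some are only read during initialization.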
 Further options for large workloads and services:
@@ -320,14 +320,14 @@ Further options for large workloads and services:
   the actual NUMA nodes is fine and will only cause threads to potentially allocate more memory across actual NUMA
   nodes (but this can happen in any case as NUMA local allocation is always a best effort but not guaranteed).
 - `MIMALLOC_ALLOW_LARGE_OS_PAGES=1`: use large OS pages (2 or 4MiB) when available; for some workloads this can significantly
-  improve performance. When this option is disabled, it also disables transparent huge pages (THP) for the process 
+  improve performance. When this option is disabled, it also disables transparent huge pages (THP) for the process
   (on Linux and Android). Use `MIMALLOC_VERBOSE` to check if the large OS pages are enabled -- usually one needs
   to explicitly give permissions for large OS pages (as on [Windows][windows-huge] and [Linux][linux-huge]). However, sometimes
   the OS is very slow to reserve contiguous physical memory for large OS pages so use with care on systems that
   can have fragmented memory (for that reason, we generally recommend to use `MIMALLOC_RESERVE_HUGE_OS_PAGES` instead whenever possible).
 - `MIMALLOC_RESERVE_HUGE_OS_PAGES=N`: where `N` is the number of 1GiB _huge_ OS pages. This reserves the huge pages
   at startup and sometimes this can give a large (latency) performance improvement on big workloads.
-  Usually it is better to not use `MIMALLOC_ALLOW_LARGE_OS_PAGES=1` in combination with this setting. Just like large 
+  Usually it is better to not use `MIMALLOC_ALLOW_LARGE_OS_PAGES=1` in combination with this setting. Just like large
   OS pages, use with care as reserving contiguous physical memory can take a long time when memory is fragmented
   (but reserving the huge pages is done at startup only once).
@@ -417,10 +417,10 @@ the [shell](https://stackoverflow.com/questions/43941322/dyld-insert-libraries-i
 
 ### Dynamic Override on Windows
 
-Dynamically overriding on mimalloc on Windows 
+Dynamically overriding mimalloc on Windows
 is robust and has the particular advantage to be able to redirect all malloc/free calls
 that go through the (dynamic) C runtime allocator, including those from other DLL's or libraries.
-As it intercepts all allocation calls on a low level, it can be used reliably 
+As it intercepts all allocation calls on a low level, it can be used reliably
 on large programs that include other 3rd party components.
 There are four requirements to make the overriding work robustly:
 
@@ -429,7 +429,7 @@ There are four requirements to make the overriding work robustly:
 2. To ensure the `mimalloc-override.dll` is loaded at run-time it is easiest to insert some
    call to the mimalloc API in the `main` function, like `mi_version()`
    (or use the `/INCLUDE:mi_version` switch on the linker). See the `mimalloc-override-test` project
-   for an example on how to use this. 
+   for an example of how to use this (a minimal sketch also follows the Tools section below).
 3. The [`mimalloc-redirect.dll`](bin) (or `mimalloc-redirect32.dll`) must be put
    in the same folder as the main `mimalloc-override.dll` at runtime (as it is a dependency of that DLL).
    The redirection DLL ensures that all calls to the C runtime malloc API get redirected to
@@ -439,7 +439,7 @@ There are four requirements to make the overriding work robustly:
 
 For best performance on Windows with C++, it is also recommended to override the
 `new`/`delete` operations (by including
-[`mimalloc-new-delete.h`](include/mimalloc-new-delete.h) 
+[`mimalloc-new-delete.h`](include/mimalloc-new-delete.h)
 in a single(!) source file in your project).
 
 The environment variable `MIMALLOC_DISABLE_REDIRECT=1` can be used to disable dynamic
@@ -476,9 +476,9 @@ under your control or otherwise mixing of pointers from different heaps may occu
 
 # Tools
 
 Generally, we recommend using the standard allocator with memory tracking tools, but mimalloc
-can also be build to support the [address sanitizer][asan] or the excellent [Valgrind] tool. 
+can also be built to support the [address sanitizer][asan] or the excellent [Valgrind] tool.
 Moreover, it can be built to support Windows event tracing ([ETW]).
-This has a small performance overhead but does allow detecting memory leaks and byte-precise 
+This has a small performance overhead but does allow detecting memory leaks and byte-precise
 buffer overflows directly on final executables.
 See also the `test/test-wrong.c` file to test with various tools.
 
 ## Valgrind
@@ -523,7 +523,7 @@ To build with the address sanitizer, use the `-DMI_TRACK_ASAN=ON` cmake option:
 > cmake ../.. -DMI_TRACK_ASAN=ON
 ```
 
-This can also be combined with secure mode or debug mode. 
+This can also be combined with secure mode or debug mode.
 You can then run your programs as:
 
 ```
 ```
 
 When you link a program with an address sanitizer build of mimalloc, you should
-generally compile that program too with the address sanitizer enabled. 
+generally compile that program too with the address sanitizer enabled.
 For example, assuming you build mimalloc in `out/debug`:
 
 ```
 clang -g -o test-wrong -Iinclude test/test-wrong.c out/debug/libmimalloc-asan-debug.a -lpthread
 ```
 
 Since the address sanitizer redirects the standard allocation functions, on some platforms (macOSX for example)
 it is required to compile mimalloc with `-DMI_OVERRIDE=OFF`.
-Adress sanitizer support is in its initial development -- please report any issues.
+Address sanitizer support is in its initial development -- please report any issues.
 
 [asan]: https://github.com/google/sanitizers/wiki/AddressSanitizer
 
 ## ETW
 
 Event tracing for Windows ([ETW]) provides a high performance way to capture all allocations through
-mimalloc and analyze them later. To build with ETW support, use the `-DMI_TRACK_ETW=ON` cmake option. 
+mimalloc and analyze them later. To build with ETW support, use the `-DMI_TRACK_ETW=ON` cmake option.
 
-You can then capture an allocation trace using the Windows performance recorder (WPR), using the 
+You can then capture an allocation trace using the Windows performance recorder (WPR), using the
 `src/prim/windows/etw-mimalloc.wprp` profile. In an admin prompt, you can use:
 
 ```
 > wpr -start src\prim\windows\etw-mimalloc.wprp -filemode
 > 
 > wpr -stop .etl
 ```
-and then open `.etl` in the Windows Performance Analyzer (WPA), or 
+and then open `.etl` in the Windows Performance Analyzer (WPA), or
 use a tool like [TraceControl] that is specialized for analyzing mimalloc traces.
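For requirement 2 of the Windows override section above, a minimal sketch of a `main` that creates the needed static dependency on the mimalloc API (the `mi_version()` call exists only to force `mimalloc-override.dll` to be loaded):

```c
// Minimal sketch for requirement 2 above: reference the mimalloc API once so
// that mimalloc-override.dll is loaded at run-time and redirection kicks in.
#include <mimalloc.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  printf("mimalloc version: %d\n", mi_version());  // forces the override DLL to load
  void* p = malloc(100);  // now routed through mimalloc by the redirection DLL
  free(p);
  return 0;
}
```

Linking with the `/INCLUDE:mi_version` switch achieves the same without the explicit call.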
 
 [ETW]: https://learn.microsoft.com/en-us/windows-hardware/test/wpt/event-tracing-for-windows
 
diff --git a/src/arena.c b/src/arena.c
index 3bb8f502..e0223e7f 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -284,7 +284,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
   return p;
 }
 
-// allocate in a speficic arena
+// allocate in a specific arena
 static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment,
                                       bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
 {
@@ -984,5 +984,3 @@ int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserv
   if (err==0 && pages_reserved!=NULL) *pages_reserved = pages;
   return err;
 }
-
-
diff --git a/src/options.c b/src/options.c
index 462a7c71..71c43e9c 100644
--- a/src/options.c
+++ b/src/options.c
@@ -94,10 +94,10 @@ static mi_option_desc_t options[_mi_option_last] =
   { 1, UNINIT, MI_OPTION(abandoned_reclaim_on_free) },// reclaim an abandoned segment on a free
   { 0, UNINIT, MI_OPTION(disallow_arena_alloc) },     // 1 = do not use arena's for allocation (except if using specific arena id's)
   { 400, UNINIT, MI_OPTION(retry_on_oom) },           // windows only: retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries.
-#if defined(MI_VISIT_ABANDONED) 
-  { 1, INITIALIZED, MI_OPTION(visit_abandoned) },     // allow visiting heap blocks in abandonded segments; requires taking locks during reclaim.
+#if defined(MI_VISIT_ABANDONED)
+  { 1, INITIALIZED, MI_OPTION(visit_abandoned) },     // allow visiting heap blocks in abandoned segments; requires taking locks during reclaim.
 #else
-  { 0, UNINIT, MI_OPTION(visit_abandoned) }, 
+  { 0, UNINIT, MI_OPTION(visit_abandoned) },
 #endif
 };
 
@@ -286,7 +286,7 @@ static _Atomic(size_t) warning_count;   // = 0;  // when >= max_warning_count stop
 // (recursively) invoke malloc again to allocate space for the thread local
 // variables on demand. This is why we use a _mi_preloading test on such
 // platforms. However, the C code generator may move the initial thread local address
-// load before the `if` and we therefore split it out in a separate funcion.
+// load before the `if` and we therefore split it out in a separate function.
 static mi_decl_thread bool recurse = false;
 
 static mi_decl_noinline bool mi_recurse_enter_prim(void) {
@@ -491,7 +491,7 @@ static void mi_option_init(mi_option_desc_t* desc) {
     char* end = buf;
     long value = strtol(buf, &end, 10);
     if (mi_option_has_size_in_kib(desc->option)) {
-      // this option is interpreted in KiB to prevent overflow of `long` for large allocations 
+      // this option is interpreted in KiB to prevent overflow of `long` for large allocations
       // (long is 32-bit on 64-bit windows, which allows for 4TiB max.)
       size_t size = (value < 0 ? 0 : (size_t)value);
       bool overflow = false;
diff --git a/src/prim/unix/prim.c b/src/prim/unix/prim.c
index 63a36f25..78080e88 100644
--- a/src/prim/unix/prim.c
+++ b/src/prim/unix/prim.c
@@ -763,7 +763,7 @@ bool _mi_prim_getenv(const char* name, char* result, size_t result_size) {
 #include <CommonCrypto/CommonRandom.h>
 bool _mi_prim_random_buf(void* buf, size_t buf_len) {
-  // We prefere CCRandomGenerateBytes as it returns an error code while arc4random_buf
+  // We prefer CCRandomGenerateBytes as it returns an error code while arc4random_buf
   // may fail silently on macOS. See PR #390, and
   return (CCRandomGenerateBytes(buf, buf_len) == kCCSuccess);
 }
diff --git a/src/prim/windows/prim.c b/src/prim/windows/prim.c
index bd874f9b..22f787de 100644
--- a/src/prim/windows/prim.c
+++ b/src/prim/windows/prim.c
@@ -50,7 +50,7 @@ typedef NTSTATUS (__stdcall *PNtAllocateVirtualMemoryEx)(HANDLE, PVOID*, SIZE_T*
 static PVirtualAlloc2 pVirtualAlloc2 = NULL;
 static PNtAllocateVirtualMemoryEx pNtAllocateVirtualMemoryEx = NULL;
 
-// Similarly, GetNumaProcesorNodeEx is only supported since Windows 7
+// Similarly, GetNumaProcessorNodeEx is only supported since Windows 7
 typedef struct MI_PROCESSOR_NUMBER_S { WORD Group; BYTE Number; BYTE Reserved; } MI_PROCESSOR_NUMBER;
 typedef VOID (__stdcall *PGetCurrentProcessorNumberEx)(MI_PROCESSOR_NUMBER* ProcNumber);
@@ -658,4 +658,3 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
 }
 
 #endif
-
diff --git a/src/segment.c b/src/segment.c
index 54a917ea..b9bdb9b7 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -32,7 +32,7 @@ static uint8_t* mi_segment_raw_page_start(const mi_segment_t* segment, const mi_
   (i.e. we are careful to not touch the memory until we actually allocate a block there)
 
   If a thread ends, it "abandons" pages that still contain live blocks.
-  Such segments are abondoned and these can be reclaimed by still running threads,
+  Such segments are abandoned and these can be reclaimed by still running threads,
   (much like work-stealing).
 -------------------------------------------------------------------------------- */
@@ -276,7 +276,7 @@ static bool mi_page_ensure_committed(mi_segment_t* segment, mi_page_t* page, mi_
 
 // we re-use the `free` field for the expiration counter. Since this is a
 // pointer size field while the clock is always 64-bit we need to guard
-// against overflow, we use substraction to check for expiry which works
+// against overflow, we use subtraction to check for expiry which works
 // as long as the reset delay is under (2^30 - 1) milliseconds (~12 days)
 static uint32_t mi_page_get_expire( mi_page_t* page ) {
   return (uint32_t)((uintptr_t)page->free);
@@ -294,7 +294,7 @@ static void mi_page_purge_set_expire(mi_page_t* page) {
 
 // we re-use the `free` field for the expiration counter. Since this is a
 // pointer size field while the clock is always 64-bit we need to guard
-// against overflow, we use substraction to check for expiry which work
+// against overflow, we use subtraction to check for expiry which works
 // as long as the reset delay is under (2^30 - 1) milliseconds (~12 days)
 static bool mi_page_purge_is_expired(mi_page_t* page, mi_msecs_t now) {
   int32_t expire = (int32_t)mi_page_get_expire(page);
@@ -778,7 +778,7 @@ When a block is freed in an abandoned segment, the segment
 is reclaimed into that thread.
 
 Moreover, if threads are looking for a fresh segment, they
-will first consider abondoned segments -- these can be found
+will first consider abandoned segments -- these can be found
 by scanning the arena memory (segments outside arena memory are only reclaimed by a free).
 ----------------------------------------------------------- */
@@ -995,7 +995,7 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size,
 {
   mi_assert(segment->subproc == heap->tld->segments.subproc); // cursor only visits segments in our sub-process
   segment->abandoned_visits++;
-  // todo: should we respect numa affinity for abondoned reclaim? perhaps only for the first visit?
+  // todo: should we respect numa affinity for abandoned reclaim? perhaps only for the first visit?
   // todo: an arena exclusive heap will potentially visit many abandoned unsuitable segments and use many tries
   // Perhaps we can skip non-suitable ones in a better way?
   bool is_suitable = _mi_heap_memid_is_suitable(heap, segment->memid);
diff --git a/test/test-stress.c b/test/test-stress.c
index f9b3c9d6..58d6a6a1 100644
--- a/test/test-stress.c
+++ b/test/test-stress.c
@@ -26,11 +26,11 @@ terms of the MIT license.
 //
 // argument defaults
 #if defined(MI_TSAN)     // with thread-sanitizer reduce the threads to test within the azure pipeline limits
-static int THREADS = 8; 
+static int THREADS = 8;
 static int SCALE   = 25;
 static int ITER    = 200;
-#elif defined(MI_UBSAN)  // with undefined behavious sanitizer reduce parameters to stay within the azure pipeline limits
-static int THREADS = 8; 
+#elif defined(MI_UBSAN)  // with undefined behaviour sanitizer reduce parameters to stay within the azure pipeline limits
+static int THREADS = 8;
 static int SCALE   = 25;
 static int ITER    = 20;
 #else
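The expiration comments in the `segment.c` hunks above describe a wrap-around-safe expiry test: the 64-bit millisecond clock is truncated to a 32-bit counter stored in the page's `free` field, and a signed subtraction keeps the comparison valid across wrap-around as long as the reset delay stays under the stated (2^30 - 1) millisecond bound. A standalone sketch of that technique (names here are illustrative, not mimalloc's actual code):

```c
// Illustrative sketch of the subtraction-based expiry check described in the
// segment.c comments above (hypothetical names, not mimalloc's own code).
#include <stdbool.h>
#include <stdint.h>

static bool expiry_is_past(uint32_t expire, int64_t now_msecs) {
  // Truncate the 64-bit clock to 32 bits and subtract: the signed difference
  // is non-negative exactly when `now` is at or past `expire`, even if the
  // 32-bit counter wrapped in between, provided delays stay below 2^31 ms.
  int32_t diff = (int32_t)((uint32_t)now_msecs - expire);
  return diff >= 0;
}
```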