From 324dbc874a1a0193e60166d70b7655696b9e0ba9 Mon Sep 17 00:00:00 2001 From: doodspav Date: Wed, 13 Nov 2024 21:26:31 +0000 Subject: [PATCH] GHI #35 Add std implementation (#36) - add macro wrappers for direct and cmpxchg implementations - implement all operations using c11 std atomics --- .github/workflows/test.yml | 3 + CMakePresets.json | 8 +- cmake/check/HasFunction.cmake | 92 ++ cmake/check/HasHeader.cmake | 22 +- cmake/check/HasKeyword.cmake | 67 +- cmake/in/_config.h.in | 182 ++++ include/patomic/api/core.h | 3 - include/patomic/patomic.h | 16 +- src/impl/CMakeLists.txt | 1 + src/impl/null/null.c | 16 +- src/impl/null/null.h | 14 +- src/impl/register.h | 7 +- src/impl/std/CMakeLists.txt | 5 + src/impl/std/std.c | 791 +++++++++++++++ src/impl/std/std.h | 85 ++ src/include/patomic/CMakeLists.txt | 1 + src/include/patomic/config.h | 182 ++++ src/include/patomic/macros/CMakeLists.txt | 2 + src/include/patomic/macros/restrict.h | 27 + src/include/patomic/macros/static_assert.h | 16 + src/include/patomic/stdlib/CMakeLists.txt | 2 + src/include/patomic/stdlib/assert.h | 2 +- src/include/patomic/stdlib/stdalign.h | 100 ++ src/include/patomic/stdlib/string.h | 41 + src/include/patomic/wrapped/CMakeLists.txt | 6 + src/include/patomic/wrapped/base.h | 101 ++ src/include/patomic/wrapped/cmpxchg.h | 1022 ++++++++++++++++++++ src/include/patomic/wrapped/direct.h | 901 +++++++++++++++++ src/patomic.c | 18 +- src/stdlib/CMakeLists.txt | 2 + src/stdlib/assert.c | 12 +- src/stdlib/sort.c | 8 +- src/stdlib/stdalign.c | 31 + src/stdlib/string.c | 14 + 34 files changed, 3731 insertions(+), 69 deletions(-) create mode 100644 src/impl/std/CMakeLists.txt create mode 100644 src/impl/std/std.c create mode 100644 src/impl/std/std.h create mode 100644 src/include/patomic/macros/restrict.h create mode 100644 src/include/patomic/macros/static_assert.h create mode 100644 src/include/patomic/stdlib/stdalign.h create mode 100644 src/include/patomic/stdlib/string.h create mode 100644 
src/include/patomic/wrapped/CMakeLists.txt create mode 100644 src/include/patomic/wrapped/base.h create mode 100644 src/include/patomic/wrapped/cmpxchg.h create mode 100644 src/include/patomic/wrapped/direct.h create mode 100644 src/stdlib/stdalign.c create mode 100644 src/stdlib/string.c diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index c71b95461..57c077994 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -9,6 +9,7 @@ on: jobs: test-native: + if: false strategy: matrix: # verbose labels make things easier to read in GitHub Actions @@ -55,6 +56,7 @@ jobs: architecture: ${{ matrix.architecture }} test-qemu: + if: false strategy: matrix: # architecture gets converted to triple @@ -133,6 +135,7 @@ jobs: skip_llvm: ${{ matrix.skip_llvm == true }} publish-results: + if: false runs-on: ubuntu-latest needs: - test-native diff --git a/CMakePresets.json b/CMakePresets.json index e43a605f2..394919fff 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -56,7 +56,7 @@ "name": "_patomic-ci-flags-ansi-gnu", "hidden": true, "cacheVariables": { - "CMAKE_C_FLAGS": "-Wall -Wextra -Werror -Wpedantic -Wno-unused-function -Wno-atomic-alignment", + "CMAKE_C_FLAGS": "-Wall -Wextra -Werror -Wpedantic -Wno-unused-function -Wno-atomic-alignment -Wno-unused-local-typedef", "CMAKE_C_STANDARD": "90" } }, @@ -94,7 +94,7 @@ "name": "_patomic-ci-flags-warning-clang", "hidden": true, "cacheVariables": { - "CMAKE_C_FLAGS_INIT": "-Weverything -Werror -Wpedantic -Wno-c++98-compat -Wno-covered-switch-default -Wno-padded -Wno-unused-function -Wno-atomic-alignment -Wno-poison-system-directories -Wno-reserved-identifier -Wno-documentation-unknown-command", + "CMAKE_C_FLAGS_INIT": "-Weverything -Werror -Wpedantic -Wno-c++98-compat -Wno-covered-switch-default -Wno-padded -Wno-unused-function -Wno-atomic-alignment -Wno-poison-system-directories -Wno-reserved-identifier -Wno-documentation-unknown-command -Wno-unused-local-typedef", 
"CMAKE_CXX_FLAGS_INIT": "-Wall -Wextra -Werror -Wpedantic -Wno-c++17-attribute-extensions" } }, @@ -102,7 +102,7 @@ "name": "_patomic-ci-flags-warning-gcc", "hidden": true, "cacheVariables": { - "CMAKE_C_FLAGS_INIT": "-Wall -Wextra -Werror -Wpedantic -Wshadow -Wcast-align -Wconversion -Wsign-conversion -Wnull-dereference -Wdouble-promotion -Wstrict-prototypes -Wmisleading-indentation -Wduplicated-branches -Wlogical-op -Wdeclaration-after-statement -Wno-unused-function", + "CMAKE_C_FLAGS_INIT": "-Wall -Wextra -Werror -Wpedantic -Wshadow -Wcast-align -Wconversion -Wsign-conversion -Wnull-dereference -Wdouble-promotion -Wstrict-prototypes -Wmisleading-indentation -Wduplicated-branches -Wlogical-op -Wdeclaration-after-statement -Wno-unused-function -Wno-unused-local-typedef", "CMAKE_CXX_FLAGS_INIT": "-Wall -Wextra -Werror -Wpedantic" } }, @@ -110,7 +110,7 @@ "name": "_patomic-ci-flags-warning-msvc", "hidden": true, "cacheVariables": { - "CMAKE_C_FLAGS_INIT": "/permissive- /volatile:iso /Wall /WX /wd4464 /wd4132 /wd4820 /wd4127 /wd5045 /wd4710 /wd4711 /wd4668", + "CMAKE_C_FLAGS_INIT": "/permissive- /volatile:iso /Wall /WX /wd4464 /wd4132 /wd4820 /wd4127 /wd5045 /wd4710 /wd4711 /wd4668 /wd4146", "CMAKE_CXX_FLAGS_INIT": "/permissive- /volatile:iso /W4 /WX" } }, diff --git a/cmake/check/HasFunction.cmake b/cmake/check/HasFunction.cmake index ac29a419b..309caeaf1 100644 --- a/cmake/check/HasFunction.cmake +++ b/cmake/check/HasFunction.cmake @@ -7,6 +7,14 @@ # | COMPILER_HAS_BUILTIN_UNREACHABLE | '__builtin_unreachable(void)' is available as a function | # | COMPILER_HAS_WCHAR_FWIDE | '' header is available and makes 'fwide(FILE*, int)' available as a function | # | COMPILER_HAS_WCHAR_FWPRINTF | '' header is available and makes 'fwprintf(FILE*, const wchar_t*, ...)' available as a function | +# | COMPILER_HAS_C23_ALIGNOF | 'alignof(T)' is available as a function | +# | COMPILER_HAS_C23_ALIGNOF_EXTN | '__extension__ alignof(T)' is available as a function | +# | 
COMPILER_HAS_C11_ALIGNOF | '_Alignof(T)' is available as a function | +# | COMPILER_HAS_C11_ALIGNOF_EXTN | '__extension__ _Alignof(T)' is available as a function | +# | COMPILER_HAS_MS_ALIGNOF | '__alignof(T)' is available as a function | +# | COMPILER_HAS_MS_ALIGNOF_EXTN | '__extension__ __alignof(T)' is available as a function | +# | COMPILER_HAS_GNU_ALIGNOF | '__alignof__(T)' is available as a function | +# | COMPILER_HAS_GNU_ALIGNOF_EXTN | '__extension__ __alignof__(T)' is available as a function | # ----------------------------------------------------------------------------------------------------------------------------------------------- @@ -55,3 +63,87 @@ check_c_source_compiles_or_zero( WILL_FAIL_IF_ANY_NOT ${COMPILER_HAS_WCHAR_H} ) + +# 'alignof(T)' is available as a function +check_c_source_compiles_or_zero( + SOURCE + "int main(void) { return (int) alignof(int); }" + OUTPUT_VARIABLE + COMPILER_HAS_C23_ALIGNOF +) + +# '__extension__ alignof(T)' is available as a function +check_c_source_compiles_or_zero( + SOURCE + "int main(void) { return (int) __extension__ alignof(int); }" + OUTPUT_VARIABLE + COMPILER_HAS_C23_ALIGNOF_EXTN + WILL_SUCCEED_IF_ALL + ${COMPILER_HAS_C23_ALIGNOF} + ${COMPILER_HAS_EXTN} + WILL_FAIL_IF_ANY_NOT + ${COMPILER_HAS_EXTN} +) + +# '_Alignof(T)' is available as a function +check_c_source_compiles_or_zero( + SOURCE + "int main(void) { return (int) _Alignof(int); }" + OUTPUT_VARIABLE + COMPILER_HAS_C11_ALIGNOF +) + +# '__extension__ _Alignof(T)' is available as a function +check_c_source_compiles_or_zero( + SOURCE + "int main(void) { return (int) __extension__ _Alignof(int); }" + OUTPUT_VARIABLE + COMPILER_HAS_C11_ALIGNOF_EXTN + WILL_SUCCEED_IF_ALL + ${COMPILER_HAS_C11_ALIGNOF} + ${COMPILER_HAS_EXTN} + WILL_FAIL_IF_ANY_NOT + ${COMPILER_HAS_EXTN} +) + +# '__alignof(T)' is available as a function +check_c_source_compiles_or_zero( + SOURCE + "int main(void) { return (int) __alignof(int); }" + OUTPUT_VARIABLE + COMPILER_HAS_MS_ALIGNOF +) + 
+# '__extension__ __alignof(T)' is available as a function +check_c_source_compiles_or_zero( + SOURCE + "int main(void) { return (int) __extension__ __alignof(int); }" + OUTPUT_VARIABLE + COMPILER_HAS_MS_ALIGNOF_EXTN + WILL_SUCCEED_IF_ALL + ${COMPILER_HAS_MS_ALIGNOF} + ${COMPILER_HAS_EXTN} + WILL_FAIL_IF_ANY_NOT + ${COMPILER_HAS_EXTN} +) + +# '__alignof__(T)' is available as a function +check_c_source_compiles_or_zero( + SOURCE + "int main(void) { return (int) __alignof__(int); }" + OUTPUT_VARIABLE + COMPILER_HAS_GNU_ALIGNOF +) + +# '__extension__ __alignof__(T)' is available as a function +check_c_source_compiles_or_zero( + SOURCE + "int main(void) { return (int) __extension__ __alignof__(int); }" + OUTPUT_VARIABLE + COMPILER_HAS_GNU_ALIGNOF_EXTN + WILL_SUCCEED_IF_ALL + ${COMPILER_HAS_GNU_ALIGNOF} + ${COMPILER_HAS_EXTN} + WILL_FAIL_IF_ANY_NOT + ${COMPILER_HAS_EXTN} +) diff --git a/cmake/check/HasHeader.cmake b/cmake/check/HasHeader.cmake index a98aa9d32..c0bf40f1e 100644 --- a/cmake/check/HasHeader.cmake +++ b/cmake/check/HasHeader.cmake @@ -1,13 +1,23 @@ # ---- Has Header ---- -# ---------------------------------------------------------- -# | Variable | Check | -# |=======================|================================| -# | COMPILER_HAS_STDINT_H | header is available | -# | COMPILER_HAS_WCHAR_H | header is available | -# ---------------------------------------------------------- +# ---------------------------------------------------------------- +# | Variable | Check | +# |==========================|===================================| +# | COMPILER_HAS_STDATOMIC_H | header is available | +# | COMPILER_HAS_STDINT_H | header is available | +# | COMPILER_HAS_WCHAR_H | header is available | +# ---------------------------------------------------------------- +# header is available +check_c_source_compiles_or_zero( + SOURCE + "#include \n\ + int main(void) {}" + OUTPUT_VARIABLE + COMPILER_HAS_STDATOMIC_H +) + # header is available check_c_source_compiles_or_zero( 
SOURCE diff --git a/cmake/check/HasKeyword.cmake b/cmake/check/HasKeyword.cmake index f723fa087..9c9621cee 100644 --- a/cmake/check/HasKeyword.cmake +++ b/cmake/check/HasKeyword.cmake @@ -1,10 +1,14 @@ # ---- Has Keyword ---- -# ---------------------------------------------------------------------- -# | Variable | Check | -# |========================|===========================================| -# | COMPILER_HAS_EXTN | '__extension__' is available as a keyword | -# ---------------------------------------------------------------------- +# ------------------------------------------------------------------------- +# | Variable | Check | +# |===========================|===========================================| +# | COMPILER_HAS_EXTN | '__extension__' is available as a keyword | +# | COMPILER_HAS_RESTRICT | 'restrict' is available as a keyword | +# | COMPILER_HAS_MS_RESTRICT | '__restrict' is available as a keyword | +# | COMPILER_HAS_GNU_RESTRICT | '__restrict__' is available as a keyword | +# | COMPILER_HAS_ATOMIC | '_Atomic' is available as a keyword | +# ------------------------------------------------------------------------- # '__extension__' is available as a keyword @@ -14,3 +18,56 @@ check_c_source_compiles_or_zero( OUTPUT_VARIABLE COMPILER_HAS_EXTN ) + +# 'restrict' is available as a keyword +check_c_source_compiles_or_zero( + SOURCE + "static int get(const int *const restrict p) { return *p; } \n\ + int main(void) { int x = 0; return get(&x); }" + OUTPUT_VARIABLE + COMPILER_HAS_RESTRICT +) + +# '__restrict' is available as a keyword +check_c_source_compiles_or_zero( + SOURCE + "static int get(const int *const __restrict p) { return *p; } \n\ + int main(void) { int x = 0; return get(&x); }" + OUTPUT_VARIABLE + COMPILER_HAS_MS_RESTRICT +) + +# '__restrict__' is available as a keyword +check_c_source_compiles_or_zero( + SOURCE + "static int get(const int *const __restrict__ p) { return *p; } \n\ + int main(void) { int x = 0; return get(&x); }" + 
OUTPUT_VARIABLE + COMPILER_HAS_GNU_RESTRICT +) + +# '_Atomic' is available as a keyword +check_c_source_compiles_or_zero( + SOURCE + "#include \n\ + int main(void) { \n\ + static const _Atomic(unsigned char) a1; \n\ + static const _Atomic(unsigned short) a2; \n\ + static const _Atomic(unsigned int) a3; \n\ + static const _Atomic(unsigned long) a4; \n\ + const volatile _Atomic(unsigned char) *const p1 = &a1; \n\ + const volatile _Atomic(unsigned short) *const p2 = &a2; \n\ + const volatile _Atomic(unsigned int) *const p3 = &a3; \n\ + const volatile _Atomic(unsigned long) *const p4 = &a4; \n\ + unsigned long sum = 0; \n\ + sum += (unsigned long) atomic_load(p1); \n\ + sum += (unsigned long) atomic_load(p2); \n\ + sum += (unsigned long) atomic_load(p3); \n\ + sum += (unsigned long) atomic_load(p4); \n\ + return (int) sum; \n\ + }" + OUTPUT_VARIABLE + COMPILER_HAS_ATOMIC + WILL_FAIL_IF_ANY_NOT + ${COMPILER_HAS_STDATOMIC_H} +) diff --git a/cmake/in/_config.h.in b/cmake/in/_config.h.in index f1c0fae81..f3accf598 100644 --- a/cmake/in/_config.h.in +++ b/cmake/in/_config.h.in @@ -16,6 +16,76 @@ #endif +#ifndef PATOMIC_HAS_RESTRICT + /** + * @addtogroup config.safe + * + * @brief + * 'restrict' is available as a keyword. + * + * @note + * Usually required: C99. + */ + #define PATOMIC_HAS_RESTRICT @COMPILER_HAS_RESTRICT@ +#endif + + +#ifndef PATOMIC_HAS_MS_RESTRICT + /** + * @addtogroup config.safe + * + * @brief + * '__restrict' is available as a keyword. + * + * @note + * Usually required: Microsoft compatible(-ish) compiler. + */ + #define PATOMIC_HAS_MS_RESTRICT @COMPILER_HAS_MS_RESTRICT@ +#endif + + +#ifndef PATOMIC_HAS_GNU_RESTRICT + /** + * @addtogroup config.safe + * + * @brief + * '__restrict__' is available as a keyword. + * + * @note + * Usually required: GNU compatible(-ish) compiler. 
+ */ + #define PATOMIC_HAS_GNU_RESTRICT @COMPILER_HAS_GNU_RESTRICT@ +#endif + + +#ifndef PATOMIC_HAS_ATOMIC + /** + * @addtogroup config.safe + * + * @brief + * '_Atomic' is available as a keyword. + * + * @note + * Usually required: C11. + */ + #define PATOMIC_HAS_ATOMIC @COMPILER_HAS_ATOMIC@ +#endif + + +#ifndef PATOMIC_HAS_STDATOMIC_H + /** + * @addtogroup config.safe + * + * @brief + * header is available. + * + * @note + * Usually required: C11. + */ + #define PATOMIC_HAS_STDATOMIC_H @COMPILER_HAS_STDATOMIC_H@ +#endif + + #ifndef PATOMIC_HAS_STDINT_H /** * @addtogroup config.safe @@ -345,4 +415,116 @@ #endif +#ifndef PATOMIC_HAS_C23_ALIGNOF + /** + * @addtogroup config.safe + * + * @brief + * 'alignof(T)' is available as a function. + * + * @note + * Usually requires: C23. + */ + #define PATOMIC_HAS_C23_ALIGNOF @COMPILER_HAS_C23_ALIGNOF@ +#endif + + +#ifndef PATOMIC_HAS_C23_ALIGNOF_EXTN + /** + * @addtogroup config.safe + * + * @brief + * '__extension__ alignof(T)' is available as a function. + * + * @note + * Usually requires: GNU compatible(-ish) compiler. + */ + #define PATOMIC_HAS_C23_ALIGNOF_EXTN @COMPILER_HAS_C23_ALIGNOF_EXTN@ +#endif + + +#ifndef PATOMIC_HAS_C11_ALIGNOF + /** + * @addtogroup config.safe + * + * @brief + * '_Alignof(T)' is available as a function. + * + * @note + * Usually requires: C11. + */ + #define PATOMIC_HAS_C11_ALIGNOF @COMPILER_HAS_C11_ALIGNOF@ +#endif + + +#ifndef PATOMIC_HAS_C11_ALIGNOF_EXTN + /** + * @addtogroup config.safe + * + * @brief + * '__extension__ _Alignof(T)' is available as a function. + * + * @note + * Usually requires: GNU compatible(-ish) compiler. + */ + #define PATOMIC_HAS_C11_ALIGNOF_EXTN @COMPILER_HAS_C11_ALIGNOF_EXTN@ +#endif + + +#ifndef PATOMIC_HAS_MS_ALIGNOF + /** + * @addtogroup config.safe + * + * @brief + * '__alignof(T)' is available as a function. + * + * @note + * Usually required: Microsoft compatible(-ish) compiler. 
+ */ + #define PATOMIC_HAS_MS_ALIGNOF @COMPILER_HAS_MS_ALIGNOF@ +#endif + + +#ifndef PATOMIC_HAS_MS_ALIGNOF_EXTN + /** + * @addtogroup config.safe + * + * @brief + * '__extension__ __alignof(T)' is available as a function. + * + * @note + * Usually required: GNU compatible(-ish) compiler. + */ + #define PATOMIC_HAS_MS_ALIGNOF_EXTN @COMPILER_HAS_MS_ALIGNOF_EXTN@ +#endif + + +#ifndef PATOMIC_HAS_GNU_ALIGNOF + /** + * @addtogroup config.safe + * + * @brief + * '__alignof__(T)' is available as a function. + * + * @note + * Usually required: GNU compatible(-ish) compiler. + */ + #define PATOMIC_HAS_GNU_ALIGNOF @COMPILER_HAS_GNU_ALIGNOF@ +#endif + + +#ifndef PATOMIC_HAS_GNU_ALIGNOF_EXTN + /** + * @addtogroup config.safe + * + * @brief + * '__extension__ __alignof__(T)' is available as a function. + * + * @note + * Usually required: GNU compatible(-ish) compiler. + */ + #define PATOMIC_HAS_GNU_ALIGNOF_EXTN @COMPILER_HAS_GNU_ALIGNOF_EXTN@ +#endif + + #endif /* PATOMIC_GENERATED_CONFIG_H */ diff --git a/include/patomic/api/core.h b/include/patomic/api/core.h index be282f199..9b79c3237 100644 --- a/include/patomic/api/core.h +++ b/include/patomic/api/core.h @@ -59,9 +59,6 @@ typedef struct { * transaction, and non-atomic transaction specific operations. */ patomic_ops_transaction_t ops; - /** @brief Alignment requirements for atomic operations. */ - patomic_align_t align; - /** @brief Recommended time and space bounds for atomic operations. */ patomic_transaction_recommended_t recommended; diff --git a/include/patomic/patomic.h b/include/patomic/patomic.h index 12f442efb..6111d6aae 100644 --- a/include/patomic/patomic.h +++ b/include/patomic/patomic.h @@ -35,7 +35,7 @@ extern "C" { * @param order * Memory order to implicitly use for all atomic operations. * - * @param opts + * @param options * One or more patomic_option_t flags combined. Passed on to each internal * implementation to be used in an unspecified manner. 
* @@ -57,7 +57,7 @@ PATOMIC_EXPORT patomic_t patomic_create( size_t byte_width, patomic_memory_order_t order, - unsigned int opts, + unsigned int options, unsigned int kinds, unsigned long ids ); @@ -74,7 +74,7 @@ patomic_create( * @param byte_width * Width in bytes of type to support. * - * @param opts + * @param options * One or more patomic_option_t flags combined. Passed on to each internal * implementation to be used in an unspecified manner. * @@ -95,7 +95,7 @@ patomic_create( PATOMIC_EXPORT patomic_explicit_t patomic_create_explicit( size_t byte_width, - unsigned int opts, + unsigned int options, unsigned int kinds, unsigned long ids ); @@ -116,7 +116,7 @@ patomic_create_explicit( * than one set of APIs using hardware support for performing lock-free * transactional operations. * - * @param opts + * @param options * One or more patomic_option_t flags combined. Passed on to each internal * implementation to be used in an unspecified manner. * @@ -130,14 +130,10 @@ patomic_create_explicit( * The implementation with the most efficient kind that supports at least a * single operation. If no such implementations exist, the NULL implementation * is returned. - * - * @note - * The alignment requirements returned by this function will always be valid - * even if no operations are supported. 
*/ PATOMIC_EXPORT patomic_transaction_t patomic_create_transaction( - unsigned int opts, + unsigned int options, unsigned int kinds, unsigned long ids ); diff --git a/src/impl/CMakeLists.txt b/src/impl/CMakeLists.txt index dd79bc131..e285d9648 100644 --- a/src/impl/CMakeLists.txt +++ b/src/impl/CMakeLists.txt @@ -1,5 +1,6 @@ # add all subdirectories add_subdirectory(null) +add_subdirectory(std) # add directory files to target target_sources(${target_name} PRIVATE diff --git a/src/impl/null/null.c b/src/impl/null/null.c index 1d4f682c7..3cdf3a022 100644 --- a/src/impl/null/null.c +++ b/src/impl/null/null.c @@ -7,7 +7,7 @@ patomic_t patomic_impl_create_null( const size_t byte_width, const patomic_memory_order_t order, - const unsigned int opts + const unsigned int options ) { /* zero all fields */ @@ -16,7 +16,7 @@ patomic_impl_create_null( /* ignore parameters */ PATOMIC_IGNORE_UNUSED(byte_width); PATOMIC_IGNORE_UNUSED(order); - PATOMIC_IGNORE_UNUSED(opts); + PATOMIC_IGNORE_UNUSED(options); /* set a valid minimal alignment */ impl.align.recommended = 1; @@ -30,7 +30,7 @@ patomic_impl_create_null( patomic_explicit_t patomic_impl_create_explicit_null( const size_t byte_width, - const unsigned int opts + const unsigned int options ) { /* zero all fields */ @@ -38,7 +38,7 @@ patomic_impl_create_explicit_null( /* ignore parameters */ PATOMIC_IGNORE_UNUSED(byte_width); - PATOMIC_IGNORE_UNUSED(opts); + PATOMIC_IGNORE_UNUSED(options); /* set a valid minimal alignment */ impl.align.recommended = 1; @@ -51,18 +51,14 @@ patomic_impl_create_explicit_null( patomic_transaction_t patomic_impl_create_transaction_null( - const unsigned int opts + const unsigned int options ) { /* zero all fields */ patomic_transaction_t impl = {0}; /* ignore parameters */ - PATOMIC_IGNORE_UNUSED(opts); - - /* set a valid minimal alignment */ - impl.align.recommended = 1; - impl.align.minimum = 1; + PATOMIC_IGNORE_UNUSED(options); /* return */ return impl; diff --git a/src/impl/null/null.h 
b/src/impl/null/null.h index 719d1a969..538ca44e1 100644 --- a/src/impl/null/null.h +++ b/src/impl/null/null.h @@ -3,8 +3,6 @@ #include -#include - /** * @addtogroup impl.null @@ -18,7 +16,7 @@ * @param order * Value is ignored. * - * @param opts + * @param options * Value is ignored. * * @return @@ -29,7 +27,7 @@ patomic_t patomic_impl_create_null( size_t byte_width, patomic_memory_order_t order, - unsigned int opts + unsigned int options ); @@ -42,7 +40,7 @@ patomic_impl_create_null( * @param byte_width * Value is ignored. * - * @param opts + * @param options * Value is ignored. * * @return @@ -52,7 +50,7 @@ patomic_impl_create_null( patomic_explicit_t patomic_impl_create_explicit_null( size_t byte_width, - unsigned int opts + unsigned int options ); @@ -62,7 +60,7 @@ patomic_impl_create_explicit_null( * @brief * No operations are supported. * - * @param opts + * @param options * Value is ignored. * * @return @@ -71,7 +69,7 @@ patomic_impl_create_explicit_null( */ patomic_transaction_t patomic_impl_create_transaction_null( - unsigned int opts + unsigned int options ); diff --git a/src/impl/register.h b/src/impl/register.h index a77ac9f84..95644c7a3 100644 --- a/src/impl/register.h +++ b/src/impl/register.h @@ -2,6 +2,7 @@ #define PATOMIC_REGISTER_H #include "null/null.h" +#include "std/std.h" #include @@ -66,9 +67,9 @@ patomic_impl_register[] = { { patomic_id_STDC, patomic_kind_BLTN, - patomic_impl_create_null, - patomic_impl_create_explicit_null, - patomic_impl_create_transaction_null + patomic_impl_create_std, + patomic_impl_create_explicit_std, + patomic_impl_create_transaction_std } }; diff --git a/src/impl/std/CMakeLists.txt b/src/impl/std/CMakeLists.txt new file mode 100644 index 000000000..8f99c355a --- /dev/null +++ b/src/impl/std/CMakeLists.txt @@ -0,0 +1,5 @@ +# add directory files to target +target_sources(${target_name} PRIVATE + std.h + std.c +) diff --git a/src/impl/std/std.c b/src/impl/std/std.c new file mode 100644 index 000000000..6478a6c8e --- 
/dev/null +++ b/src/impl/std/std.c @@ -0,0 +1,791 @@ +#include "std.h" + +#include + +#include + + +#if PATOMIC_HAS_ATOMIC && PATOMIC_HAS_STDATOMIC_H && PATOMIC_HAS_IR_TWOS_COMPL + + +#include +#include + +#include +#include + +#include +#include + + +/* + * BASE: + * - store (direct) + * - load (direct) + */ +#define do_store_explicit(type, obj, des, order) \ + atomic_store_explicit(obj, des, order) +#define do_load_explicit(type, obj, order, res) \ + res = atomic_load_explicit(obj, order) + +#define PATOMIC_DEFINE_STORE_OP(type, name, vis_p, order) \ + PATOMIC_WRAPPED_DIRECT_DEFINE_OP_STORE( \ + _Atomic(type), type, \ + patomic_opimpl_store_##name, \ + vis_p, order, \ + do_store_explicit \ + ) + +#define PATOMIC_DEFINE_LOAD_OP(type, name, vis_p, order) \ + PATOMIC_WRAPPED_DIRECT_DEFINE_OP_LOAD( \ + _Atomic(type), type, \ + patomic_opimpl_load_##name, \ + vis_p, order, \ + do_load_explicit \ + ) + + +/* + * XCHG: + * - exchange (direct) + * - cmpxchg_weak (direct) + * - cmpxchg_strong (direct) + */ +#define do_exchange_explicit(type, obj, des, order, res) \ + res = atomic_exchange_explicit(obj, des, order) +#define do_cmpxchg_weak(type, obj, exp, des, succ, fail, ok) \ + ok = atomic_compare_exchange_weak_explicit(obj, &exp, des, succ, fail) +#define do_cmpxchg_strong(type, obj, exp, des, succ, fail, ok) \ + ok = atomic_compare_exchange_strong_explicit(obj, &exp, des, succ, fail) + +#define PATOMIC_DEFINE_XCHG_OPS_CREATE(type, name, vis_p, inv, order, ops) \ + PATOMIC_WRAPPED_DIRECT_DEFINE_OP_EXCHANGE( \ + _Atomic(type), type, \ + patomic_opimpl_exchange_##name, \ + vis_p, order, \ + do_exchange_explicit \ + ) \ + PATOMIC_WRAPPED_DIRECT_DEFINE_OP_CMPXCHG( \ + _Atomic(type), type, \ + patomic_opimpl_cmpxchg_weak_##name, \ + vis_p, inv, order, \ + do_cmpxchg_weak \ + ) \ + PATOMIC_WRAPPED_DIRECT_DEFINE_OP_CMPXCHG( \ + _Atomic(type), type, \ + patomic_opimpl_cmpxchg_strong_##name, \ + vis_p, inv, order, \ + do_cmpxchg_strong \ + ) \ + static patomic_##ops##_xchg_t \ + 
patomic_ops_xchg_create_##name(void) \ + { \ + patomic_##ops##_xchg_t pao; \ + pao.fp_exchange = patomic_opimpl_exchange_##name; \ + pao.fp_cmpxchg_weak = patomic_opimpl_cmpxchg_weak_##name; \ + pao.fp_cmpxchg_strong = patomic_opimpl_cmpxchg_strong_##name; \ + return pao; \ + } + + +/* + * BITWISE: + * - bit_test (direct) + * - bit_test_compl (cmpxchg) + * - bit_test_set (cmpxchg) + * - bit_test_reset (cmpxchg) + */ +#define do_bit_test_explicit(type, obj, offset, order, res) \ + do { \ + type mask = (type) ((type) 1 << offset); \ + mask &= atomic_load_explicit(obj, order); \ + res = (mask != (type) 0); \ + } \ + while (0) + +#define do_get_bit(type, exp, offset, bit) \ + do { \ + const type mask = (type) ((type) 1 << offset); \ + bit = (exp & mask) != 0; \ + } \ + while (0) + +#define do_make_desired_compl(type, exp, offset, des) \ + do { \ + const type mask = (type) ((type) 1 << offset); \ + des = (type) (exp ^ mask); \ + } \ + while (0) + +#define do_make_desired_set(type, exp, offset, des) \ + do { \ + const type mask = (type) ((type) 1 << offset); \ + des = (type) (exp | mask); \ + } \ + while (0) + +#define do_make_desired_reset(type, exp, offset, des) \ + do { \ + const type mask = (type) ((type) 1 << offset); \ + des = (type) (exp & (type) ~mask); \ + } \ + while (0) + +#define PATOMIC_DEFINE_BIT_TEST_OP(type, name, vis_p, order) \ + PATOMIC_WRAPPED_DIRECT_DEFINE_OP_BIT_TEST( \ + _Atomic(type), type, \ + patomic_opimpl_bit_test_##name, \ + vis_p, order, \ + do_bit_test_explicit \ + ) + +#define PATOMIC_DEFINE_BIT_TEST_MODIFY_OPS(type, name, vis_p, order) \ + PATOMIC_WRAPPED_CMPXCHG_DEFINE_OP_BIT_TEST_MODIFY( \ + _Atomic(type), type, \ + patomic_opimpl_bit_test_compl_##name, \ + vis_p, order, \ + do_cmpxchg_weak, \ + do_get_bit, do_make_desired_compl \ + ) \ + PATOMIC_WRAPPED_CMPXCHG_DEFINE_OP_BIT_TEST_MODIFY( \ + _Atomic(type), type, \ + patomic_opimpl_bit_test_set_##name, \ + vis_p, order, \ + do_cmpxchg_weak, \ + do_get_bit, do_make_desired_set \ + ) \ + 
PATOMIC_WRAPPED_CMPXCHG_DEFINE_OP_BIT_TEST_MODIFY( \ + _Atomic(type), type, \ + patomic_opimpl_bit_test_reset_##name, \ + vis_p, order, \ + do_cmpxchg_weak, \ + do_get_bit, do_make_desired_reset \ + ) + +/* create ops which support all memory orders */ +#define PATOMIC_DEFINE_BITWISE_OPS_CREATE_NO_LOAD(type, name, vis_p, order, ops) \ + PATOMIC_DEFINE_BIT_TEST_MODIFY_OPS(type, name, vis_p, order) \ + static patomic_##ops##_bitwise_t \ + patomic_ops_bitwise_create_##name(void) \ + { \ + patomic_##ops##_bitwise_t pao; \ + pao.fp_test = NULL; /* does not support release or acq_rel */ \ + pao.fp_test_compl = patomic_opimpl_bit_test_compl_##name; \ + pao.fp_test_set = patomic_opimpl_bit_test_set_##name; \ + pao.fp_test_reset = patomic_opimpl_bit_test_reset_##name; \ + return pao; \ + } + +/* order cannot be release or acq_rel */ +#define PATOMIC_DEFINE_BITWISE_OPS_CREATE_LOAD(type, name, vis_p, order, ops) \ + PATOMIC_DEFINE_BIT_TEST_OP(type, name, vis_p, order) \ + PATOMIC_DEFINE_BIT_TEST_MODIFY_OPS(type, name, vis_p, order) \ + static patomic_##ops##_bitwise_t \ + patomic_ops_bitwise_create_##name(void) \ + { \ + patomic_##ops##_bitwise_t pao; \ + pao.fp_test = patomic_opimpl_bit_test_##name; \ + pao.fp_test_compl = patomic_opimpl_bit_test_compl_##name; \ + pao.fp_test_set = patomic_opimpl_bit_test_set_##name; \ + pao.fp_test_reset = patomic_opimpl_bit_test_reset_##name; \ + return pao; \ + } + + +/* + * BINARY: + * - (fetch_)or (direct) + * - (fetch_)xor (direct) + * - (fetch_)and (direct) + * - (fetch_)not (cmpxchg) + */ +#define do_void_or_explicit(type, obj, arg, order) \ + PATOMIC_IGNORE_UNUSED(atomic_fetch_or_explicit(obj, arg, order)) +#define do_void_xor_explicit(type, obj, arg, order) \ + PATOMIC_IGNORE_UNUSED(atomic_fetch_xor_explicit(obj, arg, order)) +#define do_void_and_explicit(type, obj, arg, order) \ + PATOMIC_IGNORE_UNUSED(atomic_fetch_and_explicit(obj, arg, order)) + +#define do_fetch_or_explicit(type, obj, arg, order, res) \ + res = 
atomic_fetch_or_explicit(obj, arg, order) +#define do_fetch_xor_explicit(type, obj, arg, order, res) \ + res = atomic_fetch_xor_explicit(obj, arg, order) +#define do_fetch_and_explicit(type, obj, arg, order, res) \ + res = atomic_fetch_and_explicit(obj, arg, order) + +#define do_make_desired_not(type, exp, des) \ + des = (type) ~exp + +#define PATOMIC_DEFINE_BINARY_OPS_CREATE(type, name, vis_p, order, ops) \ + PATOMIC_WRAPPED_DIRECT_DEFINE_OP_VOID( \ + _Atomic(type), type, \ + patomic_opimpl_void_or_##name, \ + vis_p, order, \ + do_void_or_explicit \ + ) \ + PATOMIC_WRAPPED_DIRECT_DEFINE_OP_VOID( \ + _Atomic(type), type, \ + patomic_opimpl_void_xor_##name, \ + vis_p, order, \ + do_void_xor_explicit \ + ) \ + PATOMIC_WRAPPED_DIRECT_DEFINE_OP_VOID( \ + _Atomic(type), type, \ + patomic_opimpl_void_and_##name, \ + vis_p, order, \ + do_void_and_explicit \ + ) \ + PATOMIC_WRAPPED_CMPXCHG_DEFINE_OP_VOID_NOARG( \ + _Atomic(type), type, \ + patomic_opimpl_void_not_##name, \ + vis_p, order, \ + do_cmpxchg_weak, do_make_desired_not \ + ) \ + PATOMIC_WRAPPED_DIRECT_DEFINE_OP_FETCH( \ + _Atomic(type), type, \ + patomic_opimpl_fetch_or_##name, \ + vis_p, order, \ + do_fetch_or_explicit \ + ) \ + PATOMIC_WRAPPED_DIRECT_DEFINE_OP_FETCH( \ + _Atomic(type), type, \ + patomic_opimpl_fetch_xor_##name, \ + vis_p, order, \ + do_fetch_xor_explicit \ + ) \ + PATOMIC_WRAPPED_DIRECT_DEFINE_OP_FETCH( \ + _Atomic(type), type, \ + patomic_opimpl_fetch_and_##name, \ + vis_p, order, \ + do_fetch_and_explicit \ + ) \ + PATOMIC_WRAPPED_CMPXCHG_DEFINE_OP_FETCH_NOARG( \ + _Atomic(type), type, \ + patomic_opimpl_fetch_not_##name, \ + vis_p, order, \ + do_cmpxchg_weak, do_make_desired_not \ + ) \ + static patomic_##ops##_binary_t \ + patomic_ops_binary_create_##name(void) \ + { \ + patomic_##ops##_binary_t pao; \ + pao.fp_or = patomic_opimpl_void_or_##name; \ + pao.fp_xor = patomic_opimpl_void_xor_##name; \ + pao.fp_and = patomic_opimpl_void_and_##name; \ + pao.fp_not = patomic_opimpl_void_not_##name; 
\ + pao.fp_fetch_or = patomic_opimpl_fetch_or_##name; \ + pao.fp_fetch_xor = patomic_opimpl_fetch_xor_##name; \ + pao.fp_fetch_and = patomic_opimpl_fetch_and_##name; \ + pao.fp_fetch_not = patomic_opimpl_fetch_not_##name; \ + return pao; \ + } + + +/* + * ARITHMETIC: + * - (fetch_)add (direct) + * - (fetch_)sub (direct) + * - (fetch_)inc (direct) + * - (fetch_)dec (direct) + * - (fetch_)neg (cmpxchg) + */ +#define do_void_add_explicit(type, obj, arg, order) \ + PATOMIC_IGNORE_UNUSED(atomic_fetch_add_explicit(obj, arg, order)) +#define do_void_sub_explicit(type, obj, arg, order) \ + PATOMIC_IGNORE_UNUSED(atomic_fetch_sub_explicit(obj, arg, order)) +#define do_void_inc_explicit(type, obj, order) \ + PATOMIC_IGNORE_UNUSED(atomic_fetch_add_explicit(obj, (type) 1, order)) +#define do_void_dec_explicit(type, obj, order) \ + PATOMIC_IGNORE_UNUSED(atomic_fetch_sub_explicit(obj, (type) 1, order)) + +#define do_fetch_add_explicit(type, obj, arg, order, res) \ + res = atomic_fetch_add_explicit(obj, arg, order) +#define do_fetch_sub_explicit(type, obj, arg, order, res) \ + res = atomic_fetch_sub_explicit(obj, arg, order) +#define do_fetch_inc_explicit(type, obj, order, res) \ + res = atomic_fetch_add_explicit(obj, (type) 1, order) +#define do_fetch_dec_explicit(type, obj, order, res) \ + res = atomic_fetch_sub_explicit(obj, (type) 1, order) + +#define do_make_desired_neg(type, exp, des) \ + des = (type) -exp + +#define PATOMIC_DEFINE_ARITHMETIC_OPS_CREATE(type, name, vis_p, order, ops) \ + PATOMIC_WRAPPED_DIRECT_DEFINE_OP_VOID( \ + _Atomic(type), type, \ + patomic_opimpl_void_add_##name, \ + vis_p, order, \ + do_void_add_explicit \ + ) \ + PATOMIC_WRAPPED_DIRECT_DEFINE_OP_VOID( \ + _Atomic(type), type, \ + patomic_opimpl_void_sub_##name, \ + vis_p, order, \ + do_void_sub_explicit \ + ) \ + PATOMIC_WRAPPED_DIRECT_DEFINE_OP_VOID_NOARG( \ + _Atomic(type), type, \ + patomic_opimpl_void_inc_##name, \ + vis_p, order, \ + do_void_inc_explicit \ + ) \ + 
PATOMIC_WRAPPED_DIRECT_DEFINE_OP_VOID_NOARG( \ + _Atomic(type), type, \ + patomic_opimpl_void_dec_##name, \ + vis_p, order, \ + do_void_dec_explicit \ + ) \ + PATOMIC_WRAPPED_CMPXCHG_DEFINE_OP_VOID_NOARG( \ + _Atomic(type), type, \ + patomic_opimpl_void_neg_##name, \ + vis_p, order, \ + do_cmpxchg_weak, do_make_desired_neg \ + ) \ + PATOMIC_WRAPPED_DIRECT_DEFINE_OP_FETCH( \ + _Atomic(type), type, \ + patomic_opimpl_fetch_add_##name, \ + vis_p, order, \ + do_fetch_add_explicit \ + ) \ + PATOMIC_WRAPPED_DIRECT_DEFINE_OP_FETCH( \ + _Atomic(type), type, \ + patomic_opimpl_fetch_sub_##name, \ + vis_p, order, \ + do_fetch_sub_explicit \ + ) \ + PATOMIC_WRAPPED_DIRECT_DEFINE_OP_FETCH_NOARG( \ + _Atomic(type), type, \ + patomic_opimpl_fetch_inc_##name, \ + vis_p, order, \ + do_fetch_inc_explicit \ + ) \ + PATOMIC_WRAPPED_DIRECT_DEFINE_OP_FETCH_NOARG( \ + _Atomic(type), type, \ + patomic_opimpl_fetch_dec_##name, \ + vis_p, order, \ + do_fetch_dec_explicit \ + ) \ + PATOMIC_WRAPPED_CMPXCHG_DEFINE_OP_FETCH_NOARG( \ + _Atomic(type), type, \ + patomic_opimpl_fetch_neg_##name, \ + vis_p, order, \ + do_cmpxchg_weak, do_make_desired_neg \ + ) \ + static patomic_##ops##_arithmetic_t \ + patomic_ops_arithmetic_create_##name(void) \ + { \ + patomic_##ops##_arithmetic_t pao; \ + pao.fp_add = patomic_opimpl_void_add_##name; \ + pao.fp_sub = patomic_opimpl_void_sub_##name; \ + pao.fp_inc = patomic_opimpl_void_inc_##name; \ + pao.fp_dec = patomic_opimpl_void_dec_##name; \ + pao.fp_neg = patomic_opimpl_void_neg_##name; \ + pao.fp_fetch_add = patomic_opimpl_fetch_add_##name; \ + pao.fp_fetch_sub = patomic_opimpl_fetch_sub_##name; \ + pao.fp_fetch_inc = patomic_opimpl_fetch_inc_##name; \ + pao.fp_fetch_dec = patomic_opimpl_fetch_dec_##name; \ + pao.fp_fetch_neg = patomic_opimpl_fetch_neg_##name; \ + return pao; \ + } + + +/* + * CREATE STRUCTS + * + * Implicit: + * - ca: { consume, acquire } (not supported by store) + * - r: { release } (not supported by load) + * - ar: { acq_rel } (not 
supported by store or load) + * - rsc:{ relaxed, seq_cst } + */ +#define PATOMIC_DEFINE_OPS_CREATE_CA(type, name, vis_p, inv, order, ops) \ + /* no store in consume or acquire */ \ + PATOMIC_DEFINE_LOAD_OP(type, name, vis_p, order) \ + PATOMIC_DEFINE_XCHG_OPS_CREATE(type, name, vis_p, inv, order, ops) \ + PATOMIC_DEFINE_BITWISE_OPS_CREATE_LOAD(type, name, vis_p, order, ops) \ + PATOMIC_DEFINE_BINARY_OPS_CREATE(type, name, vis_p, order, ops) \ + PATOMIC_DEFINE_ARITHMETIC_OPS_CREATE(type, name, vis_p, order, ops) \ + static patomic_##ops##_t \ + patomic_ops_create_##name(void) \ + { \ + patomic_##ops##_t pao; \ + pao.fp_store = NULL; \ + pao.fp_load = patomic_opimpl_load_##name; \ + pao.xchg_ops = patomic_ops_xchg_create_##name(); \ + pao.bitwise_ops = patomic_ops_bitwise_create_##name(); \ + pao.binary_ops = patomic_ops_binary_create_##name(); \ + pao.arithmetic_ops = patomic_ops_arithmetic_create_##name(); \ + return pao; \ + } + +#define PATOMIC_DEFINE_OPS_CREATE_R(type, name, vis_p, inv, order, ops) \ + PATOMIC_DEFINE_STORE_OP(type, name, vis_p, order) \ + /* no load in release */ \ + PATOMIC_DEFINE_XCHG_OPS_CREATE(type, name, vis_p, inv, order, ops) \ + PATOMIC_DEFINE_BITWISE_OPS_CREATE_NO_LOAD(type, name, vis_p, order, ops) \ + PATOMIC_DEFINE_BINARY_OPS_CREATE(type, name, vis_p, order, ops) \ + PATOMIC_DEFINE_ARITHMETIC_OPS_CREATE(type, name, vis_p, order, ops) \ + static patomic_##ops##_t \ + patomic_ops_create_##name(void) \ + { \ + patomic_##ops##_t pao; \ + pao.fp_store = patomic_opimpl_store_##name; \ + pao.fp_load = NULL; \ + pao.xchg_ops = patomic_ops_xchg_create_##name(); \ + pao.bitwise_ops = patomic_ops_bitwise_create_##name(); \ + pao.binary_ops = patomic_ops_binary_create_##name(); \ + pao.arithmetic_ops = patomic_ops_arithmetic_create_##name(); \ + return pao; \ + } + +#define PATOMIC_DEFINE_OPS_CREATE_AR(type, name, vis_p, inv, order, ops) \ + /* no store or load in acq_rel */ \ + PATOMIC_DEFINE_XCHG_OPS_CREATE(type, name, vis_p, inv, order, ops) 
\ + PATOMIC_DEFINE_BITWISE_OPS_CREATE_NO_LOAD(type, name, vis_p, order, ops) \ + PATOMIC_DEFINE_BINARY_OPS_CREATE(type, name, vis_p, order, ops) \ + PATOMIC_DEFINE_ARITHMETIC_OPS_CREATE(type, name, vis_p, order, ops) \ + static patomic_##ops##_t \ + patomic_ops_create_##name(void) \ + { \ + patomic_##ops##_t pao; \ + pao.fp_store = NULL; \ + pao.fp_load = NULL; \ + pao.xchg_ops = patomic_ops_xchg_create_##name(); \ + pao.bitwise_ops = patomic_ops_bitwise_create_##name(); \ + pao.binary_ops = patomic_ops_binary_create_##name(); \ + pao.arithmetic_ops = patomic_ops_arithmetic_create_##name(); \ + return pao; \ + } + +#define PATOMIC_DEFINE_OPS_CREATE_RSC(type, name, vis_p, inv, order, ops) \ + PATOMIC_DEFINE_STORE_OP(type, name, vis_p, order) \ + PATOMIC_DEFINE_LOAD_OP(type, name, vis_p, order) \ + PATOMIC_DEFINE_XCHG_OPS_CREATE(type, name, vis_p, inv, order, ops) \ + PATOMIC_DEFINE_BITWISE_OPS_CREATE_LOAD(type, name, vis_p, order, ops) \ + PATOMIC_DEFINE_BINARY_OPS_CREATE(type, name, vis_p, order, ops) \ + PATOMIC_DEFINE_ARITHMETIC_OPS_CREATE(type, name, vis_p, order, ops) \ + static patomic_##ops##_t \ + patomic_ops_create_##name(void) \ + { \ + patomic_##ops##_t pao; \ + pao.fp_store = patomic_opimpl_store_##name; \ + pao.fp_load = patomic_opimpl_load_##name; \ + pao.xchg_ops = patomic_ops_xchg_create_##name(); \ + pao.bitwise_ops = patomic_ops_bitwise_create_##name(); \ + pao.binary_ops = patomic_ops_binary_create_##name(); \ + pao.arithmetic_ops = patomic_ops_arithmetic_create_##name(); \ + return pao; \ + } + +#define PATOMIC_DEFINE_OPS_CREATE_ALL(type, name) \ + PATOMIC_DEFINE_OPS_CREATE_RSC( \ + type, name##_relaxed, HIDE_P, SHOW, patomic_RELAXED, ops \ + ) \ + /* consume is not supported, we just use acquire */ \ + PATOMIC_DEFINE_OPS_CREATE_CA( \ + type, name##_acquire, HIDE_P, SHOW, patomic_ACQUIRE, ops \ + ) \ + PATOMIC_DEFINE_OPS_CREATE_R( \ + type, name##_release, HIDE_P, SHOW, patomic_RELEASE, ops \ + ) \ + PATOMIC_DEFINE_OPS_CREATE_AR( \ + type, 
name##_acq_rel, HIDE_P, SHOW, patomic_ACQ_REL, ops \ + ) \ + PATOMIC_DEFINE_OPS_CREATE_RSC( \ + type, name##_seq_cst, HIDE_P, SHOW, patomic_SEQ_CST, ops \ + ) \ + PATOMIC_DEFINE_OPS_CREATE_RSC( \ + type, name##_explicit, SHOW_P, HIDE, order, ops_explicit \ + ) + + +#if ATOMIC_CHAR_LOCK_FREE + PATOMIC_DEFINE_OPS_CREATE_ALL(unsigned char, char) +#endif + +#if ATOMIC_SHORT_LOCK_FREE + PATOMIC_DEFINE_OPS_CREATE_ALL(unsigned short, short) +#endif + +#if ATOMIC_INT_LOCK_FREE + PATOMIC_DEFINE_OPS_CREATE_ALL(unsigned int, int) +#endif + +#if ATOMIC_LONG_LOCK_FREE + PATOMIC_DEFINE_OPS_CREATE_ALL(unsigned long, long) +#endif + +#undef HAS_LLONG_IMPL +#if ATOMIC_LLONG_LOCK_FREE && PATOMIC_STDINT_HAS_LLONG + #define HAS_LLONG_IMPL 1 + PATOMIC_DEFINE_OPS_CREATE_ALL(patomic_llong_unsigned_t, llong) +#else + #define HAS_LLONG_IMPL 0 +#endif + + +#define PATOMIC_RET_OPS(type, name, byte_width, order, ops) \ + if ((byte_width == sizeof(type)) && \ + (byte_width == sizeof(_Atomic(type)))) \ + { \ + _Atomic(type) obj; \ + if (atomic_is_lock_free(&obj)) \ + { \ + switch (order) \ + { \ + case patomic_RELAXED: \ + ops = patomic_ops_create_##name##_relaxed(); \ + break; \ + case patomic_CONSUME: \ + case patomic_ACQUIRE: \ + ops = patomic_ops_create_##name##_acquire(); \ + break; \ + case patomic_RELEASE: \ + ops = patomic_ops_create_##name##_release(); \ + break; \ + case patomic_ACQ_REL: \ + ops = patomic_ops_create_##name##_acq_rel(); \ + break; \ + case patomic_SEQ_CST: \ + ops = patomic_ops_create_##name##_seq_cst(); \ + break; \ + default: \ + patomic_assert_always("invalid memory order" && 0); \ + } \ + return ops; \ + } \ + PATOMIC_IGNORE_UNUSED(obj); \ + } + +#define PATOMIC_RET_OPS_EXPLICIT(type, name, byte_width, ops) \ + if ((byte_width == sizeof(type)) && \ + (byte_width == sizeof(_Atomic(type)))) \ + { \ + _Atomic(type) obj; \ + if (atomic_is_lock_free(&obj)) \ + { \ + ops = patomic_ops_create_##name##_explicit(); \ + return ops; \ + } \ + PATOMIC_IGNORE_UNUSED(obj); \ + } 
+ +#define PATOMIC_RET_ALIGN(type, byte_width) \ + if ((byte_width == sizeof(type)) && \ + (byte_width == sizeof(_Atomic(type)))) \ + { \ + _Atomic(type) obj; \ + if (atomic_is_lock_free(&obj)) \ + { \ + align.recommended = patomic_alignof_type(_Atomic(type)); \ + align.minimum = align.recommended; \ + align.size_within = 0; \ + return align; \ + } \ + PATOMIC_IGNORE_UNUSED(obj); \ + } + + +static patomic_ops_t +patomic_create_ops( + const size_t byte_width, + const patomic_memory_order_t order +) +{ + /* setup */ + patomic_ops_t ops = {0}; + patomic_assert_always(patomic_is_valid_order((int) order)); + + /* set and return implicit atomic ops */ + /* go from largest to smallest in case some platform has two types with the + * same width but one has a larger range */ +#if HAS_LLONG_IMPL + PATOMIC_RET_OPS(patomic_llong_unsigned_t, llong, byte_width, order, ops) +#endif +#if ATOMIC_LONG_LOCK_FREE + PATOMIC_RET_OPS(unsigned long, long, byte_width, order, ops) +#endif +#if ATOMIC_INT_LOCK_FREE + PATOMIC_RET_OPS(unsigned int, int, byte_width, order, ops) +#endif +#if ATOMIC_SHORT_LOCK_FREE + PATOMIC_RET_OPS(unsigned short, short, byte_width, order, ops) +#endif +#if ATOMIC_CHAR_LOCK_FREE + PATOMIC_RET_OPS(unsigned char, char, byte_width, order, ops) +#endif + + /* fallback, width not supported */ + return ops; +} + +static patomic_ops_explicit_t +patomic_create_ops_explicit( + const size_t byte_width +) +{ + /* setup */ + patomic_ops_explicit_t ops = {0}; + + /* set and return explicit atomic ops */ + /* go from largest to smallest in case some platform has two types with the + * same width but one has a larger range */ +#if HAS_LLONG_IMPL + PATOMIC_RET_OPS_EXPLICIT(patomic_llong_unsigned_t, llong, byte_width, ops) +#endif +#if ATOMIC_LONG_LOCK_FREE + PATOMIC_RET_OPS_EXPLICIT(unsigned long, long, byte_width, ops) +#endif +#if ATOMIC_INT_LOCK_FREE + PATOMIC_RET_OPS_EXPLICIT(unsigned int, int, byte_width, ops) +#endif +#if ATOMIC_SHORT_LOCK_FREE + 
PATOMIC_RET_OPS_EXPLICIT(unsigned short, short, byte_width, ops) +#endif +#if ATOMIC_CHAR_LOCK_FREE + PATOMIC_RET_OPS_EXPLICIT(unsigned char, char, byte_width, ops) +#endif + + /* fallback, width not supported */ + return ops; +} + +static patomic_align_t +patomic_create_align( + const size_t byte_width +) +{ + /* setup */ + patomic_align_t align = {0}; + + /* set and return atomic alignments */ + /* go from largest to smallest in case some platform has two types with the + * same width but one has a larger range */ +#if HAS_LLONG_IMPL + PATOMIC_RET_ALIGN(patomic_llong_unsigned_t, byte_width) +#endif +#if ATOMIC_LONG_LOCK_FREE + PATOMIC_RET_ALIGN(unsigned long, byte_width) +#endif +#if ATOMIC_INT_LOCK_FREE + PATOMIC_RET_ALIGN(unsigned int, byte_width) +#endif +#if ATOMIC_SHORT_LOCK_FREE + PATOMIC_RET_ALIGN(unsigned short, byte_width) +#endif +#if ATOMIC_CHAR_LOCK_FREE + PATOMIC_RET_ALIGN(unsigned char, byte_width) +#endif + + /* fallback, width not supported */ + align.recommended = 1; + align.minimum = align.recommended; + align.size_within = 0; + return align; +} + + +patomic_t +patomic_impl_create_std( + const size_t byte_width, + const patomic_memory_order_t order, + const unsigned int options +) +{ + /* setup */ + patomic_t impl; + PATOMIC_IGNORE_UNUSED(options); + + /* set members */ + impl.ops = patomic_create_ops(byte_width, order); + impl.align = patomic_create_align(byte_width); + + /* return */ + return impl; +} + +patomic_explicit_t +patomic_impl_create_explicit_std( + const size_t byte_width, + const unsigned int options +) +{ + /* setup */ + patomic_explicit_t impl; + PATOMIC_IGNORE_UNUSED(options); + + /* set members */ + impl.ops = patomic_create_ops_explicit(byte_width); + impl.align = patomic_create_align(byte_width); + + /* return */ + return impl; +} + + +#else /* PATOMIC_HAS_ATOMIC && PATOMIC_HAS_STDATOMIC_H && PATOMIC_HAS_IR_TWOS_COMPL */ + + +patomic_t +patomic_impl_create_std( + const size_t byte_width, + const patomic_memory_order_t order, 
+ const unsigned int options +) +{ + /* zero all fields */ + patomic_t impl = {0}; + + /* ignore parameters */ + PATOMIC_IGNORE_UNUSED(byte_width); + PATOMIC_IGNORE_UNUSED(order); + PATOMIC_IGNORE_UNUSED(options); + + /* set a valid minimal alignment */ + impl.align.recommended = 1; + impl.align.minimum = 1; + + /* return */ + return impl; +} + + +patomic_explicit_t +patomic_impl_create_explicit_std( + const size_t byte_width, + const unsigned int options +) +{ + /* zero all fields */ + patomic_explicit_t impl = {0}; + + /* ignore all parameters */ + PATOMIC_IGNORE_UNUSED(byte_width); + PATOMIC_IGNORE_UNUSED(options); + + /* set a valid minimal alignment */ + impl.align.recommended = 1; + impl.align.minimum = 1; + + /* return */ + return impl; +} + + +#endif /* PATOMIC_HAS_ATOMIC && PATOMIC_HAS_STDATOMIC_H && PATOMIC_HAS_IR_TWOS_COMPL */ + + +patomic_transaction_t +patomic_impl_create_transaction_std( + const unsigned int options +) +{ + /* zero all fields */ + patomic_transaction_t impl = {0}; + + /* ignore parameters */ + PATOMIC_IGNORE_UNUSED(options); + + /* return */ + return impl; +} diff --git a/src/impl/std/std.h b/src/impl/std/std.h new file mode 100644 index 000000000..c2951ac71 --- /dev/null +++ b/src/impl/std/std.h @@ -0,0 +1,85 @@ +#ifndef PATOMIC_IMPL_STD_H +#define PATOMIC_IMPL_STD_H + +#include + + +/** + * @addtogroup impl.std + * + * @brief + * Support for operations depends on the availability of C11 atomics. If one + * operation is supported for a given width, all operations are supported for + * that width. + * + * @note + * Bitwise operations are implemented as a cmpxchg loop. + * + * @param byte_width + * The width of an object to operate on. + * + * @param order + * The minimum memory order to perform the operation with. + * + * @param options + * Value is currently unused. + * + * @return + * Implementation where operations are as C11 atomic operations would be. 
+ */ +patomic_t +patomic_impl_create_std( + size_t byte_width, + patomic_memory_order_t order, + unsigned int options +); + + +/** + * @addtogroup impl.std + * + * @brief + * Support for operations depends on the availability of C11 atomics. If one + * operation is supported for a given width, all operations are supported for + * that width. + * + * @note + * Bitwise operations are implemented as a cmpxchg loop. + * + * @param byte_width + * The width of an object to operate on. + * + * @param options + * Value is currently unused. + * + * @return + * Implementation where operations are as C11 atomic operations would be. + */ +patomic_explicit_t +patomic_impl_create_explicit_std( + size_t byte_width, + unsigned int options +); + + +/** + * @addtogroup impl.std + * + * @brief + * No operations are supported here, since C11 does not provide transactional + * operations. + * + * @param options + * Value is ignored. + * + * @return + * Implementation where no operations are supported and alignment requirements + * are the minimum possible. + */ +patomic_transaction_t +patomic_impl_create_transaction_std( + unsigned int options +); + + +#endif /* PATOMIC_IMPL_STD_H */ diff --git a/src/include/patomic/CMakeLists.txt b/src/include/patomic/CMakeLists.txt index 787e688b6..2ec0b122f 100644 --- a/src/include/patomic/CMakeLists.txt +++ b/src/include/patomic/CMakeLists.txt @@ -2,6 +2,7 @@ add_subdirectory(internal) add_subdirectory(macros) add_subdirectory(stdlib) +add_subdirectory(wrapped) # add directory files to target target_sources(${target_name} PRIVATE diff --git a/src/include/patomic/config.h b/src/include/patomic/config.h index 2d23a5c1b..3d2f50117 100644 --- a/src/include/patomic/config.h +++ b/src/include/patomic/config.h @@ -43,6 +43,76 @@ #endif +#ifndef PATOMIC_HAS_RESTRICT + /** + * @addtogroup config.safe + * + * @brief + * 'restrict' is available as a keyword. + * + * @note + * Usually required: C99. 
+ */ + #define PATOMIC_HAS_RESTRICT 0 +#endif + + +#ifndef PATOMIC_HAS_MS_RESTRICT + /** + * @addtogroup config.safe + * + * @brief + * '__restrict' is available as a keyword. + * + * @note + * Usually required: Microsoft compatible(-ish) compiler. + */ + #define PATOMIC_HAS_MS_RESTRICT 0 +#endif + + +#ifndef PATOMIC_HAS_GNU_RESTRICT + /** + * @addtogroup config.safe + * + * @brief + * '__restrict__' is available as a keyword. + * + * @note + * Usually required: GNU compatible(-ish) compiler. + */ + #define PATOMIC_HAS_GNU_RESTRICT 0 +#endif + + +#ifndef PATOMIC_HAS_ATOMIC + /** + * @addtogroup config.safe + * + * @brief + * '_Atomic' is available as a keyword. + * + * @note + * Usually required: C11. + */ + #define PATOMIC_HAS_ATOMIC 0 +#endif + + +#ifndef PATOMIC_HAS_STDATOMIC_H + /** + * @addtogroup config.safe + * + * @brief + * header is available. + * + * @note + * Usually required: C11. + */ + #define PATOMIC_HAS_STDATOMIC_H 0 +#endif + + #ifndef PATOMIC_HAS_STDINT_H /** * @addtogroup config.safe @@ -372,6 +442,118 @@ #endif +#ifndef PATOMIC_HAS_C23_ALIGNOF + /** + * @addtogroup config.safe + * + * @brief + * 'alignof(T)' is available as a function. + * + * @note + * Usually requires: C23. + */ + #define PATOMIC_HAS_C23_ALIGNOF 0 +#endif + + +#ifndef PATOMIC_HAS_C23_ALIGNOF_EXTN + /** + * @addtogroup config.safe + * + * @brief + * '__extension__ alignof(T)' is available as a function. + * + * @note + * Usually requires: GNU compatible(-ish) compiler. + */ + #define PATOMIC_HAS_C23_ALIGNOF_EXTN 0 +#endif + + +#ifndef PATOMIC_HAS_C11_ALIGNOF + /** + * @addtogroup config.safe + * + * @brief + * '_Alignof(T)' is available as a function. + * + * @note + * Usually requires: C11. + */ + #define PATOMIC_HAS_C11_ALIGNOF 0 +#endif + + +#ifndef PATOMIC_HAS_C11_ALIGNOF_EXTN + /** + * @addtogroup config.safe + * + * @brief + * '__extension__ _Alignof(T)' is available as a function. + * + * @note + * Usually requires: GNU compatible(-ish) compiler. 
+ */ + #define PATOMIC_HAS_C11_ALIGNOF_EXTN 0 +#endif + + +#ifndef PATOMIC_HAS_MS_ALIGNOF + /** + * @addtogroup config.safe + * + * @brief + * '__alignof(T)' is available as a function. + * + * @note + * Usually required: Microsoft compatible(-ish) compiler. + */ + #define PATOMIC_HAS_MS_ALIGNOF 0 +#endif + + +#ifndef PATOMIC_HAS_MS_ALIGNOF_EXTN + /** + * @addtogroup config.safe + * + * @brief + * '__extension__ __alignof(T)' is available as a function. + * + * @note + * Usually required: GNU compatible(-ish) compiler. + */ + #define PATOMIC_HAS_MS_ALIGNOF_EXTN 0 +#endif + + +#ifndef PATOMIC_HAS_GNU_ALIGNOF + /** + * @addtogroup config.safe + * + * @brief + * '__alignof__(T)' is available as a function. + * + * @note + * Usually required: GNU compatible(-ish) compiler. + */ + #define PATOMIC_HAS_GNU_ALIGNOF 0 +#endif + + +#ifndef PATOMIC_HAS_GNU_ALIGNOF_EXTN + /** + * @addtogroup config.safe + * + * @brief + * '__extension__ __alignof__(T)' is available as a function. + * + * @note + * Usually required: GNU compatible(-ish) compiler. 
+ */ + #define PATOMIC_HAS_GNU_ALIGNOF_EXTN 0 +#endif + + /* * UNSAFE CONSTANTS * ================ diff --git a/src/include/patomic/macros/CMakeLists.txt b/src/include/patomic/macros/CMakeLists.txt index c84c8114e..4fa104221 100644 --- a/src/include/patomic/macros/CMakeLists.txt +++ b/src/include/patomic/macros/CMakeLists.txt @@ -3,5 +3,7 @@ target_sources(${target_name} PRIVATE func_name.h ignore_unused.h noreturn.h + restrict.h + static_assert.h unreachable.h ) diff --git a/src/include/patomic/macros/restrict.h b/src/include/patomic/macros/restrict.h new file mode 100644 index 000000000..b9befd46e --- /dev/null +++ b/src/include/patomic/macros/restrict.h @@ -0,0 +1,27 @@ +#ifndef PATOMIC_RESTRICT + +#include + +/* used internally */ +#undef PATOMIC_RESTRICT_ + +#if PATOMIC_HAS_RESTRICT + #define PATOMIC_RESTRICT_ restrict +#elif PATOMIC_HAS_MS_RESTRICT + #define PATOMIC_RESTRICT_ __restrict +#elif PATOMIC_HAS_GNU_RESTRICT + #define PATOMIC_RESTRICT_ __restrict__ +#else + #define PATOMIC_RESTRICT_ +#endif + +/** + * @addtogroup macros + * + * @brief + * Object pointer qualifier to declare that a pointer will not alias with any + * other pointer. Behaves like C99's 'restrict' keyword. + */ +#define PATOMIC_RESTRICT PATOMIC_RESTRICT_ + +#endif /* PATOMIC_RESTRICT */ diff --git a/src/include/patomic/macros/static_assert.h b/src/include/patomic/macros/static_assert.h new file mode 100644 index 000000000..af1285042 --- /dev/null +++ b/src/include/patomic/macros/static_assert.h @@ -0,0 +1,16 @@ +#ifndef PATOMIC_STATIC_ASSERT + +/** + * @addtogroup macros + * + * @brief + * Asserts that an expression evaluates to a non-zero value at compile time. + * + * @pre + * The message must be able to be part of a valid C identifier (but will + * never be at the start). + */ +#define PATOMIC_STATIC_ASSERT(msg, expr) \ + typedef char patomic_static_assert_##msg[(expr) ? 
1 : -1] + +#endif /* PATOMIC_STATIC_ASSERT */ diff --git a/src/include/patomic/stdlib/CMakeLists.txt b/src/include/patomic/stdlib/CMakeLists.txt index 07899238d..5ae3199f4 100644 --- a/src/include/patomic/stdlib/CMakeLists.txt +++ b/src/include/patomic/stdlib/CMakeLists.txt @@ -4,5 +4,7 @@ target_sources(${target_name} PRIVATE assert.h math.h sort.h + stdalign.h stdint.h + string.h ) diff --git a/src/include/patomic/stdlib/assert.h b/src/include/patomic/stdlib/assert.h index aeb1f8c26..929032894 100644 --- a/src/include/patomic/stdlib/assert.h +++ b/src/include/patomic/stdlib/assert.h @@ -57,7 +57,7 @@ __patomic_assert_fail( #if defined(NDEBUG) && !defined(NNDEBUG) #define patomic_assert_unreachable_(expr) (PATOMIC_IGNORE_UNUSED((expr || \ - (PATOMIC_UNREACHABLE(), 0))) + (PATOMIC_UNREACHABLE(), 0)))) #else #define patomic_assert_unreachable_(expr) patomic_assert_always(expr) #endif diff --git a/src/include/patomic/stdlib/stdalign.h b/src/include/patomic/stdlib/stdalign.h new file mode 100644 index 000000000..56ed0d9f3 --- /dev/null +++ b/src/include/patomic/stdlib/stdalign.h @@ -0,0 +1,100 @@ +#ifndef PATOMIC_STDLIB_STDALIGN_H +#define PATOMIC_STDLIB_STDALIGN_H + +#include + +#include + + +/** + * @addtogroup stdlib + * + * @brief + * Calculates the alignment of an imaginary struct which contains the largest + * possible single object with natural alignment. + * + * @warning + * This will return incorrect values if the actual struct contains members of + * over-aligned types, which may result in undefined behaviour if used. + * + * @param size + * The size of an object or type as returned by sizeof. + * + * @return + * The largest power of 2 which divides 'size', or the value 0 if 'size' is 0. + */ +size_t +patomic_alignment_from_size( + size_t size +); + + +/** + * @addtogroup stdlib + * + * @brief + * Checks if the address has a suitable runtime alignment. + * + * @param ptr + * Pointer holding the address to check. 
+ *
+ * @param alignment
+ *   Minimum alignment to check for.
+ *
+ * @return
+ *   The value 1 if the address held by 'ptr' is suitably aligned, otherwise
+ *   the value 0.
+ */
+int
+patomic_is_aligned(
+    const volatile void *ptr,
+    size_t alignment
+);
+
+
+/* used internally */
+#undef patomic_alignof_type_
+
+#if PATOMIC_HAS_C23_ALIGNOF
+    #define patomic_alignof_type_(type) alignof(type)
+#elif PATOMIC_HAS_C23_ALIGNOF_EXTN
+    #define patomic_alignof_type_(type) __extension__ alignof(type)
+#elif PATOMIC_HAS_C11_ALIGNOF
+    #define patomic_alignof_type_(type) _Alignof(type)
+#elif PATOMIC_HAS_C11_ALIGNOF_EXTN
+    #define patomic_alignof_type_(type) __extension__ _Alignof(type)
+#elif PATOMIC_HAS_GNU_ALIGNOF
+    #define patomic_alignof_type_(type) __alignof__(type)
+#elif PATOMIC_HAS_GNU_ALIGNOF_EXTN
+    #define patomic_alignof_type_(type) __extension__ __alignof__(type)
+#elif PATOMIC_HAS_MS_ALIGNOF
+    #define patomic_alignof_type_(type) __alignof(type)
+#elif PATOMIC_HAS_MS_ALIGNOF_EXTN
+    #define patomic_alignof_type_(type) __extension__ __alignof(type)
+#else
+    #define patomic_alignof_type_(type) \
+        patomic_alignment_from_size(sizeof(type))
+#endif
+
+/**
+ * @addtogroup stdlib
+ *
+ * @brief
+ *   Gets the alignment requirement of a type.
+ *
+ * @warning
+ *   The implementation for this function may fall back to using
+ *   'patomic_alignment_from_size' if no other implementation is available.
+ *   This will return incorrect values for over-aligned types, which may
+ *   result in undefined behaviour if used.
+ *
+ * @param type
+ *   Type from which to get alignment.
+ *
+ * @return
+ *   The alignment of the type 'type'.
+ */ +#define patomic_alignof_type(type) patomic_alignof_type_(type) + + +#endif /* PATOMIC_STDLIB_STDALIGN_H */ diff --git a/src/include/patomic/stdlib/string.h b/src/include/patomic/stdlib/string.h new file mode 100644 index 000000000..1498a1332 --- /dev/null +++ b/src/include/patomic/stdlib/string.h @@ -0,0 +1,41 @@ +#ifndef PATOMIC_STDLIB_STRING_H +#define PATOMIC_STDLIB_STRING_H + +#include + +#include + + +/** + * @addtogroup stdlib + * + * @brief + * Copies 'count' characters from the buffer pointed to by 'src' into the + * buffer pointed to by 'dest'. + * + * @note + * Functions identically to C's memcpy. + * + * @param count + * Number of characters to copy from one buffer to the other. + * + * @param src + * Source buffer from which to copy the characters, interpreted as an array + * of 'unsigned char'. + * + * @param dest + * Destination buffer into which to copy the characters, interpreted as an + * array of 'unsigned char'. + * + * @return + * A copy of 'dest'. + */ +void * +patomic_memcpy( + void *PATOMIC_RESTRICT dest, + const void *PATOMIC_RESTRICT src, + size_t count +); + + +#endif /* PATOMIC_STDLIB_STRING_H */ diff --git a/src/include/patomic/wrapped/CMakeLists.txt b/src/include/patomic/wrapped/CMakeLists.txt new file mode 100644 index 000000000..e5cc08343 --- /dev/null +++ b/src/include/patomic/wrapped/CMakeLists.txt @@ -0,0 +1,6 @@ +# add directory files to target +target_sources(${target_name} PRIVATE + base.h + cmpxchg.h + direct.h +) diff --git a/src/include/patomic/wrapped/base.h b/src/include/patomic/wrapped/base.h new file mode 100644 index 000000000..cbdea2836 --- /dev/null +++ b/src/include/patomic/wrapped/base.h @@ -0,0 +1,101 @@ +#ifndef PATOMIC_WRAPPED_BASE_H +#define PATOMIC_WRAPPED_BASE_H + +#include + +#include +#include +#include + + +/* redefined below */ +#undef HIDE +#undef HIDE_P +#undef SHOW +#undef SHOW_P +#undef PATOMIC_WRAPPED_DO_ASSERT +#undef PATOMIC_WRAPPED_DO_ASSERT_ALIGNED +#undef PATOMIC_WRAPPED_DO_MEMCPY + + +/** + 
* @addtogroup wrapped.base + * + * @brief + * Hides a token from the compiler. + */ +#define HIDE(token) + + +/** + * @addtogroup wrapped.base + * + * @brief + * Hides two comma-separated tokens from the compiler. + * + * @note + * Can be used for conditionally hiding a function parameter. + */ +#define HIDE_P(token_a, token_b) + + +/** + * @addtogroup wrapped.base + * + * @brief + * Makes a token visible to the compiler. + */ +#define SHOW(token) token + + +/** + * @addtogroup wrapped.base + * + * @brief + * Makes the second of two comma-separated tokens visible to the compiler, + * including the comma. + * + * @note + * Can be used for conditionally making a function parameter visible. + */ +#define SHOW_P(token_a, token_b) ,token_b + + +/** + * @addtogroup wrapped.base + * + * @brief + * Asserts that an expression evaluates to a non-zero value. + */ +#define PATOMIC_WRAPPED_DO_ASSERT(expr) \ + patomic_assert_unreachable(expr) + + +/** + * @addtogroup wrapped.base + * + * @brief + * Asserts that a pointed-to object meets a type's alignment requirements. + */ +#define PATOMIC_WRAPPED_DO_ASSERT_ALIGNED(obj_ptr, type) \ + patomic_assert_unreachable( \ + patomic_is_aligned(obj_ptr, patomic_alignof_type(type)) \ + ) + + +/** + * @addtogroup wrapped.base + * + * @brief + * Acts as a transparent wrapper around memcpy where the return value is + * discarded. + * + * @note + * Necessary because some compiler options may cause an error if the return + * value of memcpy is not used. 
+ */ +#define PATOMIC_WRAPPED_DO_MEMCPY(dest, src, count) \ + PATOMIC_IGNORE_UNUSED(patomic_memcpy(dest, src, count)) + + +#endif /* PATOMIC_WRAPPED_BASE_H */ diff --git a/src/include/patomic/wrapped/cmpxchg.h b/src/include/patomic/wrapped/cmpxchg.h new file mode 100644 index 000000000..4c5be5320 --- /dev/null +++ b/src/include/patomic/wrapped/cmpxchg.h @@ -0,0 +1,1022 @@ +#ifndef PATOMIC_WRAPPED_CMPXCHG_H +#define PATOMIC_WRAPPED_CMPXCHG_H + +#include "base.h" + +#include + +#include + +#include +#include + + +/** + * @addtogroup wrapped.cmpxchg + * + * @brief + * Defines a function which implements an atomic store operation using + * cmpxchg_weak as the underlying atomic operation. + * + * @details + * The defined function's signature will match either patomic_opsig_store_t + * or patomic_opsig_explicit_store_t (depending on the value of 'vis_p'). + * + * @param atomic_type + * The type of the object on which the atomic operation is to be performed. + * Must not be a VLA or an array of unknown size. + * + * @param type + * The non-atomic counterpart of 'atomic_type'. This must have the same size + * as 'atomic_type' and must not have a stricter alignment. + * + * @param fn_name + * The name of the function to be defined. + * + * @param vis_p + * Either the macro 'SHOW_P' if the function should be defined as taking a + * memory order parameter (a.k.a. explicit), or the macro 'HIDE_P' if it + * should not (a.k.a. implicit). + * + * @param order + * The literal token 'order' if 'vis_p' is 'SHOW_P', otherwise the desired + * memory order to be used implicitly by the atomic operation. + * + * @param do_atomic_cmpxchg_weak_explicit + * A macro, M, callable as 'M(type, obj, exp, des, succ, fail, ok);' in block + * scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'obj' : an expression of type 'volatile atomic_type *' + * - 'exp' : the name of an identifier designating an object of type 'type' + * - 'des' : the name of an identifier designating an object of type 'type' + * - 'succ' : an expression of type 'int' whose value is a valid memory order + * - 'fail' : an expression of type 'int' whose value is a valid load memory + * order not stronger than 'succ' + * - 'ok' : the name of an identifier designating an object of type 'int' + * + * The expected behaviour of calling the macro M as above is: + * - the values of 'exp' and 'des' are read + * - in a single atomic operation, the value of the object pointed to by + * 'obj' is read and, if it compares equal to the value of 'exp', the + * value of 'des' is written to the object pointed to by 'obj' + * - the value read from 'obj' is stored in 'exp' + * - 'ok' is set to non-zero if the value of 'des' was written to the object + * pointed to by 'obj' (success), otherwise it is set to zero (failure) + * - the atomic operation uses a memory ordering at least as strong as 'succ' + * for a successful exchange, and a load memory ordering at least as strong + * as 'fail' for a failed exchange + */ +#define PATOMIC_WRAPPED_CMPXCHG_DEFINE_OP_STORE( \ + atomic_type, type, fn_name, vis_p, order, \ + do_atomic_cmpxchg_weak_explicit \ +) \ + static void \ + fn_name( \ + volatile void *const obj \ + ,const void *const desired \ + vis_p(_,const int order) \ + ) \ + { \ + /* static assertions */ \ + PATOMIC_STATIC_ASSERT( \ + sizeof_type_eq_atype, sizeof(type) == sizeof(atomic_type)); \ + \ + /* declarations */ \ + type exp = {0}; \ + type des; \ + int ok; \ + const int succ = (int) order; \ + const int fail = PATOMIC_CMPXCHG_FAIL_ORDER(succ); \ + \ + /* assertions */ \ + PATOMIC_WRAPPED_DO_ASSERT(obj != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT(desired != NULL); \ + 
PATOMIC_WRAPPED_DO_ASSERT_ALIGNED(obj, atomic_type);              \
+        PATOMIC_WRAPPED_DO_ASSERT(PATOMIC_IS_VALID_STORE_ORDER((int) order)); \
+                                                                            \
+        /* inputs */                                                        \
+        PATOMIC_WRAPPED_DO_MEMCPY(&des, desired, sizeof(type));             \
+                                                                            \
+        /* operation */                                                     \
+        do {                                                                \
+            do_atomic_cmpxchg_weak_explicit(                                \
+                type,                                                       \
+                (volatile atomic_type *) obj,                               \
+                exp, des,                                                   \
+                succ, fail,                                                 \
+                ok                                                          \
+            );                                                              \
+        } while (!ok);                                                      \
+    }
+
+
+/**
+ * @addtogroup wrapped.cmpxchg
+ *
+ * @brief
+ *   Defines a function which implements an atomic exchange operation using
+ *   cmpxchg_weak as the underlying atomic operation.
+ *
+ * @details
+ *   The defined function's signature will match either patomic_opsig_exchange_t
+ *   or patomic_opsig_explicit_exchange_t (depending on the value of 'vis_p').
+ *
+ * @param atomic_type
+ *   The type of the object on which the atomic operation is to be performed.
+ *   Must not be a VLA or an array of unknown size.
+ *
+ * @param type
+ *   The non-atomic counterpart of 'atomic_type'. This must have the same size
+ *   as 'atomic_type' and must not have a stricter alignment.
+ *
+ * @param fn_name
+ *   The name of the function to be defined.
+ *
+ * @param vis_p
+ *   Either the macro 'SHOW_P' if the function should be defined as taking a
+ *   memory order parameter (a.k.a. explicit), or the macro 'HIDE_P' if it
+ *   should not (a.k.a. implicit).
+ *
+ * @param order
+ *   The literal token 'order' if 'vis_p' is 'SHOW_P', otherwise the desired
+ *   memory order to be used implicitly by the atomic operation.
+ *
+ * @param do_atomic_cmpxchg_weak_explicit
+ *   A macro, M, callable as 'M(type, obj, exp, des, succ, fail, ok);' in block
+ *   scope.
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'obj' : an expression of type 'volatile atomic_type *' + * - 'exp' : the name of an identifier designating an object of type 'type' + * - 'des' : the name of an identifier designating an object of type 'type' + * - 'succ' : an expression of type 'int' whose value is a valid memory order + * - 'fail' : an expression of type 'int' whose value is a valid load memory + * order not stronger than 'succ' + * - 'ok' : the name of an identifier designating an object of type 'int' + * + * The expected behaviour of calling the macro M as above is: + * - the values of 'exp' and 'des' are read + * - in a single atomic operation, the value of the object pointed to by + * 'obj' is read and, if it compares equal to the value of 'exp', the + * value of 'des' is written to the object pointed to by 'obj' + * - the value read from 'obj' is stored in 'exp' + * - 'ok' is set to non-zero if the value of 'des' was written to the object + * pointed to by 'obj' (success), otherwise it is set to zero (failure) + * - the atomic operation uses a memory ordering at least as strong as 'succ' + * for a successful exchange, and a load memory ordering at least as strong + * as 'fail' for a failed exchange + */ +#define PATOMIC_WRAPPED_CMPXCHG_DEFINE_OP_EXCHANGE( \ + atomic_type, type, fn_name, vis_p, order, \ + do_atomic_cmpxchg_weak_explicit \ +) \ + static void \ + fn_name( \ + volatile void *const obj \ + ,const void *const desired \ + vis_p(_,const int order) \ + ,void *const ret \ + ) \ + { \ + /* static assertions */ \ + PATOMIC_STATIC_ASSERT( \ + sizeof_type_eq_atype, sizeof(type) == sizeof(atomic_type)); \ + \ + /* declarations */ \ + type exp = {0}; \ + type des; \ + int ok; \ + const int succ = (int) order; \ + const int fail = PATOMIC_CMPXCHG_FAIL_ORDER(succ); \ + \ + /* assertions */ \ + PATOMIC_WRAPPED_DO_ASSERT(obj != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT(desired != NULL); 
\ + PATOMIC_WRAPPED_DO_ASSERT(ret != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT_ALIGNED(obj, atomic_type); \ + PATOMIC_WRAPPED_DO_ASSERT(PATOMIC_IS_VALID_ORDER((int) order)); \ + \ + /* inputs */ \ + PATOMIC_WRAPPED_DO_MEMCPY(&des, desired, sizeof(type)); \ + \ + /* operation */ \ + do { \ + do_atomic_cmpxchg_weak_explicit( \ + type, \ + (volatile atomic_type *) obj, \ + exp, des, \ + succ, fail, \ + ok \ + ); \ + } \ + while (!ok); \ + \ + /* outputs */ \ + PATOMIC_WRAPPED_DO_MEMCPY(ret, &exp, sizeof(type)); \ + } + + +/** + * @addtogroup wrapped.cmpxchg + * + * @brief + * Defines a function which implements an atomic cmpxchg operation which + * cannot spuriously fail using cmpxchg_weak as the underlying atomic + * operation. + * + * @details + * The defined function's signature will match either patomic_opsig_cmpxchg_t + * or patomic_opsig_explicit_cmpxchg_t (depending on the value of 'vis_p'). + * + * @param atomic_type + * The type of the object on which the atomic operation is to be performed. + * Must not be a VLA or an array of unknown size. + * + * @param type + * The non-atomic counterpart of 'atomic_type'. This must have the same size + * as 'atomic_type' and must not have a stricter alignment. + * + * @param fn_name + * The name of the function to be defined. + * + * @param vis_p + * Either the macro 'SHOW_P' if the function should be defined as taking a + * memory order parameter (a.k.a. explicit), or the macro 'HIDE_P' if it + * should not (a.k.a. implicit). + * + * @param inv + * Either the macro 'HIDE' if 'vis_p' is 'SHOW_P', or the macro 'SHOW' if + * 'vis_p' is 'HIDE_P'. + * + * @param order + * The literal token 'order' if 'vis_p' is 'SHOW_P', otherwise the desired + * memory order to be used implicitly by the atomic operation. + * + * @param do_atomic_cmpxchg_weak_explicit + * A macro, M, callable as 'M(type, obj, exp, des, succ, fail, ok);' in block + * scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'obj' : an expression of type 'volatile atomic_type *' + * - 'exp' : the name of an identifier designating an object of type 'type' + * - 'des' : the name of an identifier designating an object of type 'type' + * - 'succ' : an expression of type 'int' whose value is a valid memory order + * - 'fail' : an expression of type 'int' whose value is a valid load memory + * order not stronger than 'succ' + * - 'ok' : the name of an identifier designating an object of type 'int' + * + * The expected behaviour of calling the macro M as above is: + * - the values of 'exp' and 'des' are read + * - in a single atomic operation, the value of the object pointed to by + * 'obj' is read and, if it compares equal to the value of 'exp', the + * value of 'des' is written to the object pointed to by 'obj' + * - the value read from 'obj' is stored in 'exp' + * - 'ok' is set to non-zero if the value of 'des' was written to the object + * pointed to by 'obj' (success), otherwise it is set to zero (failure) + * - the atomic operation uses a memory ordering at least as strong as 'succ' + * for a successful exchange, and a load memory ordering at least as strong + * as 'fail' for a failed exchange + * + * @param do_cmp_eq + * A macro, C, callable as 'C(type, a, b, cmp);' in block scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'a' : the name of an identifier designating an object of type 'type' + * - 'b' : the name of an identifier designating an object of type 'type' + * - 'cmp' : the name of an identifier designating an object of type 'int' + * + * The expected behaviour of calling the macro C as above is: + * - 'cmp' is set to a non-zero value if 'a' and 'b' compare equal, otherwise + * it is set to zero + */ +#define PATOMIC_WRAPPED_CMPXCHG_DEFINE_OP_CMPXCHG_STRONG( \ + atomic_type, type, fn_name, vis_p, inv, order, \ + do_atomic_cmpxchg_weak_explicit, do_cmp_eq \ +) \ + static int \ + fn_name( \ + volatile void *const obj \ + ,void *const expected \ + ,const void *const desired \ + vis_p(_,const int succ) \ + vis_p(_,const int fail) \ + ) \ + { \ + /* static assertions */ \ + PATOMIC_STATIC_ASSERT( \ + sizeof_type_eq_atype, sizeof(type) == sizeof(atomic_type)); \ + \ + /* declarations */ \ + type exp; \ + type des; \ + type old; \ + int eq; \ + int ok; \ + inv(const int succ = (int) order;) \ + inv(const int fail = PATOMIC_CMPXCHG_FAIL_ORDER((int) order);) \ + \ + /* assertions */ \ + PATOMIC_WRAPPED_DO_ASSERT(obj != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT(expected != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT(desired != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT_ALIGNED(obj, atomic_type); \ + PATOMIC_WRAPPED_DO_ASSERT(PATOMIC_IS_VALID_ORDER(succ)); \ + PATOMIC_WRAPPED_DO_ASSERT(PATOMIC_IS_VALID_FAIL_ORDER(succ, fail)); \ + \ + /* inputs */ \ + PATOMIC_WRAPPED_DO_MEMCPY(&exp, expected, sizeof(type)); \ + PATOMIC_WRAPPED_DO_MEMCPY(&des, desired, sizeof(type)); \ + \ + /* operation */ \ + do { \ + /* save expected value for comparison */ \ + PATOMIC_WRAPPED_DO_MEMCPY(&old, &exp, sizeof(type)); \ + \ + /* cmpxchg_weak */ \ + do_atomic_cmpxchg_weak_explicit( \ + type, \ + (volatile atomic_type *) obj, \ + exp, des, \ + succ, fail, \ + ok \ + ); \ + \ + /* check for modification */ \ + 
do_cmp_eq( \ + type, \ + exp, old, \ + eq \ + ); \ + \ + /* was modified: non-spurious failure */ \ + if (!eq) \ + { \ + break; \ + } \ + } \ + while (!ok); \ + \ + /* outputs */ \ + PATOMIC_WRAPPED_DO_MEMCPY(expected, &exp, sizeof(type)); \ + return ok != 0; \ + } + + +/** + * @addtogroup wrapped.cmpxchg + * + * @brief + * Defines a function which implements an atomic bit test-modify operation + * using cmpxchg_weak as the underlying atomic operation. + * + * @details + * The defined function's signature will match either + * patomic_opsig_test_modify_t or patomic_opsig_explicit_test_modify_t + * (depending on the value of 'vis_p'). + * + * @param atomic_type + * The type of the object on which the atomic operation is to be performed. + * Must not be a VLA or an array of unknown size. + * + * @param type + * The non-atomic counterpart of 'atomic_type'. This must have the same size + * as 'atomic_type' and must not have a stricter alignment. + * + * @param fn_name + * The name of the function to be defined. + * + * @param vis_p + * Either the macro 'SHOW_P' if the function should be defined as taking a + * memory order parameter (a.k.a. explicit), or the macro 'HIDE_P' if it + * should not (a.k.a. implicit). + * + * @param order + * The literal token 'order' if 'vis_p' is 'SHOW_P', otherwise the desired + * memory order to be used implicitly by the atomic operation. + * + * @param do_atomic_cmpxchg_weak_explicit + * A macro, M, callable as 'M(type, obj, exp, des, succ, fail, ok);' in block + * scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'obj' : an expression of type 'volatile atomic_type *' + * - 'exp' : the name of an identifier designating an object of type 'type' + * - 'des' : the name of an identifier designating an object of type 'type' + * - 'succ' : an expression of type 'int' whose value is a valid memory order + * - 'fail' : an expression of type 'int' whose value is a valid load memory + * order not stronger than 'succ' + * - 'ok' : the name of an identifier designating an object of type 'int' + * + * The expected behaviour of calling the macro M as above is: + * - the values of 'exp' and 'des' are read + * - in a single atomic operation, the value of the object pointed to by + * 'obj' is read and, if it compares equal to the value of 'exp', the + * value of 'des' is written to the object pointed to by 'obj' + * - the value read from 'obj' is stored in 'exp' + * - 'ok' is set to non-zero if the value of 'des' was written to the object + * pointed to by 'obj' (success), otherwise it is set to zero (failure) + * - the atomic operation uses a memory ordering at least as strong as 'succ' + * for a successful exchange, and a load memory ordering at least as strong + * as 'fail' for a failed exchange + * + * @param do_get_bit + * A macro, B, callable as 'B(type, exp, offset, bit);' in block scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'exp' : the name of an identifier designating an object of type 'type' + * - 'offset' : an expression of type 'int' whose value is non-negative and + * less than 'sizeof(type) * CHAR_BIT' + * - 'bit' : the name of an identifier designating an object of type 'int' + * + * The expected behaviour of calling the macro B as above is: + * - 'bit' is set to the value of the bit at offset 'offset' of 'exp' + * - 'exp' should not be modified + * + * @param do_make_desired + * A macro, D, callable as 'D(type, exp, offset, des);' in block scope. + * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'exp' : the name of an identifier designating an object of type 'type' + * - 'offset' : an expression of type 'int' whose value is non-negative and + * less than 'sizeof(type) * CHAR_BIT' + * - 'des' : the name of an identifier designating an object of type 'type' + * + * The expected behaviour of calling the macro D as above is: + * - 'des' should be set to the value of 'exp' after the desired modify + * operation is applied + * - 'exp' should not be modified + */ +#define PATOMIC_WRAPPED_CMPXCHG_DEFINE_OP_BIT_TEST_MODIFY( \ + atomic_type, type, fn_name, vis_p, order, \ + do_atomic_cmpxchg_weak_explicit, \ + do_get_bit, do_make_desired \ +) \ + static int \ + fn_name( \ + volatile void *const obj \ + ,const int offset \ + vis_p(_,const int order) \ + ) \ + { \ + /* static assertions */ \ + PATOMIC_STATIC_ASSERT( \ + sizeof_type_eq_atype, sizeof(type) == sizeof(atomic_type)); \ + \ + /* declarations */ \ + type exp = {0}; \ + type des; \ + int exp_bit; \ + int ok; \ + const int succ = (int) order; \ + const int fail = PATOMIC_CMPXCHG_FAIL_ORDER(succ); \ + \ + /* assertions */ \ + PATOMIC_WRAPPED_DO_ASSERT(obj != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT_ALIGNED(obj, atomic_type); \ + 
PATOMIC_WRAPPED_DO_ASSERT(offset >= 0); \ + PATOMIC_WRAPPED_DO_ASSERT((size_t) offset < (sizeof(type) * CHAR_BIT)); \ + PATOMIC_WRAPPED_DO_ASSERT(PATOMIC_IS_VALID_ORDER((int) order)); \ + \ + /* operation */ \ + do { \ + /* get the expected bit */ \ + do_get_bit( \ + type, \ + exp, offset, \ + exp_bit \ + ); \ + \ + /* make the desired value from the expected value */ \ + do_make_desired( \ + type, \ + exp, offset, \ + des \ + ); \ + \ + /* cmpxchg_weak */ \ + do_atomic_cmpxchg_weak_explicit( \ + type, \ + (volatile atomic_type *) obj, \ + exp, des, \ + succ, fail, \ + ok \ + ); \ + } \ + while (!ok); \ + \ + /* outputs */ \ + return exp_bit; \ + } + + +/** + * @addtogroup wrapped.cmpxchg + * + * @brief + * Defines a function which implements an atomic fetch operation using + * cmpxchg_weak as the underlying atomic operation. + * + * @details + * The defined function's signature will match either patomic_opsig_fetch_t + * or patomic_opsig_explicit_fetch_t (depending on the value of 'vis_p'). + * + * @param atomic_type + * The type of the object on which the atomic operation is to be performed. + * Must not be a VLA or an array of unknown size. + * + * @param type + * The non-atomic counterpart of 'atomic_type'. This must have the same size + * as 'atomic_type' and must not have a stricter alignment. + * + * @param fn_name + * The name of the function to be defined. + * + * @param vis_p + * Either the macro 'SHOW_P' if the function should be defined as taking a + * memory order parameter (a.k.a. explicit), or the macro 'HIDE_P' if it + * should not (a.k.a. implicit). + * + * @param order + * The literal token 'order' if 'vis_p' is 'SHOW_P', otherwise the desired + * memory order to be used implicitly by the atomic operation. + * + * @param do_atomic_cmpxchg_weak_explicit + * A macro, M, callable as 'M(type, obj, exp, des, succ, fail, ok);' in block + * scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'obj' : an expression of type 'volatile atomic_type *' + * - 'exp' : the name of an identifier designating an object of type 'type' + * - 'des' : the name of an identifier designating an object of type 'type' + * - 'succ' : an expression of type 'int' whose value is a valid memory order + * - 'fail' : an expression of type 'int' whose value is a valid load memory + * order not stronger than 'succ' + * - 'ok' : the name of an identifier designating an object of type 'int' + * + * The expected behaviour of calling the macro M as above is: + * - the values of 'exp' and 'des' are read + * - in a single atomic operation, the value of the object pointed to by + * 'obj' is read and, if it compares equal to the value of 'exp', the + * value of 'des' is written to the object pointed to by 'obj' + * - the value read from 'obj' is stored in 'exp' + * - 'ok' is set to non-zero if the value of 'des' was written to the object + * pointed to by 'obj' (success), otherwise it is set to zero (failure) + * - the atomic operation uses a memory ordering at least as strong as 'succ' + * for a successful exchange, and a load memory ordering at least as strong + * as 'fail' for a failed exchange + * + * @param do_make_desired + * A macro, D, callable as 'D(type, exp, arg, des);' in block scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'exp' : the name of an identifier designating an object of type 'type' + * - 'arg' : the name of an identifier designating an object of type 'type' + * - 'des' : the name of an identifier designating an object of type 'type' + * + * The expected behaviour of calling the macro D as above is: + * - 'des' should be set to the value of 'exp' after the desired modify + * operation is applied + * - 'exp' and 'arg' should not be modified + */ +#define PATOMIC_WRAPPED_CMPXCHG_DEFINE_OP_FETCH( \ + atomic_type, type, fn_name, vis_p, order, \ + do_atomic_cmpxchg_weak_explicit, \ + do_make_desired \ +) \ + static void \ + fn_name( \ + volatile void *const obj \ + ,const void *const argument \ + vis_p(_,const int order) \ + ,void *const ret \ + ) \ + { \ + /* static assertions */ \ + PATOMIC_STATIC_ASSERT( \ + sizeof_type_eq_atype, sizeof(type) == sizeof(atomic_type)); \ + \ + /* declarations */ \ + type exp = {0}; \ + type des; \ + type arg; \ + int ok; \ + const int succ = (int) order; \ + const int fail = PATOMIC_CMPXCHG_FAIL_ORDER(succ); \ + \ + /* assertions */ \ + PATOMIC_WRAPPED_DO_ASSERT(obj != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT(argument != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT(ret != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT_ALIGNED(obj, atomic_type); \ + PATOMIC_WRAPPED_DO_ASSERT(PATOMIC_IS_VALID_ORDER((int) order)); \ + \ + /* inputs */ \ + PATOMIC_WRAPPED_DO_MEMCPY(&arg, argument, sizeof(type)); \ + \ + /* operation */ \ + do { \ + /* make the desired value from the expected value */ \ + do_make_desired( \ + type, \ + exp, arg, \ + des \ + ); \ + \ + /* cmpxchg_weak */ \ + do_atomic_cmpxchg_weak_explicit( \ + type, \ + (volatile atomic_type *) obj, \ + exp, des, \ + succ, fail, \ + ok \ + ); \ + } \ + while (!ok); \ + \ + /* outputs */ \ + PATOMIC_WRAPPED_DO_MEMCPY(ret, &exp, sizeof(type)); \ + } + + +/** + * @addtogroup wrapped.cmpxchg + * + * @brief + * 
Defines a function which implements an atomic noarg fetch operation using + * cmpxchg_weak as the underlying atomic operation. + * + * @details + * The defined function's signature will match either + * patomic_opsig_fetch_noarg_t or patomic_opsig_explicit_fetch_noarg_t + * (depending on the value of 'vis_p'). + * + * @param atomic_type + * The type of the object on which the atomic operation is to be performed. + * Must not be a VLA or an array of unknown size. + * + * @param type + * The non-atomic counterpart of 'atomic_type'. This must have the same size + * as 'atomic_type' and must not have a stricter alignment. + * + * @param fn_name + * The name of the function to be defined. + * + * @param vis_p + * Either the macro 'SHOW_P' if the function should be defined as taking a + * memory order parameter (a.k.a. explicit), or the macro 'HIDE_P' if it + * should not (a.k.a. implicit). + * + * @param order + * The literal token 'order' if 'vis_p' is 'SHOW_P', otherwise the desired + * memory order to be used implicitly by the atomic operation. + * + * @param do_atomic_cmpxchg_weak_explicit + * A macro, M, callable as 'M(type, obj, exp, des, succ, fail, ok);' in block + * scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'obj' : an expression of type 'volatile atomic_type *' + * - 'exp' : the name of an identifier designating an object of type 'type' + * - 'des' : the name of an identifier designating an object of type 'type' + * - 'succ' : an expression of type 'int' whose value is a valid memory order + * - 'fail' : an expression of type 'int' whose value is a valid load memory + * order not stronger than 'succ' + * - 'ok' : the name of an identifier designating an object of type 'int' + * + * The expected behaviour of calling the macro M as above is: + * - the values of 'exp' and 'des' are read + * - in a single atomic operation, the value of the object pointed to by + * 'obj' is read and, if it compares equal to the value of 'exp', the + * value of 'des' is written to the object pointed to by 'obj' + * - the value read from 'obj' is stored in 'exp' + * - 'ok' is set to non-zero if the value of 'des' was written to the object + * pointed to by 'obj' (success), otherwise it is set to zero (failure) + * - the atomic operation uses a memory ordering at least as strong as 'succ' + * for a successful exchange, and a load memory ordering at least as strong + * as 'fail' for a failed exchange + * + * @param do_make_desired + * A macro, D, callable as 'D(type, exp, des);' in block scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'exp' : the name of an identifier designating an object of type 'type' + * - 'des' : the name of an identifier designating an object of type 'type' + * + * The expected behaviour of calling the macro D as above is: + * - 'des' should be set to the value of 'exp' after the desired modify + * operation is applied + * - 'exp' should not be modified + */ +#define PATOMIC_WRAPPED_CMPXCHG_DEFINE_OP_FETCH_NOARG( \ + atomic_type, type, fn_name, vis_p, order, \ + do_atomic_cmpxchg_weak_explicit, \ + do_make_desired \ +) \ + static void \ + fn_name( \ + volatile void *const obj \ + vis_p(_,const int order) \ + ,void *const ret \ + ) \ + { \ + /* static assertions */ \ + PATOMIC_STATIC_ASSERT( \ + sizeof_type_eq_atype, sizeof(type) == sizeof(atomic_type)); \ + \ + /* declarations */ \ + type exp = {0}; \ + type des; \ + int ok; \ + const int succ = (int) order; \ + const int fail = PATOMIC_CMPXCHG_FAIL_ORDER(succ); \ + \ + /* assertions */ \ + PATOMIC_WRAPPED_DO_ASSERT(obj != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT(ret != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT_ALIGNED(obj, atomic_type); \ + PATOMIC_WRAPPED_DO_ASSERT(PATOMIC_IS_VALID_ORDER((int) order)); \ + \ + /* operation */ \ + do { \ + /* make the desired value from the expected value */ \ + do_make_desired( \ + type, \ + exp, \ + des \ + ); \ + \ + /* cmpxchg_weak */ \ + do_atomic_cmpxchg_weak_explicit( \ + type, \ + (volatile atomic_type *) obj, \ + exp, des, \ + succ, fail, \ + ok \ + ); \ + } \ + while (!ok); \ + \ + /* outputs */ \ + PATOMIC_WRAPPED_DO_MEMCPY(ret, &exp, sizeof(type)); \ + } + + +/** + * @addtogroup wrapped.cmpxchg + * + * @brief + * Defines a function which implements an atomic void operation using + * cmpxchg_weak as the underlying atomic operation. 
+ * + * @details + * The defined function's signature will match either patomic_opsig_void_t or + * patomic_opsig_explicit_void_t (depending on the value of 'vis_p'). + * + * @param atomic_type + * The type of the object on which the atomic operation is to be performed. + * Must not be a VLA or an array of unknown size. + * + * @param type + * The non-atomic counterpart of 'atomic_type'. This must have the same size + * as 'atomic_type' and must not have a stricter alignment. + * + * @param fn_name + * The name of the function to be defined. + * + * @param vis_p + * Either the macro 'SHOW_P' if the function should be defined as taking a + * memory order parameter (a.k.a. explicit), or the macro 'HIDE_P' if it + * should not (a.k.a. implicit). + * + * @param order + * The literal token 'order' if 'vis_p' is 'SHOW_P', otherwise the desired + * memory order to be used implicitly by the atomic operation. + * + * @param do_atomic_cmpxchg_weak_explicit + * A macro, M, callable as 'M(type, obj, exp, des, succ, fail, ok);' in block + * scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'obj' : an expression of type 'volatile atomic_type *' + * - 'exp' : the name of an identifier designating an object of type 'type' + * - 'des' : the name of an identifier designating an object of type 'type' + * - 'succ' : an expression of type 'int' whose value is a valid memory order + * - 'fail' : an expression of type 'int' whose value is a valid load memory + * order not stronger than 'succ' + * - 'ok' : the name of an identifier designating an object of type 'int' + * + * The expected behaviour of calling the macro M as above is: + * - the values of 'exp' and 'des' are read + * - in a single atomic operation, the value of the object pointed to by + * 'obj' is read and, if it compares equal to the value of 'exp', the + * value of 'des' is written to the object pointed to by 'obj' + * - the value read from 'obj' is stored in 'exp' + * - 'ok' is set to non-zero if the value of 'des' was written to the object + * pointed to by 'obj' (success), otherwise it is set to zero (failure) + * - the atomic operation uses a memory ordering at least as strong as 'succ' + * for a successful exchange, and a load memory ordering at least as strong + * as 'fail' for a failed exchange + * + * @param do_make_desired + * A macro, D, callable as 'D(type, exp, arg, des);' in block scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'exp' : the name of an identifier designating an object of type 'type' + * - 'arg' : the name of an identifier designating an object of type 'type' + * - 'des' : the name of an identifier designating an object of type 'type' + * + * The expected behaviour of calling the macro D as above is: + * - 'des' should be set to the value of 'exp' after the desired modify + * operation is applied + * - 'exp' and 'arg' should not be modified + */ +#define PATOMIC_WRAPPED_CMPXCHG_DEFINE_OP_VOID( \ + atomic_type, type, fn_name, vis_p, order, \ + do_atomic_cmpxchg_weak_explicit, \ + do_make_desired \ +) \ + static void \ + fn_name( \ + volatile void *const obj \ + ,const void *const argument \ + vis_p(_,const int order) \ + ) \ + { \ + /* static assertions */ \ + PATOMIC_STATIC_ASSERT( \ + sizeof_type_eq_atype, sizeof(type) == sizeof(atomic_type)); \ + \ + /* declarations */ \ + type exp = {0}; \ + type des; \ + type arg; \ + int ok; \ + const int succ = (int) order; \ + const int fail = PATOMIC_CMPXCHG_FAIL_ORDER(succ); \ + \ + /* assertions */ \ + PATOMIC_WRAPPED_DO_ASSERT(obj != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT(argument != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT_ALIGNED(obj, atomic_type); \ + PATOMIC_WRAPPED_DO_ASSERT(PATOMIC_IS_VALID_ORDER((int) order)); \ + \ + /* inputs */ \ + PATOMIC_WRAPPED_DO_MEMCPY(&arg, argument, sizeof(type)); \ + \ + /* operation */ \ + do { \ + /* make the desired value from the expected value */ \ + do_make_desired( \ + type, \ + exp, arg, \ + des \ + ); \ + \ + /* cmpxchg_weak */ \ + do_atomic_cmpxchg_weak_explicit( \ + type, \ + (volatile atomic_type *) obj, \ + exp, des, \ + succ, fail, \ + ok \ + ); \ + } \ + while (!ok); \ + } + + +/** + * @addtogroup wrapped.cmpxchg + * + * @brief + * Defines a function which implements an atomic void noarg operation using + * cmpxchg_weak as the underlying atomic operation. 
+ * + * @details + * The defined function's signature will match either + * patomic_opsig_void_noarg_t or patomic_opsig_explicit_void_noarg_t + * (depending on the value of 'vis_p'). + * + * @param atomic_type + * The type of the object on which the atomic operation is to be performed. + * Must not be a VLA or an array of unknown size. + * + * @param type + * The non-atomic counterpart of 'atomic_type'. This must have the same size + * as 'atomic_type' and must not have a stricter alignment. + * + * @param fn_name + * The name of the function to be defined. + * + * @param vis_p + * Either the macro 'SHOW_P' if the function should be defined as taking a + * memory order parameter (a.k.a. explicit), or the macro 'HIDE_P' if it + * should not (a.k.a. implicit). + * + * @param order + * The literal token 'order' if 'vis_p' is 'SHOW_P', otherwise the desired + * memory order to be used implicitly by the atomic operation. + * + * @param do_atomic_cmpxchg_weak_explicit + * A macro, M, callable as 'M(type, obj, exp, des, succ, fail, ok);' in block + * scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'obj' : an expression of type 'volatile atomic_type *' + * - 'exp' : the name of an identifier designating an object of type 'type' + * - 'des' : the name of an identifier designating an object of type 'type' + * - 'succ' : an expression of type 'int' whose value is a valid memory order + * - 'fail' : an expression of type 'int' whose value is a valid load memory + * order not stronger than 'succ' + * - 'ok' : the name of an identifier designating an object of type 'int' + * + * The expected behaviour of calling the macro M as above is: + * - the values of 'exp' and 'des' are read + * - in a single atomic operation, the value of the object pointed to by + * 'obj' is read and, if it compares equal to the value of 'exp', the + * value of 'des' is written to the object pointed to by 'obj' + * - the value read from 'obj' is stored in 'exp' + * - 'ok' is set to non-zero if the value of 'des' was written to the object + * pointed to by 'obj' (success), otherwise it is set to zero (failure) + * - the atomic operation uses a memory ordering at least as strong as 'succ' + * for a successful exchange, and a load memory ordering at least as strong + * as 'fail' for a failed exchange + * + * @param do_make_desired + * A macro, D, callable as 'D(type, exp, des);' in block scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'exp' : the name of an identifier designating an object of type 'type' + * - 'des' : the name of an identifier designating an object of type 'type' + * + * The expected behaviour of calling the macro D as above is: + * - 'des' should be set to the value of 'exp' after the desired modify + * operation is applied + * - 'exp' should not be modified + */ +#define PATOMIC_WRAPPED_CMPXCHG_DEFINE_OP_VOID_NOARG( \ + atomic_type, type, fn_name, vis_p, order, \ + do_atomic_cmpxchg_weak_explicit, \ + do_make_desired \ +) \ + static void \ + fn_name( \ + volatile void *const obj \ + vis_p(_,const int order) \ + ) \ + { \ + /* static assertions */ \ + PATOMIC_STATIC_ASSERT( \ + sizeof_type_eq_atype, sizeof(type) == sizeof(atomic_type)); \ + \ + /* declarations */ \ + type exp = {0}; \ + type des; \ + int ok; \ + const int succ = (int) order; \ + const int fail = PATOMIC_CMPXCHG_FAIL_ORDER(succ); \ + \ + /* assertions */ \ + PATOMIC_WRAPPED_DO_ASSERT(obj != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT_ALIGNED(obj, atomic_type); \ + PATOMIC_WRAPPED_DO_ASSERT(PATOMIC_IS_VALID_ORDER((int) order)); \ + \ + /* operation */ \ + do { \ + /* make the desired value from the expected value */ \ + do_make_desired( \ + type, \ + exp, \ + des \ + ); \ + \ + /* cmpxchg_weak */ \ + do_atomic_cmpxchg_weak_explicit( \ + type, \ + (volatile atomic_type *) obj, \ + exp, des, \ + succ, fail, \ + ok \ + ); \ + } \ + while (!ok); \ + } + + +#endif /* PATOMIC_WRAPPED_CMPXCHG_H */ diff --git a/src/include/patomic/wrapped/direct.h b/src/include/patomic/wrapped/direct.h new file mode 100644 index 000000000..7ece5c431 --- /dev/null +++ b/src/include/patomic/wrapped/direct.h @@ -0,0 +1,901 @@ +#ifndef PATOMIC_WRAPPED_DIRECT_H +#define PATOMIC_WRAPPED_DIRECT_H + +#include "base.h" + +#include + +#include + +#include +#include + + +/** + * @addtogroup wrapped.direct + * + * @brief + * Defines a function 
which implements an atomic store operation using + * store as the underlying atomic operation. + * + * @details + * The defined function's signature will match either patomic_opsig_store_t + * or patomic_opsig_explicit_store_t (depending on the value of 'vis_p'). + * + * @param atomic_type + * The type of the object on which the atomic operation is to be performed. + * Must not be a VLA or an array of unknown size. + * + * @param type + * The non-atomic counterpart of 'atomic_type'. This must have the same size + * as 'atomic_type' and must not have a stricter alignment. + * + * @param fn_name + * The name of the function to be defined. + * + * @param vis_p + * Either the macro 'SHOW_P' if the function should be defined as taking a + * memory order parameter (a.k.a. explicit), or the macro 'HIDE_P' if it + * should not (a.k.a. implicit). + * + * @param order + * The literal token 'order' if 'vis_p' is 'SHOW_P', otherwise the desired + * memory order to be used implicitly by the atomic operation. + * + * @param do_atomic_store_explicit + * A macro, M, callable as 'M(type, obj, des, order);' in block scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'obj' : an expression of type 'volatile atomic_type *' + * - 'des' : the name of an identifier designating an object of type 'type' + * - 'order' : an expression of type 'int' whose value is a valid store + * memory order + * + * The expected behaviour of calling the macro M as above is: + * - the value of 'des' is read and atomically stored into the object pointed + * to by 'obj' + * - the atomic operation uses a store memory ordering at least as strong as + * 'order' + */ +#define PATOMIC_WRAPPED_DIRECT_DEFINE_OP_STORE( \ + atomic_type, type, fn_name, vis_p, order, \ + do_atomic_store_explicit \ +) \ + static void \ + fn_name( \ + volatile void *const obj \ + ,const void *const desired \ + vis_p(_,const int order) \ + ) \ + { \ + /* static assertions */ \ + PATOMIC_STATIC_ASSERT( \ + sizeof_type_eq_atype, sizeof(type) == sizeof(atomic_type)); \ + \ + /* declarations */ \ + type des; \ + \ + /* assertions */ \ + PATOMIC_WRAPPED_DO_ASSERT(obj != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT(desired != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT_ALIGNED(obj, atomic_type); \ + PATOMIC_WRAPPED_DO_ASSERT(PATOMIC_IS_VALID_STORE_ORDER((int) order)); \ + \ + /* inputs */ \ + PATOMIC_WRAPPED_DO_MEMCPY(&des, desired, sizeof(type)); \ + \ + /* operation */ \ + do_atomic_store_explicit( \ + type, \ + (volatile atomic_type *) obj, \ + des, \ + (int) order \ + ); \ + } + + +/** + * @addtogroup wrapped.direct + * + * @brief + * Defines a function which implements an atomic load operation using + * load as the underlying atomic operation. + * + * @details + * The defined function's signature will match either patomic_opsig_load_t + * or patomic_opsig_explicit_load_t (depending on the value of 'vis_p'). + * + * @param atomic_type + * The type of the object on which the atomic operation is to be performed. + * Must not be a VLA or an array of unknown size. 
+ * + * @param type + * The non-atomic counterpart of 'atomic_type'. This must have the same size + * as 'atomic_type' and must not have a stricter alignment. + * + * @param fn_name + * The name of the function to be defined. + * + * @param vis_p + * Either the macro 'SHOW_P' if the function should be defined as taking a + * memory order parameter (a.k.a. explicit), or the macro 'HIDE_P' if it + * should not (a.k.a. implicit). + * + * @param order + * The literal token 'order' if 'vis_p' is 'SHOW_P', otherwise the desired + * memory order to be used implicitly by the atomic operation. + * + * @param do_atomic_load_explicit + * A macro, M, callable as 'M(type, obj, order, res);' in block scope. + * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'obj' : an expression of type 'const volatile atomic_type *' + * - 'order' : an expression of type 'int' whose value is a valid load memory + * order + * - 'res' : the name of an identifier designating an object of type 'type' + * + * The expected behaviour of calling the macro M as above is: + * - the value of the object pointed to by 'obj' is atomically read + * - 'res' is set to the value which was read + * - the atomic operation uses a load memory ordering at least as strong as + * 'order' + */ +#define PATOMIC_WRAPPED_DIRECT_DEFINE_OP_LOAD( \ + atomic_type, type, fn_name, vis_p, order, \ + do_atomic_load_explicit \ +) \ + static void \ + fn_name( \ + const volatile void *const obj \ + vis_p(_,const int order) \ + ,void *const ret \ + ) \ + { \ + /* static assertions */ \ + PATOMIC_STATIC_ASSERT( \ + sizeof_type_eq_atype, sizeof(type) == sizeof(atomic_type)); \ + \ + /* declarations */ \ + type res; \ + \ + /* assertions */ \ + PATOMIC_WRAPPED_DO_ASSERT(obj != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT(ret != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT_ALIGNED(obj, atomic_type); \ + PATOMIC_WRAPPED_DO_ASSERT(PATOMIC_IS_VALID_LOAD_ORDER((int) order)); \ + \ + /* operation 
*/ \ + do_atomic_load_explicit( \ + type, \ + (const volatile atomic_type *) obj, \ + (int) order, \ + res \ + ); \ + \ + /* outputs */ \ + PATOMIC_WRAPPED_DO_MEMCPY(ret, &res, sizeof(type)); \ + } + + +/** + * @addtogroup wrapped.direct + * + * @brief + * Defines a function which implements an atomic exchange operation using + * exchange as the underlying atomic operation. + * + * @details + * The defined function's signature will match either patomic_opsig_exchange_t + * or patomic_opsig_explicit_exchange_t (depending on the value of 'vis_p'). + * + * @param atomic_type + * The type of the object on which the atomic operation is to be performed. + * Must not be a VLA or an array of unknown size. + * + * @param type + * The non-atomic counterpart of 'atomic_type'. This must have the same size + * as 'atomic_type' and must not have a stricter alignment. + * + * @param fn_name + * The name of the function to be defined. + * + * @param vis_p + * Either the macro 'SHOW_P' if the function should be defined as taking a + * memory order parameter (a.k.a. explicit), or the macro 'HIDE_P' if it + * should not (a.k.a. implicit). + * + * @param order + * The literal token 'order' if 'vis_p' is 'SHOW_P', otherwise the desired + * memory order to be used implicitly by the atomic operation. + * + * @param do_atomic_exchange_explicit + * A macro, M, callable as 'M(type, obj, des, order, res);' in block scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'obj' : an expression of type 'volatile atomic_type *' + * - 'des' : the name of an identifier designating an object of type 'type' + * - 'order' : an expression of type 'int' whose value is a valid memory order + * - 'res' : the name of an identifier designating an object of type 'type' + * + * The expected behaviour of calling the macro M as above is: + * - the value of 'des' is read and, in a single atomic operation is stored + * into the object pointed to by 'obj' while the value of the object + * pointed to by 'obj' is read + * - the value read from 'obj' is stored into 'res' + * - the atomic operation uses a memory ordering at least as strong as + * 'order' + */ +#define PATOMIC_WRAPPED_DIRECT_DEFINE_OP_EXCHANGE( \ + atomic_type, type, fn_name, vis_p, order, \ + do_atomic_exchange_explicit \ +) \ + static void \ + fn_name( \ + volatile void *const obj \ + ,const void *const desired \ + vis_p(_,const int order) \ + ,void *const ret \ + ) \ + { \ + /* static assertions */ \ + PATOMIC_STATIC_ASSERT( \ + sizeof_type_eq_atype, sizeof(type) == sizeof(atomic_type)); \ + \ + /* declarations */ \ + type des; \ + type res; \ + \ + /* assertions */ \ + PATOMIC_WRAPPED_DO_ASSERT(obj != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT(desired != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT(ret != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT_ALIGNED(obj, atomic_type); \ + PATOMIC_WRAPPED_DO_ASSERT(PATOMIC_IS_VALID_ORDER((int) order)); \ + \ + /* inputs */ \ + PATOMIC_WRAPPED_DO_MEMCPY(&des, desired, sizeof(type)); \ + \ + /* operation */ \ + do_atomic_exchange_explicit( \ + type, \ + (volatile atomic_type *) obj, \ + des, \ + (int) order, \ + res \ + ); \ + \ + /* outputs */ \ + PATOMIC_WRAPPED_DO_MEMCPY(ret, &res, sizeof(type)); \ + } + + +/** + * @addtogroup wrapped.direct + * + * @brief + * Defines a function which implements an atomic cmpxchg operation using + * cmpxchg as the 
underlying atomic operation. + * + * @details + * The defined function's signature will match either patomic_opsig_cmpxchg_t + * or patomic_opsig_explicit_cmpxchg_t (depending on the value of 'vis_p'). + * + * @param atomic_type + * The type of the object on which the atomic operation is to be performed. + * Must not be a VLA or an array of unknown size. + * + * @param type + * The non-atomic counterpart of 'atomic_type'. This must have the same size + * as 'atomic_type' and must not have a stricter alignment. + * + * @param fn_name + * The name of the function to be defined. + * + * @param vis_p + * Either the macro 'SHOW_P' if the function should be defined as taking a + * memory order parameter (a.k.a. explicit), or the macro 'HIDE_P' if it + * should not (a.k.a. implicit). + * + * @param inv + * Either the macro 'HIDE' if 'vis_p' is 'SHOW_P', or the macro 'SHOW' if + * 'vis_p' is 'HIDE_P'. + * + * @param order + * The literal token 'order' if 'vis_p' is 'SHOW_P', otherwise the desired + * memory order to be used implicitly by the atomic operation. + * + * @param do_atomic_cmpxchg_explicit + * A macro, M, callable as 'M(type, obj, exp, des, succ, fail, ok);' in block + * scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'obj' : an expression of type 'volatile atomic_type *' + * - 'exp' : the name of an identifier designating an object of type 'type' + * - 'des' : the name of an identifier designating an object of type 'type' + * - 'succ' : an expression of type 'int' whose value is a valid memory order + * - 'fail' : an expression of type 'int' whose value is a valid load memory + * order not stronger than 'succ' + * - 'ok' : the name of an identifier designating an object of type 'int' + * + * The expected behaviour of calling the macro M as above is: + * - the values of 'exp' and 'des' are read + * - in a single atomic operation, the value of the object pointed to by + * 'obj' is read and, if it compares equal to the value of 'exp', the + * value of 'des' is written to the object pointed to by 'obj' + * - the value read from 'obj' is stored in 'exp' + * - 'ok' is set to non-zero if the value of 'des' was written to the object + * pointed to by 'obj' (success), otherwise it is set to zero (failure) + * - the atomic operation uses a memory ordering at least as strong as 'succ' + * for a successful exchange, and a load memory ordering at least as strong + * as 'fail' for a failed exchange + */ +#define PATOMIC_WRAPPED_DIRECT_DEFINE_OP_CMPXCHG( \ + atomic_type, type, fn_name, vis_p, inv, order, \ + do_atomic_cmpxchg_explicit \ +) \ + static int \ + fn_name( \ + volatile void *const obj \ + ,void *const expected \ + ,const void *const desired \ + vis_p(_,const int succ) \ + vis_p(_,const int fail) \ + ) \ + { \ + /* static assertions */ \ + PATOMIC_STATIC_ASSERT( \ + sizeof_type_eq_atype, sizeof(type) == sizeof(atomic_type)); \ + \ + /* declarations */ \ + type exp; \ + type des; \ + int ok; \ + inv(const int succ = (int) order;) \ + inv(const int fail = PATOMIC_CMPXCHG_FAIL_ORDER(succ);) \ + \ + /* assertions */ \ + PATOMIC_WRAPPED_DO_ASSERT(obj != NULL); \ + 
PATOMIC_WRAPPED_DO_ASSERT(expected != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT(desired != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT_ALIGNED(obj, atomic_type); \ + PATOMIC_WRAPPED_DO_ASSERT(PATOMIC_IS_VALID_ORDER(succ)); \ + PATOMIC_WRAPPED_DO_ASSERT(PATOMIC_IS_VALID_FAIL_ORDER(succ, fail)); \ + \ + /* inputs */ \ + PATOMIC_WRAPPED_DO_MEMCPY(&des, desired, sizeof(type)); \ + PATOMIC_WRAPPED_DO_MEMCPY(&exp, expected, sizeof(type)); \ + \ + /* operation */ \ + do_atomic_cmpxchg_explicit( \ + type, \ + (volatile atomic_type *) obj, \ + exp, des, \ + succ, fail, \ + ok \ + ); \ + \ + /* outputs */ \ + PATOMIC_WRAPPED_DO_MEMCPY(expected, &exp, sizeof(type)); \ + return ok != 0; \ + } + + +/** + * @addtogroup wrapped.direct + * + * @brief + * Defines a function which implements an atomic bit test operation using + * bit test as the underlying atomic operation. + * + * @details + * The defined function's signature will match either patomic_opsig_test_t + * or patomic_opsig_explicit_test_t (depending on the value of 'vis_p'). + * + * @param atomic_type + * The type of the object on which the atomic operation is to be performed. + * Must not be a VLA or an array of unknown size. + * + * @param type + * The non-atomic counterpart of 'atomic_type'. This must have the same size + * as 'atomic_type' and must not have a stricter alignment. + * + * @param fn_name + * The name of the function to be defined. + * + * @param vis_p + * Either the macro 'SHOW_P' if the function should be defined as taking a + * memory order parameter (a.k.a. explicit), or the macro 'HIDE_P' if it + * should not (a.k.a. implicit). + * + * @param order + * The literal token 'order' if 'vis_p' is 'SHOW_P', otherwise the desired + * memory order to be used implicitly by the atomic operation. + * + * @param do_atomic_bit_test_explicit + * A macro, M, callable as 'M(type, obj, offset, order, res);' in block scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'obj' : an expression of type 'const volatile atomic_type *' + * - 'offset' : an expression of type 'int' whose value is non-negative and + * less than 'sizeof(type) * CHAR_BIT' + * - 'order' : an expression of type 'int' whose value is a valid load + * memory order + * - 'res' : the name of an identifier designating an object of type 'int' + * + * The expected behaviour of calling the macro M as above is: + * - the value of the object pointed to by 'obj' is atomically read + * - 'res' is set to the bit at offset 'offset' of the value which was read + * - the atomic operation uses a load memory ordering at least as strong as + * 'order' + */ +#define PATOMIC_WRAPPED_DIRECT_DEFINE_OP_BIT_TEST( \ + atomic_type, type, fn_name, vis_p, order, \ + do_atomic_bit_test_explicit \ +) \ + static int \ + fn_name( \ + const volatile void *const obj \ + ,const int offset \ + vis_p(_,const int order) \ + ) \ + { \ + /* static assertions */ \ + PATOMIC_STATIC_ASSERT( \ + sizeof_type_eq_atype, sizeof(type) == sizeof(atomic_type)); \ + \ + /* declarations */ \ + int res; \ + \ + /* assertions */ \ + PATOMIC_WRAPPED_DO_ASSERT(obj != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT_ALIGNED(obj, atomic_type); \ + PATOMIC_WRAPPED_DO_ASSERT(offset >= 0); \ + PATOMIC_WRAPPED_DO_ASSERT((size_t) offset < (sizeof(type) * CHAR_BIT)); \ + PATOMIC_WRAPPED_DO_ASSERT(PATOMIC_IS_VALID_LOAD_ORDER((int) order)); \ + \ + /* operation */ \ + do_atomic_bit_test_explicit( \ + type, \ + (const volatile atomic_type *) obj, \ + offset, \ + (int) order, \ + res \ + ); \ + \ + /* outputs */ \ + return res != 0; \ + } + + +/** + * @addtogroup wrapped.direct + * + * @brief + * Defines a function which implements an atomic bit test-modify operation + * using bit test-modify as the underlying atomic operation. 
+ * + * @details + * The defined function's signature will match either + * patomic_opsig_test_modify_t or patomic_opsig_explicit_test_modify_t + * (depending on the value of 'vis_p'). + * + * @param atomic_type + * The type of the object on which the atomic operation is to be performed. + * Must not be a VLA or an array of unknown size. + * + * @param type + * The non-atomic counterpart of 'atomic_type'. This must have the same size + * as 'atomic_type' and must not have a stricter alignment. + * + * @param fn_name + * The name of the function to be defined. + * + * @param vis_p + * Either the macro 'SHOW_P' if the function should be defined as taking a + * memory order parameter (a.k.a. explicit), or the macro 'HIDE_P' if it + * should not (a.k.a. implicit). + * + * @param order + * The literal token 'order' if 'vis_p' is 'SHOW_P', otherwise the desired + * memory order to be used implicitly by the atomic operation. + * + * @param do_atomic_bit_test_modify_explicit + * A macro, M, callable as 'M(type, obj, offset, order, res);' in block scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'obj' : an expression of type 'volatile atomic_type *' + * - 'offset' : an expression of type 'int' whose value is non-negative and + * less than 'sizeof(type) * CHAR_BIT' + * - 'order' : an expression of type 'int' whose value is a valid memory + * order + * - 'res' : the name of an identifier designating an object of type 'int' + * + * The expected behaviour of calling the macro M as above is: + * - the bit at offset 'offset' in the object pointed to by 'obj' is modified + * in a single atomic operation + * - the atomic operation uses a store memory ordering at least as strong as + * 'order' if the operation does not require reading the old value, + * otherwise it uses a memory ordering at least as strong as 'order' + */ +#define PATOMIC_WRAPPED_DIRECT_DEFINE_OP_BIT_TEST_MODIFY( \ + atomic_type, type, fn_name, vis_p, order, \ + do_atomic_bit_test_modify_explicit \ +) \ + static int \ + fn_name( \ + volatile void *const obj \ + ,const int offset \ + vis_p(_,const int order) \ + ) \ + { \ + /* static assertions */ \ + PATOMIC_STATIC_ASSERT( \ + sizeof_type_eq_atype, sizeof(type) == sizeof(atomic_type)); \ + \ + /* declarations */ \ + int res; \ + \ + /* assertions */ \ + PATOMIC_WRAPPED_DO_ASSERT(obj != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT_ALIGNED(obj, atomic_type); \ + PATOMIC_WRAPPED_DO_ASSERT(offset >= 0); \ + PATOMIC_WRAPPED_DO_ASSERT((size_t) offset < (sizeof(type) * CHAR_BIT)); \ + PATOMIC_WRAPPED_DO_ASSERT(PATOMIC_IS_VALID_ORDER((int) order)); \ + \ + /* operation */ \ + do_atomic_bit_test_modify_explicit( \ + type, \ + (volatile atomic_type *) obj, \ + offset, \ + (int) order, \ + res \ + ); \ + \ + /* outputs */ \ + return res != 0; \ + } + + +/** + * @addtogroup wrapped.direct + * + * @brief + * Defines a function which implements an atomic fetch operation using fetch + * as the underlying atomic operation. 
+ * + * @details + * The defined function's signature will match either patomic_opsig_fetch_t + * or patomic_opsig_explicit_fetch_t (depending on the value of 'vis_p'). + * + * @param atomic_type + * The type of the object on which the atomic operation is to be performed. + * Must not be a VLA or an array of unknown size. + * + * @param type + * The non-atomic counterpart of 'atomic_type'. This must have the same size + * as 'atomic_type' and must not have a stricter alignment. + * + * @param fn_name + * The name of the function to be defined. + * + * @param vis_p + * Either the macro 'SHOW_P' if the function should be defined as taking a + * memory order parameter (a.k.a. explicit), or the macro 'HIDE_P' if it + * should not (a.k.a. implicit). + * + * @param order + * The literal token 'order' if 'vis_p' is 'SHOW_P', otherwise the desired + * memory order to be used implicitly by the atomic operation. + * + * @param do_atomic_fetch_explicit + * A macro, M, callable as 'M(type, obj, arg, order, res);' in block scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'obj' : an expression of type 'volatile atomic_type *' + * - 'arg' : the name of an identifier designating an object of type 'type' + * - 'order' : an expression of type 'int' whose value is a valid memory order + * - 'res' : the name of an identifier designating an object of type 'type' + * + * The expected behaviour of calling the macro M as above is: + * - the value of the object pointed to by 'obj' is read and modified in a + * single atomic operation + * - 'res' is set to the original value read from the object pointed to by + * 'obj' + * - the atomic operation uses a memory ordering at least as strong as + * 'order' + */ +#define PATOMIC_WRAPPED_DIRECT_DEFINE_OP_FETCH( \ + atomic_type, type, fn_name, vis_p, order, \ + do_atomic_fetch_explicit \ +) \ + static void \ + fn_name( \ + volatile void *const obj \ + ,const void *const argument \ + vis_p(_,const int order) \ + ,void *const ret \ + ) \ + { \ + /* static assertions */ \ + PATOMIC_STATIC_ASSERT( \ + sizeof_type_eq_atype, sizeof(type) == sizeof(atomic_type)); \ + \ + /* declarations */ \ + type arg; \ + type res; \ + \ + /* assertions */ \ + PATOMIC_WRAPPED_DO_ASSERT(obj != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT(argument != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT(ret != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT_ALIGNED(obj, atomic_type); \ + PATOMIC_WRAPPED_DO_ASSERT(PATOMIC_IS_VALID_ORDER((int) order)); \ + \ + /* inputs */ \ + PATOMIC_WRAPPED_DO_MEMCPY(&arg, argument, sizeof(type)); \ + \ + /* operation */ \ + do_atomic_fetch_explicit( \ + type, \ + (volatile atomic_type *) obj, \ + arg, \ + (int) order, \ + res \ + ); \ + \ + /* outputs */ \ + PATOMIC_WRAPPED_DO_MEMCPY(ret, &res, sizeof(type)); \ + } + + +/** + * @addtogroup wrapped.direct + * + * @brief + * Defines a function which implements an atomic noarg fetch operation using + * noarg fetch as the underlying atomic operation. 
+ * + * @details + * The defined function's signature will match either + * patomic_opsig_fetch_noarg_t or patomic_opsig_explicit_fetch_noarg_t + * (depending on the value of 'vis_p'). + * + * @param atomic_type + * The type of the object on which the atomic operation is to be performed. + * Must not be a VLA or an array of unknown size. + * + * @param type + * The non-atomic counterpart of 'atomic_type'. This must have the same size + * as 'atomic_type' and must not have a stricter alignment. + * + * @param fn_name + * The name of the function to be defined. + * + * @param vis_p + * Either the macro 'SHOW_P' if the function should be defined as taking a + * memory order parameter (a.k.a. explicit), or the macro 'HIDE_P' if it + * should not (a.k.a. implicit). + * + * @param order + * The literal token 'order' if 'vis_p' is 'SHOW_P', otherwise the desired + * memory order to be used implicitly by the atomic operation. + * + * @param do_atomic_fetch_noarg_explicit + * A macro, M, callable as 'M(type, obj, order, res);' in block scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'obj' : an expression of type 'volatile atomic_type *' + * - 'order' : an expression of type 'int' whose value is a valid memory order + * - 'res' : the name of an identifier designating an object of type 'type' + * + * The expected behaviour of calling the macro M as above is: + * - the value of the object pointed to by 'obj' is read and modified in a + * single atomic operation + * - 'res' is set to the original value read from the object pointed to by + * 'obj' + * - the atomic operation uses a memory ordering at least as strong as + * 'order' + */ +#define PATOMIC_WRAPPED_DIRECT_DEFINE_OP_FETCH_NOARG( \ + atomic_type, type, fn_name, vis_p, order, \ + do_atomic_fetch_noarg_explicit \ +) \ + static void \ + fn_name( \ + volatile void *const obj \ + vis_p(_,const int order) \ + ,void *const ret \ + ) \ + { \ + /* static assertions */ \ + PATOMIC_STATIC_ASSERT( \ + sizeof_type_eq_atype, sizeof(type) == sizeof(atomic_type)); \ + \ + /* declarations */ \ + type res; \ + \ + /* assertions */ \ + PATOMIC_WRAPPED_DO_ASSERT(obj != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT(ret != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT_ALIGNED(obj, atomic_type); \ + PATOMIC_WRAPPED_DO_ASSERT(PATOMIC_IS_VALID_ORDER((int) order)); \ + \ + /* operation */ \ + do_atomic_fetch_noarg_explicit( \ + type, \ + (volatile atomic_type *) obj, \ + (int) order, \ + res \ + ); \ + \ + /* outputs */ \ + PATOMIC_WRAPPED_DO_MEMCPY(ret, &res, sizeof(type)); \ + } + + +/** + * @addtogroup wrapped.direct + * + * @brief + * Defines a function which implements an atomic void operation using void + * as the underlying atomic operation. + * + * @details + * The defined function's signature will match either patomic_opsig_void_t or + * patomic_opsig_explicit_void_t (depending on the value of 'vis_p'). + * + * @param atomic_type + * The type of the object on which the atomic operation is to be performed. 
+ * Must not be a VLA or an array of unknown size. + * + * @param type + * The non-atomic counterpart of 'atomic_type'. This must have the same size + * as 'atomic_type' and must not have a stricter alignment. + * + * @param fn_name + * The name of the function to be defined. + * + * @param vis_p + * Either the macro 'SHOW_P' if the function should be defined as taking a + * memory order parameter (a.k.a. explicit), or the macro 'HIDE_P' if it + * should not (a.k.a. implicit). + * + * @param order + * The literal token 'order' if 'vis_p' is 'SHOW_P', otherwise the desired + * memory order to be used implicitly by the atomic operation. + * + * @param do_atomic_void_explicit + * A macro, M, callable as 'M(type, obj, arg, order);' in block scope. + * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'obj' : an expression of type 'volatile atomic_type *' + * - 'arg' : the name of an identifier designating an object of type 'type' + * - 'order' : an expression of type 'int' whose value is a valid memory order + * + * The expected behaviour of calling the macro M as above is: + * - the value of the object pointed to by 'obj' is read and modified in a + * single atomic operation + * - the atomic operation uses a memory ordering at least as strong as + * 'order' + */ +#define PATOMIC_WRAPPED_DIRECT_DEFINE_OP_VOID( \ + atomic_type, type, fn_name, vis_p, order, \ + do_atomic_void_explicit \ +) \ + static void \ + fn_name( \ + volatile void *const obj \ + ,const void *const argument \ + vis_p(_,const int order) \ + ) \ + { \ + /* static assertions */ \ + PATOMIC_STATIC_ASSERT( \ + sizeof_type_eq_atype, sizeof(type) == sizeof(atomic_type)); \ + \ + /* declarations */ \ + type arg; \ + \ + /* assertions */ \ + PATOMIC_WRAPPED_DO_ASSERT(obj != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT(argument != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT_ALIGNED(obj, atomic_type); \ + PATOMIC_WRAPPED_DO_ASSERT(PATOMIC_IS_VALID_ORDER((int) 
order)); \ + \ + /* inputs */ \ + PATOMIC_WRAPPED_DO_MEMCPY(&arg, argument, sizeof(type)); \ + \ + /* operation */ \ + do_atomic_void_explicit( \ + type, \ + (volatile atomic_type *) obj, \ + arg, \ + (int) order \ + ); \ + } + + +/** + * @addtogroup wrapped.direct + * + * @brief + * Defines a function which implements an atomic void noarg operation using + * void noarg as the underlying atomic operation. + * + * @details + * The defined function's signature will match either + * patomic_opsig_void_noarg_t or patomic_opsig_explicit_void_noarg_t + * (depending on the value of 'vis_p'). + * + * @param atomic_type + * The type of the object on which the atomic operation is to be performed. + * Must not be a VLA or an array of unknown size. + * + * @param type + * The non-atomic counterpart of 'atomic_type'. This must have the same size + * as 'atomic_type' and must not have a stricter alignment. + * + * @param fn_name + * The name of the function to be defined. + * + * @param vis_p + * Either the macro 'SHOW_P' if the function should be defined as taking a + * memory order parameter (a.k.a. explicit), or the macro 'HIDE_P' if it + * should not (a.k.a. implicit). + * + * @param order + * The literal token 'order' if 'vis_p' is 'SHOW_P', otherwise the desired + * memory order to be used implicitly by the atomic operation. + * + * @param do_atomic_void_noarg_explicit + * A macro, M, callable as 'M(type, obj, order);' in block scope. 
+ * + * The arguments provided are: + * - 'type' : forwarded directly from the 'type' argument in this macro + * - 'obj' : an expression of type 'volatile atomic_type *' + * - 'order' : an expression of type 'int' whose value is a valid memory order + * + * The expected behaviour of calling the macro M as above is: + * - the value of the object pointed to by 'obj' is read and modified in a + * single atomic operation + * - the atomic operation uses a memory ordering at least as strong as + * 'order' + */ +#define PATOMIC_WRAPPED_DIRECT_DEFINE_OP_VOID_NOARG( \ + atomic_type, type, fn_name, vis_p, order, \ + do_atomic_void_noarg_explicit \ +) \ + static void \ + fn_name( \ + volatile void *const obj \ + vis_p(_,const int order) \ + ) \ + { \ + /* static assertions */ \ + PATOMIC_STATIC_ASSERT( \ + sizeof_type_eq_atype, sizeof(type) == sizeof(atomic_type)); \ + \ + /* assertions */ \ + PATOMIC_WRAPPED_DO_ASSERT(obj != NULL); \ + PATOMIC_WRAPPED_DO_ASSERT_ALIGNED(obj, atomic_type); \ + PATOMIC_WRAPPED_DO_ASSERT(PATOMIC_IS_VALID_ORDER((int) order)); \ + \ + /* operation */ \ + do_atomic_void_noarg_explicit( \ + type, \ + (volatile atomic_type *) obj, \ + (int) order \ + ); \ + } + + +#endif /* PATOMIC_WRAPPED_DIRECT_H */ diff --git a/src/patomic.c b/src/patomic.c index dd6c71c90..7ef47b1aa 100644 --- a/src/patomic.c +++ b/src/patomic.c @@ -43,7 +43,7 @@ patomic_t patomic_create( const size_t byte_width, const patomic_memory_order_t order, - const unsigned int opts, + const unsigned int options, const unsigned int kinds, const unsigned long ids ) @@ -63,7 +63,7 @@ patomic_create( ((unsigned int) patomic_impl_register[i].kind & kinds)) { /* only add to array if some operation is supported */ - *end = patomic_impl_register[i].fp_create(byte_width, order, opts); + *end = patomic_impl_register[i].fp_create(byte_width, order, options); if (opcats != patomic_internal_feature_check_any(&end->ops, opcats)) { ++end; @@ -80,7 +80,7 @@ patomic_create( ); /* combine implementations 
*/ - ret = patomic_impl_create_null(byte_width, order, opts); + ret = patomic_impl_create_null(byte_width, order, options); for (; begin != end; ++begin) { patomic_internal_combine(&ret, begin); @@ -93,7 +93,7 @@ patomic_create( patomic_explicit_t patomic_create_explicit( const size_t byte_width, - const unsigned int opts, + const unsigned int options, const unsigned int kinds, const unsigned long ids ) @@ -113,7 +113,7 @@ patomic_create_explicit( ((unsigned int) patomic_impl_register[i].kind & kinds)) { /* only add to array if some operation is supported */ - *end = patomic_impl_register[i].fp_create_explicit(byte_width, opts); + *end = patomic_impl_register[i].fp_create_explicit(byte_width, options); if (opcats != patomic_internal_feature_check_any_explicit(&end->ops, opcats)) { ++end; @@ -130,7 +130,7 @@ patomic_create_explicit( ); /* combine implementations */ - ret = patomic_impl_create_explicit_null(byte_width, opts); + ret = patomic_impl_create_explicit_null(byte_width, options); for (; begin != end; ++begin) { patomic_internal_combine_explicit(&ret, begin); @@ -142,7 +142,7 @@ patomic_create_explicit( patomic_transaction_t patomic_create_transaction( - const unsigned int opts, + const unsigned int options, const unsigned int kinds, const unsigned long ids ) @@ -163,7 +163,7 @@ patomic_create_transaction( ((unsigned int) patomic_impl_register[i].kind & kinds)) { /* only add to array if some operation is supported */ - ret = patomic_impl_register[i].fp_create_transaction(opts); + ret = patomic_impl_register[i].fp_create_transaction(options); if(opcats != patomic_internal_feature_check_any_transaction(&ret.ops, opcats)) { /* ignore previous implementations if current one has a better kind */ @@ -184,6 +184,6 @@ patomic_create_transaction( } else { - return patomic_impl_create_transaction_null(opts); + return patomic_impl_create_transaction_null(options); } } diff --git a/src/stdlib/CMakeLists.txt b/src/stdlib/CMakeLists.txt index a051df5fe..e8f07b305 100644 
--- a/src/stdlib/CMakeLists.txt +++ b/src/stdlib/CMakeLists.txt @@ -3,4 +3,6 @@ target_sources(${target_name} PRIVATE abort.c assert.c sort.c + stdalign.c + string.c ) diff --git a/src/stdlib/assert.c b/src/stdlib/assert.c index 82ed5d9fb..4844dea8a 100644 --- a/src/stdlib/assert.c +++ b/src/stdlib/assert.c @@ -26,9 +26,9 @@ static void patomic_assert_fxprint( FILE *stream, - const char *expr, - const char *file, - const char *func, + const char *const expr, + const char *const file, + const char *const func, const unsigned int line ) { @@ -66,9 +66,9 @@ patomic_assert_fxprint( void __patomic_assert_fail( - const char *expr, - const char *file, - const char *func, + const char *const expr, + const char *const file, + const char *const func, const unsigned int line ) { diff --git a/src/stdlib/sort.c b/src/stdlib/sort.c index 32d341660..b63b758b9 100644 --- a/src/stdlib/sort.c +++ b/src/stdlib/sort.c @@ -3,10 +3,10 @@ void patomic_array_sort( - void *ptr, - size_t count, - size_t size, - int (*comp)(const void*, const void*) + void *const ptr, + const size_t count, + const size_t size, + int (*const comp)(const void*, const void*) ) { qsort(ptr, count, size, comp); } diff --git a/src/stdlib/stdalign.c b/src/stdlib/stdalign.c new file mode 100644 index 000000000..df0dae7f4 --- /dev/null +++ b/src/stdlib/stdalign.c @@ -0,0 +1,31 @@ +#include <patomic/stdlib/stdalign.h> +#include <patomic/stdlib/stdint.h> + + +size_t +patomic_alignment_from_size( + const size_t size +) +{ + /* early check for zero size */ + if (size == 0) + { + return 0; + } + + /* return largest power of 2 which divides size */ + return size & -size; +} + + +int +patomic_is_aligned( + const volatile void *const ptr, + const size_t alignment +) +{ + /* check that our address is a multiple of the alignment */ + patomic_intptr_unsigned_t address = (patomic_intptr_unsigned_t) ptr; + address &= (alignment - 1u); + return address == 0; +} diff --git a/src/stdlib/string.c b/src/stdlib/string.c new file mode 100644 index 000000000..78cf5e734 --- /dev/null +++ 
b/src/stdlib/string.c @@ -0,0 +1,14 @@ +#include <patomic/stdlib/string.h> + +#include <string.h> + + +void * +patomic_memcpy( + void *const PATOMIC_RESTRICT dest, + const void *const PATOMIC_RESTRICT src, + const size_t count +) +{ + return memcpy(dest, src, count); +}