From 20793fd6c61eda808fed607525e18893200f7802 Mon Sep 17 00:00:00 2001
From: Marian Buschsieweke
Date: Tue, 4 Jan 2022 22:00:50 +0100
Subject: [PATCH 1/2] sys/atomic_utils: Add load/store for kernel PIDs

This will make it easier to change the width of kernel_pid_t if needed
without breaking code.
---
 sys/include/atomic_utils.h | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/sys/include/atomic_utils.h b/sys/include/atomic_utils.h
index 50dbaef2088c..16d4d2b46767 100644
--- a/sys/include/atomic_utils.h
+++ b/sys/include/atomic_utils.h
@@ -139,6 +139,8 @@
 #include

 #include "irq.h"
+#include "sched.h"
+
 #include "atomic_utils_arch.h"

 #ifdef __cplusplus
@@ -263,6 +265,16 @@ static inline uintptr_t atomic_load_uintptr(const volatile uintptr_t *var) {
 static inline void * atomic_load_ptr(void **ptr_addr) {
     return (void *)atomic_load_uintptr((const volatile uintptr_t *)ptr_addr);
 }
+/**
+ * @brief   Load a `kernel_pid_t` atomically
+ *
+ * @param[in]   var     Variable to load atomically
+ * @return      The value stored in @p var
+ */
+static inline kernel_pid_t atomic_load_kernel_pid(const volatile kernel_pid_t *var)
+{
+    return atomic_load_u16((const volatile uint16_t *)var);
+}
 /** @} */

 /**
@@ -321,6 +333,17 @@ static inline void atomic_store_uintptr(volatile uintptr_t *dest, uintptr_t val)
 static inline void atomic_store_ptr(void **dest, const void *val) {
     atomic_store_uintptr((volatile uintptr_t *)dest, (uintptr_t)val);
 }
+/**
+ * @brief   Store a `kernel_pid_t` atomically
+ *
+ * @param[out]  dest    Location to atomically write the new value to
+ * @param[in]   val     Value to write
+ */
+static inline void atomic_store_kernel_pid(volatile kernel_pid_t *dest,
+                                           kernel_pid_t val)
+{
+    atomic_store_u16((volatile uint16_t *)dest, (uint16_t)val);
+}
 /** @} */

 /**

From b6b7065ddccd657f9568d5fb14c63c06223b1d2f Mon Sep 17 00:00:00 2001
From: Marian Buschsieweke
Date: Thu, 30 Sep 2021 10:57:57 +0200
Subject: [PATCH 2/2] core/rmutex: use atomic utils

Replace use of C11 atomics with atomic utils. This fixes the

> error: address argument to atomic operation must be a pointer to a
> trivially-copyable type ('_Atomic(int) *' invalid)

error when compiling on AVR with LLVM.
---
 core/include/rmutex.h                   |  9 ++-------
 core/rmutex.c                           | 15 +++++++--------
 sys/xtimer/xtimer.c                     |  4 ++--
 sys/ztimer/util.c                       |  4 ++--
 sys/ztimer64/util.c                     |  4 ++--
 tests/xtimer_rmutex_lock_timeout/main.c | 23 ++++++++++-------------
 tests/ztimer_rmutex_lock_timeout/main.c | 23 ++++++++++-------------
 7 files changed, 35 insertions(+), 47 deletions(-)

diff --git a/core/include/rmutex.h b/core/include/rmutex.h
index 3fc6585782ef..7a3daa7146fa 100644
--- a/core/include/rmutex.h
+++ b/core/include/rmutex.h
@@ -24,11 +24,6 @@
 #define RMUTEX_H

 #include
-#ifdef __cplusplus
-#include "c11_atomics_compat.hpp"
-#else
-#include
-#endif

 #include "mutex.h"
 #include "sched.h"
@@ -62,14 +57,14 @@ typedef struct rmutex_t {
      * atomic_int_least16_t is used. Note @ref kernel_pid_t is an int16
      * @internal
      */
-    atomic_int_least16_t owner;
+    kernel_pid_t owner;
 } rmutex_t;

 /**
  * @brief   Static initializer for rmutex_t.
  * @details This initializer is preferable to rmutex_init().
  */
-#define RMUTEX_INIT { MUTEX_INIT, 0, ATOMIC_VAR_INIT(KERNEL_PID_UNDEF) }
+#define RMUTEX_INIT { MUTEX_INIT, 0, KERNEL_PID_UNDEF }

 /**
  * @brief   Initializes a recursive mutex object.
diff --git a/core/rmutex.c b/core/rmutex.c
index f967049258c2..4eebca0bb479 100644
--- a/core/rmutex.c
+++ b/core/rmutex.c
@@ -24,9 +24,10 @@
 #include
 #include

+#include "assert.h"
+#include "atomic_utils.h"
 #include "rmutex.h"
 #include "thread.h"
-#include "assert.h"

 #define ENABLE_DEBUG 0
 #include "debug.h"
@@ -78,7 +79,7 @@ static int _lock(rmutex_t *rmutex, int trylock)
      */

     /* ensure that owner is read atomically, since I need a consistent value */
-    owner = atomic_load_explicit(&rmutex->owner, memory_order_relaxed);
+    owner = atomic_load_kernel_pid(&rmutex->owner);

     DEBUG("rmutex %" PRIi16 " : mutex held by %" PRIi16 " \n", thread_getpid(),
           owner);
@@ -104,8 +105,7 @@ static int _lock(rmutex_t *rmutex, int trylock)
     DEBUG("rmutex %" PRIi16 " : setting the owner\n", thread_getpid());

     /* ensure that owner is written atomically, since others need a consistent value */
-    atomic_store_explicit(&rmutex->owner, thread_getpid(),
-                          memory_order_relaxed);
+    atomic_store_kernel_pid(&rmutex->owner, thread_getpid());

     DEBUG("rmutex %" PRIi16 " : increasing refs\n", thread_getpid());

@@ -127,8 +127,8 @@ int rmutex_trylock(rmutex_t *rmutex)

 void rmutex_unlock(rmutex_t *rmutex)
 {
-    assert(atomic_load_explicit(&rmutex->owner,
-                                memory_order_relaxed) == thread_getpid());
+    /* ensure that owner is read atomically, since I need a consistent value */
+    assert(atomic_load_kernel_pid(&rmutex->owner) == thread_getpid());
     assert(rmutex->refcount > 0);

     DEBUG("rmutex %" PRIi16 " : decrementing refs refs\n", thread_getpid());
@@ -143,8 +143,7 @@ void rmutex_unlock(rmutex_t *rmutex)
     DEBUG("rmutex %" PRIi16 " : resetting owner\n", thread_getpid());

     /* ensure that owner is written only once */
-    atomic_store_explicit(&rmutex->owner, KERNEL_PID_UNDEF,
-                          memory_order_relaxed);
+    atomic_store_kernel_pid(&rmutex->owner, KERNEL_PID_UNDEF);

     DEBUG("rmutex %" PRIi16 " : releasing mutex\n", thread_getpid());

diff --git a/sys/xtimer/xtimer.c b/sys/xtimer/xtimer.c
index 3b3541bdfb5c..0381bd368ff7 100644
--- a/sys/xtimer/xtimer.c
+++ b/sys/xtimer/xtimer.c
@@ -23,6 +23,7 @@
 #include

 #include "xtimer.h"
+#include "atomic_utils.h"
 #include "msg.h"
 #include "mutex.h"
 #include "rmutex.h"
@@ -224,8 +225,7 @@ int xtimer_rmutex_lock_timeout(rmutex_t *rmutex, uint64_t timeout)
         return 0;
     }
     if (xtimer_mutex_lock_timeout(&rmutex->mutex, timeout) == 0) {
-        atomic_store_explicit(&rmutex->owner,
-                              thread_getpid(), memory_order_relaxed);
+        atomic_store_kernel_pid(&rmutex->owner, thread_getpid());
         rmutex->refcount++;
         return 0;
     }
diff --git a/sys/ztimer/util.c b/sys/ztimer/util.c
index 822b82a5be0b..be0716422b56 100644
--- a/sys/ztimer/util.c
+++ b/sys/ztimer/util.c
@@ -23,6 +23,7 @@
 #include
 #include

+#include "atomic_utils.h"
 #include "irq.h"
 #include "mutex.h"
 #include "rmutex.h"
@@ -190,8 +191,7 @@ int ztimer_rmutex_lock_timeout(ztimer_clock_t *clock, rmutex_t *rmutex,
         return 0;
     }
     if (ztimer_mutex_lock_timeout(clock, &rmutex->mutex, timeout) == 0) {
-        atomic_store_explicit(&rmutex->owner,
-                              thread_getpid(), memory_order_relaxed);
+        atomic_store_kernel_pid(&rmutex->owner, thread_getpid());
         rmutex->refcount++;
         return 0;
     }
diff --git a/sys/ztimer64/util.c b/sys/ztimer64/util.c
index 3e36accde5b7..1d1e8ff5db5d 100644
--- a/sys/ztimer64/util.c
+++ b/sys/ztimer64/util.c
@@ -23,6 +23,7 @@
 #include
 #include

+#include "atomic_utils.h"
 #include "irq.h"
 #include "mutex.h"
 #include "rmutex.h"
@@ -183,8 +184,7 @@ int ztimer64_rmutex_lock_until(ztimer64_clock_t *clock, rmutex_t *rmutex,
         return 0;
     }
     if (ztimer64_mutex_lock_until(clock, &rmutex->mutex, target) == 0) {
-        atomic_store_explicit(&rmutex->owner,
-                              thread_getpid(), memory_order_relaxed);
+        atomic_store_kernel_pid(&rmutex->owner, thread_getpid());
         rmutex->refcount++;
         return 0;
     }
diff --git a/tests/xtimer_rmutex_lock_timeout/main.c b/tests/xtimer_rmutex_lock_timeout/main.c
index 8cac5b0fda0b..35ad64e51e9f 100644
--- a/tests/xtimer_rmutex_lock_timeout/main.c
+++ b/tests/xtimer_rmutex_lock_timeout/main.c
@@ -20,11 +20,13 @@

 #include
 #include
+
+#include "atomic_utils.h"
+#include "irq.h"
+#include "msg.h"
 #include "shell.h"
-#include "xtimer.h"
 #include "thread.h"
-#include "msg.h"
-#include "irq.h"
+#include "xtimer.h"

 /* XTIMER_SHIFT can be undefined when using xtimer_on_ztimer on boards
  * incompatible with xtimers tick conversion, e.g. the waspmote-pro
@@ -155,8 +157,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_long_unlocked(int argc,

     if (xtimer_rmutex_lock_timeout(&test_rmutex, LONG_RMUTEX_TIMEOUT) == 0) {
         /* rmutex has to be locked once */
-        if (atomic_load_explicit(&test_rmutex.owner,
-                                 memory_order_relaxed) == thread_getpid() &&
+        if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
             test_rmutex.refcount == 1 &&
             mutex_trylock(&test_rmutex.mutex) == 0) {
             puts("OK");
@@ -209,8 +210,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_long_locked(int argc,
     }
     else {
         /* rmutex has to be locked once */
-        if (atomic_load_explicit(&test_rmutex.owner,
-                                 memory_order_relaxed) == second_t_pid &&
+        if (atomic_load_kernel_pid(&test_rmutex.owner) == second_t_pid &&
             test_rmutex.refcount == 1 &&
             mutex_trylock(&test_rmutex.mutex) == 0) {
             puts("OK");
@@ -261,8 +261,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_low_prio_thread(

     if (xtimer_rmutex_lock_timeout(&test_rmutex, LONG_RMUTEX_TIMEOUT) == 0) {
         /* rmutex has to be locked once */
-        if (atomic_load_explicit(&test_rmutex.owner,
-                                 memory_order_relaxed) == thread_getpid() &&
+        if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
             test_rmutex.refcount == 1 &&
             mutex_trylock(&test_rmutex.mutex) == 0) {
             puts("OK");
@@ -317,8 +316,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_short_locked(int argc,
     }
     else {
         /* rmutex has to be locked once */
-        if (atomic_load_explicit(&test_rmutex.owner,
-                                 memory_order_relaxed) == second_t_pid &&
+        if (atomic_load_kernel_pid(&test_rmutex.owner) == second_t_pid &&
             test_rmutex.refcount == 1 &&
             mutex_trylock(&test_rmutex.mutex) == 0) {
             puts("OK");
@@ -355,8 +353,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_short_unlocked(int argc,

     if (xtimer_rmutex_lock_timeout(&test_rmutex, SHORT_RMUTEX_TIMEOUT) == 0) {
         /* rmutex has to be locked once */
-        if (atomic_load_explicit(&test_rmutex.owner,
-                                 memory_order_relaxed) == thread_getpid() &&
+        if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
             test_rmutex.refcount == 1 &&
             mutex_trylock(&test_rmutex.mutex) == 0) {
             puts("OK");
diff --git a/tests/ztimer_rmutex_lock_timeout/main.c b/tests/ztimer_rmutex_lock_timeout/main.c
index 48c6bf49d726..fb063c53e48d 100644
--- a/tests/ztimer_rmutex_lock_timeout/main.c
+++ b/tests/ztimer_rmutex_lock_timeout/main.c
@@ -20,11 +20,13 @@

 #include
 #include
+
+#include "atomic_utils.h"
+#include "irq.h"
+#include "msg.h"
 #include "shell.h"
-#include "ztimer.h"
 #include "thread.h"
-#include "msg.h"
-#include "irq.h"
+#include "ztimer.h"

 /* timeout at one millisecond (1000 us) to make sure it does not spin. */
 #define LONG_RMUTEX_TIMEOUT 1000
@@ -148,8 +150,7 @@ static int cmd_test_ztimer_rmutex_lock_timeout_long_unlocked(int argc,
     if (ztimer_rmutex_lock_timeout(ZTIMER_USEC, &test_rmutex,
                                    LONG_RMUTEX_TIMEOUT) == 0) {
         /* rmutex has to be locked once */
-        if (atomic_load_explicit(&test_rmutex.owner,
-                                 memory_order_relaxed) == thread_getpid() &&
+        if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
             test_rmutex.refcount == 1 &&
             mutex_trylock(&test_rmutex.mutex) == 0) {
             puts("OK");
@@ -202,8 +203,7 @@ static int cmd_test_ztimer_rmutex_lock_timeout_long_locked(int argc,
     }
     else {
         /* rmutex has to be locked once */
-        if (atomic_load_explicit(&test_rmutex.owner,
-                                 memory_order_relaxed) == second_t_pid &&
+        if (atomic_load_kernel_pid(&test_rmutex.owner) == second_t_pid &&
             test_rmutex.refcount == 1 &&
             mutex_trylock(&test_rmutex.mutex) == 0) {
             puts("OK");
@@ -254,8 +254,7 @@ static int cmd_test_ztimer_rmutex_lock_timeout_low_prio_thread(
     if (ztimer_rmutex_lock_timeout(ZTIMER_USEC, &test_rmutex,
                                    LONG_RMUTEX_TIMEOUT) == 0) {
         /* rmutex has to be locked once */
-        if (atomic_load_explicit(&test_rmutex.owner,
-                                 memory_order_relaxed) == thread_getpid() &&
+        if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
             test_rmutex.refcount == 1 &&
             mutex_trylock(&test_rmutex.mutex) == 0) {
             puts("OK");
@@ -310,8 +309,7 @@ static int cmd_test_ztimer_rmutex_lock_timeout_short_locked(int argc,
     }
     else {
         /* rmutex has to be locked once */
-        if (atomic_load_explicit(&test_rmutex.owner,
-                                 memory_order_relaxed) == second_t_pid &&
+        if (atomic_load_kernel_pid(&test_rmutex.owner) == second_t_pid &&
             test_rmutex.refcount == 1 &&
             mutex_trylock(&test_rmutex.mutex) == 0) {
             puts("OK");
@@ -348,8 +346,7 @@ static int cmd_test_ztimer_rmutex_lock_timeout_short_unlocked(int argc,
     if (ztimer_rmutex_lock_timeout(ZTIMER_USEC, &test_rmutex,
                                    SHORT_RMUTEX_TIMEOUT) == 0) {
         /* rmutex has to be locked once */
-        if (atomic_load_explicit(&test_rmutex.owner,
-                                 memory_order_relaxed) == thread_getpid() &&
+        if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
             test_rmutex.refcount == 1 &&
             mutex_trylock(&test_rmutex.mutex) == 0) {
             puts("OK");
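
Usage note (reviewer sketch, not part of the patches): the snippet below shows how the two helpers added in PATCH 1/2 are meant to replace atomic_load_explicit()/atomic_store_explicit() on a kernel_pid_t owner field, mirroring what PATCH 2/2 does in core/rmutex.c. The example_lock_t type and the example_* functions are invented for illustration only; atomic_load_kernel_pid(), atomic_store_kernel_pid(), thread_getpid(), and KERNEL_PID_UNDEF are the RIOT APIs the patches rely on.

#include <stdbool.h>

#include "atomic_utils.h"   /* atomic_{load,store}_kernel_pid() from PATCH 1/2 */
#include "sched.h"          /* kernel_pid_t, KERNEL_PID_UNDEF */
#include "thread.h"         /* thread_getpid() */

/* hypothetical lock-like object with a concurrently accessed owner field */
typedef struct {
    kernel_pid_t owner;
} example_lock_t;

static void example_take_ownership(example_lock_t *lock)
{
    /* write the PID atomically so concurrent readers never see a torn value */
    atomic_store_kernel_pid(&lock->owner, thread_getpid());
}

static void example_release_ownership(example_lock_t *lock)
{
    /* reset the owner atomically, as rmutex_unlock() does */
    atomic_store_kernel_pid(&lock->owner, KERNEL_PID_UNDEF);
}

static bool example_is_owner(const example_lock_t *lock)
{
    /* read the PID atomically to get a consistent snapshot */
    return atomic_load_kernel_pid(&lock->owner) == thread_getpid();
}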