diff --git a/tests/sys_atomic_utils/Makefile b/tests/sys_atomic_utils/Makefile
new file mode 100644
index 000000000000..f23bc3972ac0
--- /dev/null
+++ b/tests/sys_atomic_utils/Makefile
@@ -0,0 +1,9 @@
+include ../Makefile.tests_common
+
+USEMODULE += atomic_utils
+USEMODULE += fmt
+USEMODULE += random
+USEMODULE += shell
+USEMODULE += xtimer
+
+include $(RIOTBASE)/Makefile.include
diff --git a/tests/sys_atomic_utils/Makefile.ci b/tests/sys_atomic_utils/Makefile.ci
new file mode 100644
index 000000000000..ffa12570d151
--- /dev/null
+++ b/tests/sys_atomic_utils/Makefile.ci
@@ -0,0 +1,10 @@
+BOARD_INSUFFICIENT_MEMORY := \
+    arduino-duemilanove \
+    arduino-leonardo \
+    arduino-nano \
+    arduino-uno \
+    atmega328p \
+    nucleo-f031k6 \
+    nucleo-l011k4 \
+    stm32f030f4-demo \
+    #
diff --git a/tests/sys_atomic_utils/README.md b/tests/sys_atomic_utils/README.md
new file mode 100644
index 000000000000..98cf1c863a7e
--- /dev/null
+++ b/tests/sys_atomic_utils/README.md
@@ -0,0 +1,87 @@
+# Test Application for `sys/atomic_utils`
+
+## Design of the Test
+
+This test application launches one worker and one checker thread. The worker
+thread performs a specific operation (such as `atomic_fetch_add_u32()`) over
+and over again on a single target variable, and the checker thread
+occasionally interrupts to verify that the target variable's value is valid.
+If the variable has been corrupted, this is reported.
+
+This test works only *statistically*: the absence of detected corruption does
+not guarantee that a specific function indeed works correctly. (But a
+detected corruption does guarantee that something is broken.) However, the
+longer the test runs, the higher the odds that a malfunctioning
+implementation is caught corrupting memory.
+
+## Types of Corruptions Tested For
+
+### Lost Update
+
+In the lost update test the worker thread performs an operation that has no
+effect (addition, subtraction, binary or, and binary xor with `0` as second
+parameter, or binary and with `0xff...`). The checker thread atomically
+increments the target value. If the value has changed by the checker
+thread's next iteration, a corruption is detected. This can happen e.g. as
+follows:
+
+```
+Worker Thread  | Checker Thread | Value of t
+               |                |
+               | t++            | 1
+reg1 = t       |                | 1
+reg1 += 0      |                | 1
+               | t++            | 2
+t = reg1       |                | 1
+```
+
+Here, the read-modify-write sequence (`reg1 = t; reg1 += 0; t = reg1;`) has
+been interrupted by the checker thread. The update of `t` (the atomic `t++`
+operation) is lost when the worker thread afterwards writes `reg1` back into
+`t`. Such a lost update proves that the read-modify-write operation was not
+atomic.
+
+Note: Only the `atomic_<OP>_u<WIDTH>()` family of functions must pass this
+      test. A failure for the other families does ***not*** indicate an
+      issue.
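+
+The following hypothetical sketch (not part of the test application) spells
+out the interleaving from the table above as C code; the names `t`,
+`worker_iteration()`, and `checker_iteration()` are made up for illustration:
+
+```c
+#include <stdint.h>
+
+uint32_t t; /* target variable shared by worker and checker */
+
+/* What a broken, non-atomic "fetch op with no-op operand" compiles to. */
+void worker_iteration(void)
+{
+    uint32_t reg1 = t; /* read                                     */
+    reg1 += 0;         /* modify (no-op operand)                   */
+    t = reg1;          /* write back: overwrites a concurrent t++  */
+}
+
+/* The checker compares against its own counter, then increments. */
+void checker_iteration(uint32_t *expected)
+{
+    if (t != *expected) {
+        /* lost update detected */
+    }
+    t = ++(*expected); /* done atomically in the real test */
+}
+```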
+
+### Store Tearing
+
+In the tearing test the worker thread first initializes the target variable,
+e.g. with zero. Then, it performs a sequence of read-modify-write operations,
+e.g. three times `atomic_fetch_add_u16(&target, 0x5555)`. During this
+sequence, the target variable should only ever contain one of the following
+values: `0x0000`, `0x5555`, `0xaaaa`, and `0xffff`.
+
+After each sequence is complete, the target variable is atomically
+re-initialized and the next sequence starts. If, e.g. on AVR (an 8-bit
+platform that can only write 8 bits per store), a 16-bit write is interrupted
+after only one byte has been written, a value such as `0x55aa` or `0xaa55`
+might be stored in the target variable. If such a value is observed, an
+atomic store operation was torn apart into two parts and a memory corruption
+was detected.
+
+Note: Both the `atomic_<OP>_u<WIDTH>()` and `semi_atomic_<OP>_u<WIDTH>()`
+      families of functions must pass this test. A failure of the
+      `volatile_<OP>_u<WIDTH>()` family of functions does ***not*** indicate
+      an issue.
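+
+Again as a hypothetical sketch (not part of the test code): on an 8-bit CPU a
+16-bit store is effectively two single-byte stores, and the checker may run
+between them:
+
+```c
+#include <stdint.h>
+
+/* What a 16-bit store effectively becomes on an 8-bit CPU. */
+void store_u16_on_8bit_cpu(uint16_t *dest, uint16_t val)
+{
+    uint8_t *d = (uint8_t *)dest;
+    d[0] = (uint8_t)val;         /* low byte written ...               */
+    /* ... a checker running here sees a torn value such as 0x55aa ... */
+    d[1] = (uint8_t)(val >> 8);  /* ... before the high byte lands     */
+}
+```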
+
+## Usage
+
+The test drops you into a shell. The welcome message and the help command
+contain all information on how to use the test. In addition, `make test`
+runs all tests that are expected to pass for one second each. This is
+hopefully long enough to detect any issues; longer runs would detect
+corruptions more reliably, but are not feasible in automated testing.
+
+## Test Self Check
+
+The test ships with an alternative implementation of the
+`atomic_<OP>_u<WIDTH>()` family of functions called
+`volatile_<OP>_u<WIDTH>()`. This implementation incorrectly assumes that
+`volatile` provides atomic access. Thus, checking this implementation should
+result in failures:
+
+- The lost update test is expected to (eventually) fail for every platform
+- The tearing test is expected to (eventually) fail for widths bigger than
+  the word size
+    - Cortex-M7 is one exception: Due to instruction fusion, two 32 bit
+      writes can be issued in one CPU cycle, so that a 64 bit write can
+      indeed be atomic on this platform. Thus, it can happen that no tearing
+      failure is detected for the `volatile` implementation on Cortex-M7 at
+      all.
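+
+For example (function names as printed by the `list` command), the expected
+self-check failures can be provoked with:
+
+```
+tearing_test volatile_fetch_add_u64 1
+lost_update_test volatile_fetch_add_u8 1
+```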
diff --git a/tests/sys_atomic_utils/main.c b/tests/sys_atomic_utils/main.c
new file mode 100644
index 000000000000..3120973346b6
--- /dev/null
+++ b/tests/sys_atomic_utils/main.c
@@ -0,0 +1,1361 @@
+/*
+ * Copyright (C) 2020 Otto-von-Guericke-Universität Magdeburg
+ *
+ * This file is subject to the terms and conditions of the GNU Lesser
+ * General Public License v2.1. See the file LICENSE in the top level
+ * directory for more details.
+ */
+
+/**
+ * @ingroup     tests
+ * @{
+ *
+ * @file
+ * @brief       Atomic utils test application
+ *
+ * @author      Marian Buschsieweke
+ *
+ * @}
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "atomic_utils.h"
+#include "fmt.h"
+#include "mutex.h"
+#include "random.h"
+#include "shell.h"
+#include "thread.h"
+#include "xtimer.h"
+
+#include "volatile_utils.h"
+
+typedef enum {
+    TEST_TYPE_TEARING,
+    TEST_TYPE_LOST_UPDATE,
+    TEST_TYPE_NUMOF
+} test_type_t;
+
+typedef enum {
+    TEST_WIDTH_8_BIT,
+    TEST_WIDTH_16_BIT,
+    TEST_WIDTH_32_BIT,
+    TEST_WIDTH_64_BIT,
+    TEST_WIDTH_NUMOF
+} test_width_t;
+
+typedef void (*fetch_op_u8_t)(uint8_t *dest, uint8_t val);
+typedef void (*fetch_op_u16_t)(uint16_t *dest, uint16_t val);
+typedef void (*fetch_op_u32_t)(uint32_t *dest, uint32_t val);
+typedef void (*fetch_op_u64_t)(uint64_t *dest, uint64_t val);
+
+typedef struct {
+    const char *name;
+    fetch_op_u8_t op;
+    uint8_t operand;
+    uint8_t noop_operand;
+    uint8_t init;
+    uint8_t allowed[4];
+    uint8_t allowed_numof;
+    uint8_t reinit_every;
+} fetch_op_test_u8_t;
+
+typedef struct {
+    const char *name;
+    fetch_op_u16_t op;
+    uint16_t operand;
+    uint16_t noop_operand;
+    uint16_t init;
+    uint16_t allowed[4];
+    uint8_t allowed_numof;
+    uint8_t reinit_every;
+} fetch_op_test_u16_t;
+
+typedef struct {
+    const char *name;
+    fetch_op_u32_t op;
+    uint32_t operand;
+    uint32_t noop_operand;
+    uint32_t init;
+    uint32_t allowed[4];
+    uint8_t allowed_numof;
+    uint8_t reinit_every;
+} fetch_op_test_u32_t;
+
+typedef struct {
+    const char *name;
+    fetch_op_u64_t op;
+    uint64_t operand;
+    uint64_t noop_operand;
+    uint64_t init;
+    uint64_t allowed[4];
+    uint8_t allowed_numof;
+    uint8_t reinit_every;
+} fetch_op_test_u64_t;
+
+typedef struct {
+    test_type_t type;
+    test_width_t width;
+    unsigned idx;
+} test_conf_t;
+
+typedef struct {
+    test_conf_t conf;
+    uint8_t counter;
+} test_state_t;
+
+static const fetch_op_test_u8_t fetch_op_tests_u8[] = {
+    /* atomic_*() */
+    {
+        .name = "atomic_fetch_add_u8",
+        .op = atomic_fetch_add_u8,
+        .operand = 0x55,
+        .noop_operand = 0,
+        .allowed = {
+            0x00, 0x55,
+            0xaa, 0xff
+        },
+        .allowed_numof = 4,
+        .init = 0x00,
+        .reinit_every = 3,
+    },
+    {
+        .name = "atomic_fetch_sub_u8",
+        .op = atomic_fetch_sub_u8,
+        .operand = 0x55,
+        .noop_operand = 0,
+        .allowed = {
+            0x00, 0x55,
+            0xaa, 0xff
+        },
+        .allowed_numof = 4,
+        .init = 0xff,
+        .reinit_every = 3,
+    },
+    {
+        .name = "atomic_fetch_or_u8",
+        .op = atomic_fetch_or_u8,
+        .operand = 0x55,
+        .noop_operand = 0,
+        .allowed = { 0x00, 0x55 },
+        .allowed_numof = 2,
+        .init = 0x00,
+        .reinit_every = 1,
+    },
+    {
+        .name = "atomic_fetch_xor_u8",
+        .op = atomic_fetch_xor_u8,
+        .operand = 0x55,
+        .noop_operand = 0,
+        .allowed = { 0x00, 0x55 },
+        .allowed_numof = 2,
+        .init = 0x00,
+    },
+    {
+        .name = "atomic_fetch_and_u8",
+        .op = atomic_fetch_and_u8,
+        .operand = 0x55,
+        .noop_operand = 0xff,
+        .allowed = { 0xff, 0x55 },
+        .allowed_numof = 2,
+        .init = 0xff,
+        .reinit_every = 1,
+    },
+    /* semi_atomic_*() */
+    {
+        .name = "semi_atomic_fetch_add_u8",
+        .op = semi_atomic_fetch_add_u8,
+        .operand = 0x55,
+        .noop_operand = 0,
+        .allowed = {
+            0x00, 0x55,
+            0xaa, 0xff
+        },
+        .allowed_numof = 4,
+        .init = 0x00,
+        .reinit_every = 3,
+    },
+    {
+        .name = "semi_atomic_fetch_sub_u8",
+        .op = semi_atomic_fetch_sub_u8,
+        .operand = 0x55,
+        .noop_operand = 0,
+        .allowed = {
+            0x00, 0x55,
+            0xaa, 0xff
+        },
+        .allowed_numof = 4,
+        .init = 0xff,
+        .reinit_every = 3,
+    },
+    {
+        .name = "semi_atomic_fetch_or_u8",
+        .op = semi_atomic_fetch_or_u8,
+        .operand = 0x55,
+        .noop_operand = 0,
+        .allowed = { 0x00, 0x55 },
+        .allowed_numof = 2,
+        .init = 0x00,
+        .reinit_every = 1,
+    },
+    {
+        .name = "semi_atomic_fetch_xor_u8",
+        .op = semi_atomic_fetch_xor_u8,
+        .operand = 0x55,
+        .noop_operand = 0,
+        .allowed = { 0x00, 0x55 },
+        .allowed_numof = 2,
+        .init = 0x00,
+    },
+    {
+        .name = "semi_atomic_fetch_and_u8",
+        .op = semi_atomic_fetch_and_u8,
+        .operand = 0x55,
+        .noop_operand = 0xff,
+        .allowed = { 0xff, 0x55 },
+        .allowed_numof = 2,
+        .init = 0xff,
+        .reinit_every = 1,
+    },
+    /* volatile_*() */
+    {
+        .name = "volatile_fetch_add_u8",
+        .op = volatile_fetch_add_u8,
+        .operand = 0x55,
+        .noop_operand = 0,
+        .allowed = {
+            0x00, 0x55,
+            0xaa, 0xff
+        },
+        .allowed_numof = 4,
+        .init = 0x00,
+        .reinit_every = 3,
+    },
+    {
+        .name = "volatile_fetch_sub_u8",
+        .op = volatile_fetch_sub_u8,
+        .operand = 0x55,
+        .noop_operand = 0,
+        .allowed = {
+            0x00, 0x55,
+            0xaa, 0xff
+        },
+        .allowed_numof = 4,
+        .init = 0xff,
+        .reinit_every = 3,
+    },
+    {
+        .name = "volatile_fetch_or_u8",
+        .op = volatile_fetch_or_u8,
+        .operand = 0x55,
+        .noop_operand = 0,
+        .allowed = { 0x00, 0x55 },
+        .allowed_numof = 2,
+        .init = 0x00,
+        .reinit_every = 1,
+    },
+    {
+        .name = "volatile_fetch_xor_u8",
+        .op = volatile_fetch_xor_u8,
+        .operand = 0x55,
+        .noop_operand = 0,
+        .allowed = { 0x00, 0x55 },
+        .allowed_numof = 2,
+        .init = 0x00,
+    },
+    {
+        .name = "volatile_fetch_and_u8",
+        .op = volatile_fetch_and_u8,
+        .operand = 0x55,
+        .noop_operand = 0xff,
+        .allowed = { 0xff, 0x55 },
+        .allowed_numof = 2,
+        .init = 0xff,
+        .reinit_every = 1,
+    },
+};
+
+static const fetch_op_test_u16_t fetch_op_tests_u16[] = {
+    /* atomic_*() */
+    {
+        .name = "atomic_fetch_add_u16",
+        .op = atomic_fetch_add_u16,
+        .operand = 0x5555,
+        .noop_operand = 0,
+        .allowed = {
+            0x0000, 0x5555,
+            0xaaaa, 0xffff
+        },
+        .allowed_numof = 4,
+        .init = 0x0000,
+        .reinit_every = 3,
+    },
+    {
+        .name = "atomic_fetch_sub_u16",
+        .op = atomic_fetch_sub_u16,
+        .operand = 0x5555,
+        .noop_operand = 0,
+        .allowed = {
+            0x0000, 0x5555,
+            0xaaaa, 0xffff
+        },
+        .allowed_numof = 4,
+        .init = 0xffff,
+        .reinit_every = 3,
+    },
+    {
+        .name = "atomic_fetch_or_u16",
+        .op = atomic_fetch_or_u16,
+        .operand = 0x5555,
+        .noop_operand = 0,
+        .allowed = { 0x0000, 0x5555 },
+        .allowed_numof = 2,
+        .init = 0x0000,
+        .reinit_every = 1,
+    },
+    {
+        .name = "atomic_fetch_xor_u16",
+        .op = atomic_fetch_xor_u16,
+        .operand = 0x5555,
+        .noop_operand = 0,
+        .allowed = { 0x0000, 0x5555 },
+        .allowed_numof = 2,
+        .init = 0x0000,
+    },
+    {
+        .name = "atomic_fetch_and_u16",
+        .op = atomic_fetch_and_u16,
+        .operand = 0x5555,
+        .noop_operand = 0xffff,
+        .allowed = { 0xffff, 0x5555 },
+        .allowed_numof = 2,
+        .init = 0xffff,
+        .reinit_every = 1,
+    },
+    /* semi_atomic_*() */
+    {
+        .name = "semi_atomic_fetch_add_u16",
+        .op = semi_atomic_fetch_add_u16,
+        .operand = 0x5555,
+        .noop_operand = 0,
+        .allowed = {
+            0x0000, 0x5555,
+            0xaaaa, 0xffff
+        },
+        .allowed_numof = 4,
+        .init = 0x0000,
+        .reinit_every = 3,
+    },
+    {
+        .name = "semi_atomic_fetch_sub_u16",
+        .op = semi_atomic_fetch_sub_u16,
+        .operand = 0x5555,
+        .noop_operand = 0,
+        .allowed = {
+            0x0000, 0x5555,
+            0xaaaa, 0xffff
+        },
+        .allowed_numof = 4,
+        .init = 0xffff,
+        .reinit_every = 3,
+    },
+    {
+        .name = "semi_atomic_fetch_or_u16",
+        .op = semi_atomic_fetch_or_u16,
+        .operand = 0x5555,
+        .noop_operand = 0,
+        .allowed = { 0x0000, 0x5555 },
+        .allowed_numof = 2,
+        .init = 0x0000,
+        .reinit_every = 1,
+    },
+    {
+        .name = "semi_atomic_fetch_xor_u16",
+        .op = semi_atomic_fetch_xor_u16,
+        .operand = 0x5555,
+        .noop_operand = 0,
+        .allowed = { 0x0000, 0x5555 },
+        .allowed_numof = 2,
+        .init = 0x0000,
+    },
+    {
+        .name = "semi_atomic_fetch_and_u16",
+        .op = semi_atomic_fetch_and_u16,
+        .operand = 0x5555,
+        .noop_operand = 0xffff,
+        .allowed = { 0xffff, 0x5555 },
+        .allowed_numof = 2,
+        .init = 0xffff,
+        .reinit_every = 1,
+    },
+    /* volatile_*() */
+    {
+        .name = "volatile_fetch_add_u16",
+        .op = volatile_fetch_add_u16,
+        .operand = 0x5555,
+        .noop_operand = 0,
+        .allowed = {
+            0x0000, 0x5555,
+            0xaaaa, 0xffff
+        },
+        .allowed_numof = 4,
+        .init = 0x0000,
+        .reinit_every = 3,
+    },
+    {
+        .name = "volatile_fetch_sub_u16",
+        .op = volatile_fetch_sub_u16,
+        .operand = 0x5555,
+        .noop_operand = 0,
+        .allowed = {
+            0x0000, 0x5555,
+            0xaaaa, 0xffff
+        },
+        .allowed_numof = 4,
+        .init = 0xffff,
+        .reinit_every = 3,
+    },
+    {
+        .name = "volatile_fetch_or_u16",
+        .op = volatile_fetch_or_u16,
+        .operand = 0x5555,
+        .noop_operand = 0,
+        .allowed = { 0x0000, 0x5555 },
+        .allowed_numof = 2,
+        .init = 0x0000,
+        .reinit_every = 1,
+    },
+    {
+        .name = "volatile_fetch_xor_u16",
+        .op = volatile_fetch_xor_u16,
+        .operand = 0x5555,
+        .noop_operand = 0,
+        .allowed = { 0x0000, 0x5555 },
+        .allowed_numof = 2,
+        .init = 0x0000,
+    },
+    {
+        .name = "volatile_fetch_and_u16",
+        .op = volatile_fetch_and_u16,
+        .operand = 0x5555,
+        .noop_operand = 0xffff,
+        .allowed = { 0xffff, 0x5555 },
+        .allowed_numof = 2,
+        .init = 0xffff,
+        .reinit_every = 1,
+    },
+};
+
+static const fetch_op_test_u32_t fetch_op_tests_u32[] = {
+    /* atomic_*() */
+    {
+        .name = "atomic_fetch_add_u32",
+        .op = atomic_fetch_add_u32,
+        .operand = 0x55555555,
+        .noop_operand = 0,
+        .allowed = {
+            0x00000000, 0x55555555,
+            0xaaaaaaaa, 0xffffffff
+        },
+        .allowed_numof = 4,
+        .init = 0x00000000,
+        .reinit_every = 3,
+    },
+    {
+        .name = "atomic_fetch_sub_u32",
+        .op = atomic_fetch_sub_u32,
+        .operand = 0x55555555,
+        .noop_operand = 0,
+        .allowed = {
+            0x00000000, 0x55555555,
+            0xaaaaaaaa, 0xffffffff
+        },
+        .allowed_numof = 4,
+        .init = 0xffffffff,
+        .reinit_every = 3,
+    },
+    {
+        .name = "atomic_fetch_or_u32",
+        .op = atomic_fetch_or_u32,
+        .operand = 0x55555555,
+        .noop_operand = 0,
+        .allowed = { 0x00000000, 0x55555555 },
+        .allowed_numof = 2,
+        .init = 0x00000000,
+        .reinit_every = 1,
+    },
+    {
+        .name = "atomic_fetch_xor_u32",
+        .op = atomic_fetch_xor_u32,
+        .operand = 0x55555555,
+        .noop_operand = 0,
+        .allowed = { 0x00000000, 0x55555555 },
+        .allowed_numof = 2,
+        .init = 0x00000000,
+    },
+    {
+        .name = "atomic_fetch_and_u32",
+        .op = atomic_fetch_and_u32,
+        .operand = 0x55555555,
+        .noop_operand = 0xffffffff,
+        .allowed = { 0xffffffff, 0x55555555 },
+        .allowed_numof = 2,
+        .init = 0xffffffff,
+        .reinit_every = 1,
+    },
+    /* semi_atomic_*() */
+    {
+        .name = "semi_atomic_fetch_add_u32",
+        .op = semi_atomic_fetch_add_u32,
+        .operand = 0x55555555,
+        .noop_operand = 0,
+        .allowed = {
+            0x00000000, 0x55555555,
+            0xaaaaaaaa, 0xffffffff
+        },
+        .allowed_numof = 4,
+        .init = 0x00000000,
+        .reinit_every = 3,
+    },
+    {
+        .name = "semi_atomic_fetch_sub_u32",
+        .op = semi_atomic_fetch_sub_u32,
+        .operand = 0x55555555,
+        .noop_operand = 0,
+        .allowed = {
+            0x00000000, 0x55555555,
+            0xaaaaaaaa, 0xffffffff
+        },
+        .allowed_numof = 4,
+        .init = 0xffffffff,
+        .reinit_every = 3,
+    },
+    {
+        .name = "semi_atomic_fetch_or_u32",
+        .op = semi_atomic_fetch_or_u32,
+        .operand = 0x55555555,
+        .noop_operand = 0,
+        .allowed = { 0x00000000, 0x55555555 },
+        .allowed_numof = 2,
+        .init = 0x00000000,
+        .reinit_every = 1,
+    },
+    {
+        .name = "semi_atomic_fetch_xor_u32",
+        .op = semi_atomic_fetch_xor_u32,
+        .operand = 0x55555555,
+        .noop_operand = 0,
+        .allowed = { 0x00000000, 0x55555555 },
+        .allowed_numof = 2,
+        .init = 0x00000000,
+    },
+    {
+        .name = "semi_atomic_fetch_and_u32",
+        .op = semi_atomic_fetch_and_u32,
+        .operand = 0x55555555,
+        .noop_operand = 0xffffffff,
+        .allowed = { 0xffffffff, 0x55555555 },
+        .allowed_numof = 2,
+        .init = 0xffffffff,
+        .reinit_every = 1,
+    },
+    /* volatile_*() */
+    {
+        .name = "volatile_fetch_add_u32",
+        .op = volatile_fetch_add_u32,
+        .operand = 0x55555555,
+        .noop_operand = 0,
+        .allowed = {
+            0x00000000, 0x55555555,
+            0xaaaaaaaa, 0xffffffff
+        },
+        .allowed_numof = 4,
+        .init = 0x00000000,
+        .reinit_every = 3,
+    },
+    {
+        .name = "volatile_fetch_sub_u32",
+        .op = volatile_fetch_sub_u32,
+        .operand = 0x55555555,
+        .noop_operand = 0,
+        .allowed = {
+            0x00000000, 0x55555555,
+            0xaaaaaaaa, 0xffffffff
+        },
+        .allowed_numof = 4,
+        .init = 0xffffffff,
+        .reinit_every = 3,
+    },
+    {
+        .name = "volatile_fetch_or_u32",
+        .op = volatile_fetch_or_u32,
+        .operand = 0x55555555,
+        .noop_operand = 0,
+        .allowed = { 0x00000000, 0x55555555 },
+        .allowed_numof = 2,
+        .init = 0x00000000,
+        .reinit_every = 1,
+    },
+    {
+        .name = "volatile_fetch_xor_u32",
+        .op = volatile_fetch_xor_u32,
+        .operand = 0x55555555,
+        .noop_operand = 0,
+        .allowed = { 0x00000000, 0x55555555 },
+        .allowed_numof = 2,
+        .init = 0x00000000,
+    },
+    {
+        .name = "volatile_fetch_and_u32",
+        .op = volatile_fetch_and_u32,
+        .operand = 0x55555555,
+        .noop_operand = 0xffffffff,
+        .allowed = { 0xffffffff, 0x55555555 },
+        .allowed_numof = 2,
+        .init = 0xffffffff,
+        .reinit_every = 1,
+    },
+};
+
+static const fetch_op_test_u64_t fetch_op_tests_u64[] = {
+    /* atomic_*() */
+    {
+        .name = "atomic_fetch_add_u64",
+        .op = atomic_fetch_add_u64,
+        .operand = 0x5555555555555555,
+        .noop_operand = 0,
+        .allowed = {
+            0x0000000000000000, 0x5555555555555555,
+            0xaaaaaaaaaaaaaaaa, 0xffffffffffffffff
+        },
+        .allowed_numof = 4,
+        .init = 0x0000000000000000,
+        .reinit_every = 3,
+    },
+    {
+        .name = "atomic_fetch_sub_u64",
+        .op = atomic_fetch_sub_u64,
+        .operand = 0x5555555555555555,
+        .noop_operand = 0,
+        .allowed = {
+            0x0000000000000000, 0x5555555555555555,
+            0xaaaaaaaaaaaaaaaa, 0xffffffffffffffff
+        },
+        .allowed_numof = 4,
+        .init = 0xffffffffffffffff,
+        .reinit_every = 3,
+    },
+    {
+        .name = "atomic_fetch_or_u64",
+        .op = atomic_fetch_or_u64,
+        .operand = 0x5555555555555555,
+        .noop_operand = 0,
+        .allowed = { 0x0000000000000000, 0x5555555555555555 },
+        .allowed_numof = 2,
+        .init = 0x0000000000000000,
+        .reinit_every = 1,
+    },
+    {
+        .name = "atomic_fetch_xor_u64",
+        .op = atomic_fetch_xor_u64,
+        .operand = 0x5555555555555555,
+        .noop_operand = 0,
+        .allowed = { 0x0000000000000000, 0x5555555555555555 },
+        .allowed_numof = 2,
+        .init = 0x0000000000000000,
+    },
+    {
+        .name = "atomic_fetch_and_u64",
+        .op = atomic_fetch_and_u64,
+        .operand = 0x5555555555555555,
+        .noop_operand = 0xffffffffffffffff,
+        .allowed = { 0xffffffffffffffff, 0x5555555555555555 },
+        .allowed_numof = 2,
+        .init = 0xffffffffffffffff,
+        .reinit_every = 1,
+    },
+    /* semi_atomic_*() */
+    {
+        .name = "semi_atomic_fetch_add_u64",
+        .op = semi_atomic_fetch_add_u64,
+        .operand = 0x5555555555555555,
+        .noop_operand = 0,
+        .allowed = {
+            0x0000000000000000, 0x5555555555555555,
+            0xaaaaaaaaaaaaaaaa, 0xffffffffffffffff
+        },
+        .allowed_numof = 4,
+        .init = 0x0000000000000000,
+        .reinit_every = 3,
+    },
+    {
+        .name = "semi_atomic_fetch_sub_u64",
+        .op = semi_atomic_fetch_sub_u64,
+        .operand = 0x5555555555555555,
+        .noop_operand = 0,
+        .allowed = {
+            0x0000000000000000, 0x5555555555555555,
+            0xaaaaaaaaaaaaaaaa, 0xffffffffffffffff
+        },
+        .allowed_numof = 4,
+        .init = 0xffffffffffffffff,
+        .reinit_every = 3,
+    },
+    {
+        .name = "semi_atomic_fetch_or_u64",
+        .op = semi_atomic_fetch_or_u64,
+        .operand = 0x5555555555555555,
+        .noop_operand = 0,
+        .allowed = { 0x0000000000000000, 0x5555555555555555 },
+        .allowed_numof = 2,
+        .init = 0x0000000000000000,
+        .reinit_every = 1,
+    },
+    {
+        .name = "semi_atomic_fetch_xor_u64",
+        .op = semi_atomic_fetch_xor_u64,
+        .operand = 0x5555555555555555,
+        .noop_operand = 0,
+        .allowed = { 0x0000000000000000, 0x5555555555555555 },
+        .allowed_numof = 2,
+        .init = 0x0000000000000000,
+    },
+    {
+        .name = "semi_atomic_fetch_and_u64",
+        .op = semi_atomic_fetch_and_u64,
+        .operand = 0x5555555555555555,
+        .noop_operand = 0xffffffffffffffff,
+        .allowed = { 0xffffffffffffffff, 0x5555555555555555 },
+        .allowed_numof = 2,
+        .init = 0xffffffffffffffff,
+        .reinit_every = 1,
+    },
+    /* volatile_*() */
+    {
+        .name = "volatile_fetch_add_u64",
+        .op = volatile_fetch_add_u64,
+        .operand = 0x5555555555555555,
+        .noop_operand = 0,
+        .allowed = {
+            0x0000000000000000, 0x5555555555555555,
+            0xaaaaaaaaaaaaaaaa, 0xffffffffffffffff
+        },
+        .allowed_numof = 4,
+        .init = 0x0000000000000000,
+        .reinit_every = 3,
+    },
+    {
+        .name = "volatile_fetch_sub_u64",
+        .op = volatile_fetch_sub_u64,
+        .operand = 0x5555555555555555,
+        .noop_operand = 0,
+        .allowed = {
+            0x0000000000000000, 0x5555555555555555,
+            0xaaaaaaaaaaaaaaaa, 0xffffffffffffffff
+        },
+        .allowed_numof = 4,
+        .init = 0xffffffffffffffff,
+        .reinit_every = 3,
+    },
+    {
+        .name = "volatile_fetch_or_u64",
+        .op = volatile_fetch_or_u64,
+        .operand = 0x5555555555555555,
+        .noop_operand = 0,
+        .allowed = { 0x0000000000000000, 0x5555555555555555 },
+        .allowed_numof = 2,
+        .init = 0x0000000000000000,
+        .reinit_every = 1,
+    },
+    {
+        .name = "volatile_fetch_xor_u64",
+        .op = volatile_fetch_xor_u64,
+        .operand = 0x5555555555555555,
+        .noop_operand = 0,
+        .allowed = { 0x0000000000000000, 0x5555555555555555 },
+        .allowed_numof = 2,
+        .init = 0x0000000000000000,
+    },
+    {
+        .name = "volatile_fetch_and_u64",
+        .op = volatile_fetch_and_u64,
+        .operand = 0x5555555555555555,
+        .noop_operand = 0xffffffffffffffff,
+        .allowed = { 0xffffffffffffffff, 0x5555555555555555 },
+        .allowed_numof = 2,
+        .init = 0xffffffffffffffff,
+        .reinit_every = 1,
+    },
+};
+
+static char thread_worker_stack[THREAD_STACKSIZE_SMALL];
+static char thread_checker_stack[THREAD_STACKSIZE_SMALL];
+static char thread_timeout_stack[THREAD_STACKSIZE_SMALL];
+
+static test_conf_t conf;
+static mutex_t conf_mutex = MUTEX_INIT_LOCKED;
+static mutex_t stop_mutex = MUTEX_INIT_LOCKED;
+
+static int testing_active = 0;
+
+/* Testing values to operate on */
+static uint8_t val_u8;
+static uint16_t val_u16;
+static uint32_t val_u32;
+static uint64_t val_u64;
+
+static uint64_t stats_ops;
+static uint64_t stats_tests;
+static uint64_t stats_failures;
+
+static int sc_tearing_test(int argc, char **argv);
+static int sc_lost_update_test(int argc, char **argv);
+static int sc_stats(int argc, char **argv);
+static int sc_stop(int argc, char **argv);
+static int sc_list(int argc, char **argv);
+static const shell_command_t shell_commands[] = {
+    { "tearing_test", "Run a store/load tearing test", sc_tearing_test },
+    { "lost_update_test", "Run a lost update test", sc_lost_update_test },
+    { "stats", "Show stats of current test", sc_stats },
+    { "stop", "Stop running test", sc_stop },
+    { "list", "List functions that can be tested", sc_list },
+    { NULL, NULL, NULL }
+};
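+
+/*
+ * Tearing test, worker side: every `reinit_every` fetch ops the target
+ * variable is atomically reset to its initial value, so that (absent
+ * tearing) the checker can only ever observe the values listed in `allowed`.
+ */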
+static void tearing_test_worker(test_state_t *state)
+{
+    switch (state->conf.width) {
+    default:
+        break;
+    case TEST_WIDTH_8_BIT:
+        {
+            const fetch_op_test_u8_t *t = &fetch_op_tests_u8[state->conf.idx];
+            if (state->counter >= t->reinit_every) {
+                atomic_store_u8(&val_u8, t->init);
+                state->counter = 0;
+            }
+            t->op(&val_u8, t->operand);
+            state->counter++;
+        }
+        break;
+    case TEST_WIDTH_16_BIT:
+        {
+            const fetch_op_test_u16_t *t = &fetch_op_tests_u16[state->conf.idx];
+            if (state->counter >= t->reinit_every) {
+                atomic_store_u16(&val_u16, t->init);
+                state->counter = 0;
+            }
+            t->op(&val_u16, t->operand);
+            state->counter++;
+        }
+        break;
+    case TEST_WIDTH_32_BIT:
+        {
+            const fetch_op_test_u32_t *t = &fetch_op_tests_u32[state->conf.idx];
+            if (state->counter >= t->reinit_every) {
+                atomic_store_u32(&val_u32, t->init);
+                state->counter = 0;
+            }
+            t->op(&val_u32, t->operand);
+            state->counter++;
+        }
+        break;
+    case TEST_WIDTH_64_BIT:
+        {
+            const fetch_op_test_u64_t *t = &fetch_op_tests_u64[state->conf.idx];
+            if (state->counter >= t->reinit_every) {
+                atomic_store_u64(&val_u64, t->init);
+                state->counter = 0;
+            }
+            t->op(&val_u64, t->operand);
+            state->counter++;
+        }
+        break;
+    }
+}
+
+static void tearing_test_checker(test_state_t *state)
+{
+    switch (state->conf.width) {
+    default:
+        break;
+    case TEST_WIDTH_8_BIT:
+        {
+            const fetch_op_test_u8_t *t = &fetch_op_tests_u8[state->conf.idx];
+            uint8_t val = atomic_load_u8(&val_u8);
+            for (uint8_t i = 0; i < t->allowed_numof; i++) {
+                if (t->allowed[i] == val) {
+                    return;
+                }
+            }
+            print_str(t->name);
+            print_str(": Load/store tearing detected. (Value was ");
+            print_u32_hex(val);
+            print_str(")\n");
+            stats_failures++;
+        }
+        break;
+    case TEST_WIDTH_16_BIT:
+        {
+            const fetch_op_test_u16_t *t = &fetch_op_tests_u16[state->conf.idx];
+            uint16_t val = atomic_load_u16(&val_u16);
+            for (uint8_t i = 0; i < t->allowed_numof; i++) {
+                if (t->allowed[i] == val) {
+                    return;
+                }
+            }
+            print_str(t->name);
+            print_str(": Load/store tearing detected. (Value was ");
+            print_u32_hex(val);
+            print_str(")\n");
+            stats_failures++;
+        }
+        break;
+    case TEST_WIDTH_32_BIT:
+        {
+            const fetch_op_test_u32_t *t = &fetch_op_tests_u32[state->conf.idx];
+            uint32_t val = atomic_load_u32(&val_u32);
+            for (uint8_t i = 0; i < t->allowed_numof; i++) {
+                if (t->allowed[i] == val) {
+                    return;
+                }
+            }
+            print_str(t->name);
+            print_str(": Load/store tearing detected. (Value was ");
+            print_u32_hex(val);
+            print_str(")\n");
+            stats_failures++;
+        }
+        break;
+    case TEST_WIDTH_64_BIT:
+        {
+            const fetch_op_test_u64_t *t = &fetch_op_tests_u64[state->conf.idx];
+            uint64_t val = atomic_load_u64(&val_u64);
+            for (uint8_t i = 0; i < t->allowed_numof; i++) {
+                if (t->allowed[i] == val) {
+                    return;
+                }
+            }
+            print_str(t->name);
+            print_str(": Load/store tearing detected. (Value was ");
+            print_u64_hex(val);
+            print_str(")\n");
+            stats_failures++;
+        }
+        break;
+    }
+}
(Value was "); + print_u64_hex(val); + print_str(")\n"); + stats_failures++; + } + break; + } +} + +static void lost_update_test_worker(test_state_t *state) +{ + switch (state->conf.width) { + default: + break; + case TEST_WIDTH_8_BIT: + { + const fetch_op_test_u8_t *t = &fetch_op_tests_u8[state->conf.idx]; + t->op(&val_u8, t->noop_operand); + } + break; + case TEST_WIDTH_16_BIT: + { + const fetch_op_test_u16_t *t = &fetch_op_tests_u16[state->conf.idx]; + t->op(&val_u16, t->noop_operand); + } + break; + case TEST_WIDTH_32_BIT: + { + const fetch_op_test_u32_t *t = &fetch_op_tests_u32[state->conf.idx]; + t->op(&val_u32, t->noop_operand); + } + break; + case TEST_WIDTH_64_BIT: + { + const fetch_op_test_u64_t *t = &fetch_op_tests_u64[state->conf.idx]; + t->op(&val_u64, t->noop_operand); + } + break; + } +} + +static void lost_update_test_checker(test_state_t *state) +{ + switch (state->conf.width) { + default: + break; + case TEST_WIDTH_8_BIT: + { + const fetch_op_test_u8_t *t = &fetch_op_tests_u8[state->conf.idx]; + uint8_t val = atomic_load_u8(&val_u8); + if (val != state->counter) { + print_str(t->name); + print_str(": Lost update detected.\n"); + stats_failures++; + } + atomic_store_u8(&val_u8, ++state->counter); + } + break; + case TEST_WIDTH_16_BIT: + { + const fetch_op_test_u16_t *t = &fetch_op_tests_u16[state->conf.idx]; + uint16_t val = atomic_load_u16(&val_u16); + if (val != state->counter) { + print_str(t->name); + print_str(": Lost update detected.\n"); + stats_failures++; + } + atomic_store_u16(&val_u16, ++state->counter); + } + break; + case TEST_WIDTH_32_BIT: + { + const fetch_op_test_u32_t *t = &fetch_op_tests_u32[state->conf.idx]; + uint32_t val = atomic_load_u32(&val_u32); + if (val != state->counter) { + print_str(t->name); + print_str(": Lost update detected.\n"); + stats_failures++; + } + atomic_store_u32(&val_u32, ++state->counter); + } + break; + case TEST_WIDTH_64_BIT: + { + const fetch_op_test_u64_t *t = &fetch_op_tests_u64[state->conf.idx]; + uint64_t val = atomic_load_u64(&val_u64); + if (val != state->counter) { + print_str(t->name); + print_str(": Lost update detected.\n"); + stats_failures++; + } + atomic_store_u64(&val_u64, ++state->counter); + } + break; + } +} + +static void *thread_worker_func(void *arg) +{ + (void)arg; + static test_state_t state = { .conf = { .idx = UINT8_MAX } }; + + while (1) { + mutex_lock(&conf_mutex); + test_conf_t c = conf; + stats_ops++; + mutex_unlock(&conf_mutex); + if (memcmp(&c, &state.conf, sizeof(c))) { + state.conf = c; + state.counter = UINT8_MAX; + } + + switch (state.conf.type) { + default: + break; + case TEST_TYPE_TEARING: + tearing_test_worker(&state); + break; + case TEST_TYPE_LOST_UPDATE: + lost_update_test_worker(&state); + break; + } + } + + return NULL; +} + +static void *thread_checker_func(void *arg) +{ + (void)arg; + static test_state_t state = { .conf = { .idx = 0 } }; + + while (1) { + mutex_lock(&conf_mutex); + test_conf_t c = conf; + stats_tests++; + mutex_unlock(&conf_mutex); + if (memcmp(&c, &state.conf, sizeof(c))) { + state.conf = c; + state.counter = 0; + } + + switch (state.conf.type) { + default: + break; + case TEST_TYPE_TEARING: + tearing_test_checker(&state); + break; + case TEST_TYPE_LOST_UPDATE: + lost_update_test_checker(&state); + break; + } + + xtimer_usleep((random_uint32() & 0x3ff) + XTIMER_BACKOFF); + } + + return NULL; +} + +static void *thread_timeout_func(void *arg) +{ + (void)arg; + while (1) { + mutex_lock(&stop_mutex); + sc_stop(0, NULL); + } + return NULL; +} + +static void 
+static void test_timeout_callback(void *arg)
+{
+    (void)arg;
+    mutex_unlock(&stop_mutex);
+}
+
+static int start_test(test_width_t width, size_t fn_index, int timeout)
+{
+    conf.width = width;
+    conf.idx = fn_index;
+    testing_active = 1;
+    stats_ops = 0;
+    stats_tests = 0;
+    stats_failures = 0;
+
+    /* Initialize values. Doing so for every width saves ROM and lines of
+     * code, but wastes a few CPU cycles */
+    if (conf.type == TEST_TYPE_LOST_UPDATE) {
+        atomic_store_u8(&val_u8, 0);
+        atomic_store_u16(&val_u16, 0);
+        atomic_store_u32(&val_u32, 0);
+        atomic_store_u64(&val_u64, 0);
+    }
+    else {
+        const fetch_op_test_u8_t *t8 = &fetch_op_tests_u8[fn_index];
+        const fetch_op_test_u16_t *t16 = &fetch_op_tests_u16[fn_index];
+        const fetch_op_test_u32_t *t32 = &fetch_op_tests_u32[fn_index];
+        const fetch_op_test_u64_t *t64 = &fetch_op_tests_u64[fn_index];
+        atomic_store_u8(&val_u8, t8->init);
+        atomic_store_u16(&val_u16, t16->init);
+        atomic_store_u32(&val_u32, t32->init);
+        atomic_store_u64(&val_u64, t64->init);
+    }
+
+    if (timeout) {
+        static xtimer_t xt = { .callback = test_timeout_callback };
+        xtimer_set(&xt, US_PER_SEC * timeout);
+    }
+    mutex_unlock(&conf_mutex);
+    return 0;
+}
+
+static int select_func_and_start_test(const char *funcname, int timeout)
+{
+    size_t fn_len = strlen(funcname);
+
+    /* Valid function names end with *_u8, *_u16, *_u32, or *_u64. Thus, the
+     * last char is already sufficient to determine the width. We do not need
+     * to search all test specs for the given name, but only those of
+     * matching width
+     */
+    switch (funcname[fn_len - 1]) {
+    case '8':
+        for (size_t i = 0; i < ARRAY_SIZE(fetch_op_tests_u8); i++) {
+            if (!strcmp(fetch_op_tests_u8[i].name, funcname)) {
+                return start_test(TEST_WIDTH_8_BIT, i, timeout);
+            }
+        }
+        break;
+    case '6':
+        for (size_t i = 0; i < ARRAY_SIZE(fetch_op_tests_u16); i++) {
+            if (!strcmp(fetch_op_tests_u16[i].name, funcname)) {
+                return start_test(TEST_WIDTH_16_BIT, i, timeout);
+            }
+        }
+        break;
+    case '2':
+        for (size_t i = 0; i < ARRAY_SIZE(fetch_op_tests_u32); i++) {
+            if (!strcmp(fetch_op_tests_u32[i].name, funcname)) {
+                return start_test(TEST_WIDTH_32_BIT, i, timeout);
+            }
+        }
+        break;
+    case '4':
+        for (size_t i = 0; i < ARRAY_SIZE(fetch_op_tests_u64); i++) {
+            if (!strcmp(fetch_op_tests_u64[i].name, funcname)) {
+                return start_test(TEST_WIDTH_64_BIT, i, timeout);
+            }
+        }
+        break;
+    }
+
+    print_str("Function \"");
+    print_str(funcname);
+    print_str("\" not found\n");
+    return 1;
+}
+
+static int sc_tearing_test(int argc, char **argv)
+{
+    if ((argc != 2) && (argc != 3)) {
+        print_str("Usage: ");
+        print_str(argv[0]);
+        print_str(" <FUNCTION_TO_TEST> [TIMEOUT_IN_SECONDS]\n");
+        return 1;
+    }
+
+    int timeout = 0;
+    if (argc == 3) {
+        timeout = atoi(argv[2]);
+        if (timeout <= 0) {
+            print_str("Invalid timeout\n");
+            return 1;
+        }
+    }
+
+    if (testing_active) {
+        /* pause the worker; start_test() unlocks conf_mutex again */
+        mutex_lock(&conf_mutex);
+        testing_active = 0;
+    }
+
+    conf.type = TEST_TYPE_TEARING;
+    return select_func_and_start_test(argv[1], timeout);
+}
+
+static int sc_lost_update_test(int argc, char **argv)
+{
+    if ((argc != 2) && (argc != 3)) {
+        print_str("Usage: ");
+        print_str(argv[0]);
+        print_str(" <FUNCTION_TO_TEST> [TIMEOUT_IN_SECONDS]\n");
+        return 1;
+    }
+
+    int timeout = 0;
+    if (argc == 3) {
+        timeout = atoi(argv[2]);
+        if (timeout <= 0) {
+            print_str("Invalid timeout\n");
+            return 1;
+        }
+    }
+
+    if (testing_active) {
+        /* pause the worker; start_test() unlocks conf_mutex again */
+        mutex_lock(&conf_mutex);
+        testing_active = 0;
+    }
+
+    conf.type = TEST_TYPE_LOST_UPDATE;
+    return select_func_and_start_test(argv[1], timeout);
+}
+
+static int sc_stats(int argc, char **argv)
+{
+    (void)argc;
+    (void)argv;
+    if (!testing_active) {
+        print_str("No test active\n");
+        return 0;
+    }
+    mutex_lock(&conf_mutex);
+    print_u64_dec(stats_ops);
+    print_str(" operations performed\n");
+    print_u64_dec(stats_tests);
+    print_str(" tests performed\n");
+    print_u64_dec(stats_failures);
+    print_str(" corruptions detected\n");
+    mutex_unlock(&conf_mutex);
+    return 0;
+}
+
+static int sc_stop(int argc, char **argv)
+{
+    (void)argc;
+    (void)argv;
+    if (testing_active) {
+        mutex_lock(&conf_mutex);
+        testing_active = 0;
+        if (stats_failures) {
+            print_str("ERROR: Detected ");
+            print_u64_dec(stats_failures);
+            print_str(" corruptions\n");
+        }
+        else {
+            print_str("OK\n");
+        }
+    }
+    return 0;
+}
+
+static int sc_list(int argc, char **argv)
+{
+    (void)argc;
+    (void)argv;
+
+    for (size_t i = 0; i < ARRAY_SIZE(fetch_op_tests_u8); i++) {
+        print_str(fetch_op_tests_u8[i].name);
+        print_str("\n");
+    }
+
+    for (size_t i = 0; i < ARRAY_SIZE(fetch_op_tests_u16); i++) {
+        print_str(fetch_op_tests_u16[i].name);
+        print_str("\n");
+    }
+
+    for (size_t i = 0; i < ARRAY_SIZE(fetch_op_tests_u32); i++) {
+        print_str(fetch_op_tests_u32[i].name);
+        print_str("\n");
+    }
+
+    for (size_t i = 0; i < ARRAY_SIZE(fetch_op_tests_u64); i++) {
+        print_str(fetch_op_tests_u64[i].name);
+        print_str("\n");
+    }
+
+    return 0;
+}
+
+int main(void)
+{
+    thread_create(thread_worker_stack, sizeof(thread_worker_stack),
+                  THREAD_PRIORITY_MAIN + 2, THREAD_CREATE_STACKTEST,
+                  thread_worker_func, NULL, "worker");
+    thread_create(thread_checker_stack, sizeof(thread_checker_stack),
+                  THREAD_PRIORITY_MAIN + 1, THREAD_CREATE_STACKTEST,
+                  thread_checker_func, NULL, "checker");
+    thread_create(thread_timeout_stack, sizeof(thread_timeout_stack),
+                  THREAD_PRIORITY_MAIN - 1, THREAD_CREATE_STACKTEST,
+                  thread_timeout_func, NULL, "timeout");
+
+    print_str(
+        "Test Application for sys/atomic_utils\n"
+        "=====================================\n"
+        "\n"
+        "Use the shell commands \"tearing_test\" and \"lost_update_test\" to\n"
+        "test the various <FAMILY>_fetch_<OP>_u<WIDTH> functions for lost\n"
+        "updates and store tearing. The \"list\" shell command lists the\n"
+        "functions that can be tested. See below which function families\n"
+        "should pass which tests.\n"
+        "\n"
+        "The atomic_fetch_<OP>_u<WIDTH> family must pass all tests.\n"
+        "\n"
+        "The semi_atomic_fetch_<OP>_u<WIDTH> family must pass the tearing\n"
+        "test, but may fail the lost update test.\n"
+        "\n"
+        "The volatile_fetch_<OP>_u<WIDTH> family should fail the lost update\n"
+        "tests for all platforms. On most platforms it should fail the\n"
+        "tearing tests for widths greater than the word size. (One exception\n"
+        "is the Cortex-M7 family, which can, by using instruction fusion,\n"
+        "issue two 32 bit writes in a single CPU cycle.) The volatile family\n"
+        "is provided to verify that the test actually can detect issues. Any\n"
+        "failure here is not an indication of an issue, but indicates the\n"
+        "test is working as expected.\n"
+        );
+    char line_buf[SHELL_DEFAULT_BUFSIZE];
+    shell_run(shell_commands, line_buf, SHELL_DEFAULT_BUFSIZE);
+
+    return 0;
+}
diff --git a/tests/sys_atomic_utils/tests/01-run.py b/tests/sys_atomic_utils/tests/01-run.py
new file mode 100755
index 000000000000..3a468acefd25
--- /dev/null
+++ b/tests/sys_atomic_utils/tests/01-run.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+
+# Copyright (C) 2020 Otto-von-Guericke-Universität Magdeburg
+#
+# This file is subject to the terms and conditions of the GNU Lesser
+# General Public License v2.1. See the file LICENSE in the top level
+# directory for more details.
+
+# @author Marian Buschsieweke
+
+import sys
+from testrunner import run
+
+
+def testfunc(child):
+    fns = ["fetch_add", "fetch_sub", "fetch_or", "fetch_xor", "fetch_and"]
+    postfixes = ["_u8", "_u16", "_u32", "_u64"]
+    tests = ["tearing_test", "lost_update_test"]
+    prefixes = {
+        "tearing_test": ["atomic_", "semi_atomic_"],
+        "lost_update_test": ["atomic_"]
+    }
+    timeout = "1"
+
+    for test in tests:
+        for prefix in prefixes[test]:
+            for postfix in postfixes:
+                for fn in fns:
+                    child.sendline(test + " " + prefix + fn + postfix + " "
+                                   + timeout)
+                    child.expect("OK")
+
+
+if __name__ == "__main__":
+    sys.exit(run(testfunc))
diff --git a/tests/sys_atomic_utils/volatile_utils.h b/tests/sys_atomic_utils/volatile_utils.h
new file mode 100644
index 000000000000..dd9eefa1e4f6
--- /dev/null
+++ b/tests/sys_atomic_utils/volatile_utils.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2020 Otto-von-Guericke-Universität Magdeburg
+ *
+ * This file is subject to the terms and conditions of the GNU Lesser General
+ * Public License v2.1. See the file LICENSE in the top level directory for
+ * more details.
+ */
+
+/**
+ * @ingroup     tests
+ * @{
+ *
+ * @file
+ * @brief       For comparison: "Atomic" accesses using volatile
+ * @author      Marian Buschsieweke
+ *
+ * This header provides the `volatile_*()` family of functions: counterparts
+ * of the `atomic_*()` functions of @ref sys_atomic_utils with `volatile_`
+ * instead of `atomic_` as prefix. These implementations rely on the
+ * `volatile` type qualifier for implementing "atomic" accesses, which in
+ * many cases will not result in atomic operations. So this is an
+ * implementation known to be ***BROKEN***. Its sole purpose is to verify
+ * that the test does detect broken implementations. Do not use these
+ * functions for anything else but testing ;-)
+ */
+
+#ifndef VOLATILE_UTILS_H
+#define VOLATILE_UTILS_H
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline uint8_t volatile_load_u8(const uint8_t *var)
+{
+    return *((const volatile uint8_t *)var);
+}
+static inline uint16_t volatile_load_u16(const uint16_t *var)
+{
+    return *((const volatile uint16_t *)var);
+}
+static inline uint32_t volatile_load_u32(const uint32_t *var)
+{
+    return *((const volatile uint32_t *)var);
+}
+static inline uint64_t volatile_load_u64(const uint64_t *var)
+{
+    return *((const volatile uint64_t *)var);
+}
+
+static inline void volatile_store_u8(uint8_t *dest, uint8_t val)
+{
+    *((volatile uint8_t *)dest) = val;
+}
+static inline void volatile_store_u16(uint16_t *dest, uint16_t val)
+{
+    *((volatile uint16_t *)dest) = val;
+}
+static inline void volatile_store_u32(uint32_t *dest, uint32_t val)
+{
+    *((volatile uint32_t *)dest) = val;
+}
+static inline void volatile_store_u64(uint64_t *dest, uint64_t val)
+{
+    *((volatile uint64_t *)dest) = val;
+}
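+
+/*
+ * Note: each fetch op below is a plain read-modify-write through a volatile
+ * pointer, i.e. at least a separate load, ALU operation, and store. A
+ * context switch or IRQ between those steps is exactly what the lost update
+ * test triggers on.
+ */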
+static inline void volatile_fetch_add_u8(uint8_t *dest, uint8_t val)
+{
+    *((volatile uint8_t *)dest) += val;
+}
+static inline void volatile_fetch_sub_u8(uint8_t *dest, uint8_t val)
+{
+    *((volatile uint8_t *)dest) -= val;
+}
+static inline void volatile_fetch_or_u8(uint8_t *dest, uint8_t val)
+{
+    *((volatile uint8_t *)dest) |= val;
+}
+static inline void volatile_fetch_xor_u8(uint8_t *dest, uint8_t val)
+{
+    *((volatile uint8_t *)dest) ^= val;
+}
+static inline void volatile_fetch_and_u8(uint8_t *dest, uint8_t val)
+{
+    *((volatile uint8_t *)dest) &= val;
+}
+
+static inline void volatile_fetch_add_u16(uint16_t *dest, uint16_t val)
+{
+    *((volatile uint16_t *)dest) += val;
+}
+static inline void volatile_fetch_sub_u16(uint16_t *dest, uint16_t val)
+{
+    *((volatile uint16_t *)dest) -= val;
+}
+static inline void volatile_fetch_or_u16(uint16_t *dest, uint16_t val)
+{
+    *((volatile uint16_t *)dest) |= val;
+}
+static inline void volatile_fetch_xor_u16(uint16_t *dest, uint16_t val)
+{
+    *((volatile uint16_t *)dest) ^= val;
+}
+static inline void volatile_fetch_and_u16(uint16_t *dest, uint16_t val)
+{
+    *((volatile uint16_t *)dest) &= val;
+}
+
+static inline void volatile_fetch_add_u32(uint32_t *dest, uint32_t val)
+{
+    *((volatile uint32_t *)dest) += val;
+}
+static inline void volatile_fetch_sub_u32(uint32_t *dest, uint32_t val)
+{
+    *((volatile uint32_t *)dest) -= val;
+}
+static inline void volatile_fetch_or_u32(uint32_t *dest, uint32_t val)
+{
+    *((volatile uint32_t *)dest) |= val;
+}
+static inline void volatile_fetch_xor_u32(uint32_t *dest, uint32_t val)
+{
+    *((volatile uint32_t *)dest) ^= val;
+}
+static inline void volatile_fetch_and_u32(uint32_t *dest, uint32_t val)
+{
+    *((volatile uint32_t *)dest) &= val;
+}
+
+static inline void volatile_fetch_add_u64(uint64_t *dest, uint64_t val)
+{
+    *((volatile uint64_t *)dest) += val;
+}
+static inline void volatile_fetch_sub_u64(uint64_t *dest, uint64_t val)
+{
+    *((volatile uint64_t *)dest) -= val;
+}
+static inline void volatile_fetch_or_u64(uint64_t *dest, uint64_t val)
+{
+    *((volatile uint64_t *)dest) |= val;
+}
+static inline void volatile_fetch_xor_u64(uint64_t *dest, uint64_t val)
+{
+    *((volatile uint64_t *)dest) ^= val;
+}
+static inline void volatile_fetch_and_u64(uint64_t *dest, uint64_t val)
+{
+    *((volatile uint64_t *)dest) &= val;
+}
+
+static inline void volatile_set_bit_u8(uint8_t *mask, uint8_t bit)
+{
+    *((volatile uint8_t *)mask) |= 1 << bit;
+}
+static inline void volatile_set_bit_u16(uint16_t *mask, uint8_t bit)
+{
+    *((volatile uint16_t *)mask) |= 1 << bit;
+}
+static inline void volatile_set_bit_u32(uint32_t *mask, uint8_t bit)
+{
+    *((volatile uint32_t *)mask) |= 1UL << bit;
+}
+static inline void volatile_set_bit_u64(uint64_t *mask, uint8_t bit)
+{
+    *((volatile uint64_t *)mask) |= 1ULL << bit;
+}
+
+static inline void volatile_clear_bit_u8(uint8_t *mask, uint8_t bit)
+{
+    *((volatile uint8_t *)mask) &= ~(1 << bit);
+}
+static inline void volatile_clear_bit_u16(uint16_t *mask, uint8_t bit)
+{
+    *((volatile uint16_t *)mask) &= ~(1 << bit);
+}
+static inline void volatile_clear_bit_u32(uint32_t *mask, uint8_t bit)
+{
+    *((volatile uint32_t *)mask) &= ~(1UL << bit);
+}
+static inline void volatile_clear_bit_u64(uint64_t *mask, uint8_t bit)
+{
+    *((volatile uint64_t *)mask) &= ~(1ULL << bit);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VOLATILE_UTILS_H */
+/** @} */