From 991933b78ffc1bac1f564e1bccd64173cf042451 Mon Sep 17 00:00:00 2001 From: "Peter A. Bigot" Date: Wed, 2 Jan 2019 10:19:32 -0600 Subject: [PATCH 1/4] sys: dlist: add API to split lists Add a constant-time operation that splits a list into a portion before a node and the portion starting at the node, moving the portion before the node to the end of a different list and leaving the original list starting at the node. Add a constant-time list join operation to move an existing list to the end of a different list. This supports use cases where a dlist is split at some point (e.g. timers that have reached their deadline), so that the extracted elements can be processed without affecting content added to the dlist during processing. Upstream-Status: Pending [from upstream 12485, no upstream need] Signed-off-by: Peter A. Bigot --- include/misc/dlist.h | 62 +++++++++++++++++++++++++++++ tests/kernel/common/src/dlist.c | 69 +++++++++++++++++++++++++++++++++ 2 files changed, 131 insertions(+) diff --git a/include/misc/dlist.h b/include/misc/dlist.h index dab55dddf79028..8b58bcf408bc27 100644 --- a/include/misc/dlist.h +++ b/include/misc/dlist.h @@ -547,6 +547,68 @@ static inline sys_dnode_t *sys_dlist_get(sys_dlist_t *list) return node; } +/** + * @brief place the contents of one list at the end of another list. + * + * The @p to and @p from lists must be distinct. On completion @p from + * will be empty, all of its elements having been appended in original + * order to @p to. + * + * @param to a list, possibly non-empty, to which from will be appended + * @param from the list providing the elements to append + * + * @return N/A + */ +static inline void sys_dlist_join(sys_dlist_t *to, + sys_dlist_t *from) +{ + if (!sys_dlist_is_empty(from)) { + from->head->prev = to->tail; + to->tail->next = from->head; + + from->tail->next = to; + to->tail = from->tail; + + sys_dlist_init(from); + } +} + +/** + * @brief split a list at a node + * + * list will be updated to start at node. Any nodes before node will + * be appended to prefix. + * + * This and other sys_dlist_*() functions are not thread safe. 
+ * + * @param prefix a list to which items in @p list before @p node + * will be appended + * @param list a non-empty list + * @param node a node within @p list + * + * @return N/A + */ +static inline void sys_dlist_split(sys_dlist_t *prefix, + sys_dlist_t *list, + sys_dnode_t *node) +{ + sys_dnode_t *old_pfx_tail = prefix->tail; + sys_dnode_t *new_pfx_tail = node->prev; + + if (sys_dlist_peek_head(list) == node) { + return; + } + + list->head->prev = old_pfx_tail; + old_pfx_tail->next = list->head; + + prefix->tail = new_pfx_tail; + new_pfx_tail->next = prefix; + + list->head = node; + node->prev = list; +} + #ifdef __cplusplus } #endif diff --git a/tests/kernel/common/src/dlist.c b/tests/kernel/common/src/dlist.c index 4c60ff528c5e17..da5cd1b62ebe23 100644 --- a/tests/kernel/common/src/dlist.c +++ b/tests/kernel/common/src/dlist.c @@ -8,6 +8,7 @@ #include static sys_dlist_t test_list; +static sys_dlist_t test_list2; struct container_node { sys_dnode_t node; @@ -280,6 +281,74 @@ void test_dlist(void) zassert_true((verify_emptyness(&test_list)), "test_list should be empty"); + + /* Catenate an empty list to a non-empty list */ + sys_dlist_append(&test_list, &test_node_1.node); + sys_dlist_init(&test_list2); + sys_dlist_join(&test_list, &test_list2); + zassert_true(sys_dlist_is_empty(&test_list2), + "list2 not empty"); + zassert_true((verify_tail_head(&test_list, &test_node_1.node, + &test_node_1.node, true)), + "test_list head/tail are wrong"); + + /* Catenate a non-empty list to an empty list moves elements. */ + sys_dlist_join(&test_list2, &test_list); + zassert_true(sys_dlist_is_empty(&test_list), + "list not empty"); + zassert_true((verify_tail_head(&test_list2, &test_node_1.node, + &test_node_1.node, true)), + "test_list2 head/tail are wrong"); + + /* Catenate a non-empty list to a non-empty list moves elements. 
*/ + sys_dlist_append(&test_list, &test_node_2.node); + sys_dlist_append(&test_list, &test_node_3.node); + zassert_true((verify_tail_head(&test_list, &test_node_2.node, + &test_node_3.node, false)), + "test_list head/tail are wrong"); + sys_dlist_join(&test_list2, &test_list); + zassert_true(sys_dlist_is_empty(&test_list), + "list not empty"); + zassert_true((verify_tail_head(&test_list2, &test_node_1.node, + &test_node_3.node, false)), + "test_list2 head/tail are wrong"); + zassert_equal(test_node_1.node.next, &test_node_2.node, + "node2 not after node1"); + zassert_equal(test_node_2.node.prev, &test_node_1.node, + "node1 not before node2"); + + /* Split list at head does nothing */ + sys_dlist_split(&test_list, &test_list2, &test_node_1.node); + zassert_true(sys_dlist_is_empty(&test_list), + "list not empty"); + + /* Split list after head moves */ + sys_dlist_split(&test_list, &test_list2, &test_node_2.node); + zassert_true((verify_tail_head(&test_list, &test_node_1.node, + &test_node_1.node, true)), + "test_list head/tail are wrong"); + zassert_true((verify_tail_head(&test_list2, &test_node_2.node, + &test_node_3.node, false)), + "test_list2 head/tail are wrong"); + + /* Split list at tail moves */ + sys_dlist_split(&test_list, &test_list2, &test_node_3.node); + zassert_true((verify_tail_head(&test_list, &test_node_1.node, + &test_node_2.node, false)), + "test_list head/tail are wrong"); + zassert_true((verify_tail_head(&test_list2, &test_node_3.node, + &test_node_3.node, true)), + "test_list2 head/tail are wrong"); + + sys_dlist_remove(&test_node_1.node); + sys_dlist_remove(&test_node_2.node); + zassert_true(sys_dlist_is_empty(&test_list), + "list not empty"); + + sys_dlist_remove(&test_node_3.node); + zassert_true(sys_dlist_is_empty(&test_list2), + "list2 not empty"); + /* test iterator from a node */ struct data_node { sys_dnode_t node; From 1d333074833e091c9a334dc8c3b2b401b02620c1 Mon Sep 17 00:00:00 2001 From: "Peter A. Bigot" Date: Thu, 18 Apr 2019 09:13:49 -0500 Subject: [PATCH 2/4] [DNM] passim: add k_alarm support k_alarm is analogous to k_timer but with more flexibility and precision. Alarms are set using an absolute deadline measured in ticks of the system clock. The system clock must be maintained as a 64-bit unsigned counter that will never overflow. Most operations involve a deadline specified as a 32-bit unsigned value. The deadline is compared against the low 32 bits of a 64-bit system clock. As long as the system clock rate does not exceed 64 MHz the 32-bit counter will not wrap for at least one minute; the 64-bit counter permits 10 years at 54 GHz. There is no alignment of deadlines to Zephyr's notion of a clock tick (generally 10 ms, minimum 1 ms). Consequently deadlines are expressed and triggered with the full accuracy of the system clock, subject only to delays from higher-priority interrupts. Further, alarms do not fire early due to cycles lost as a result of fractional or truncated clock ticks. Deadlines that are at or before the current (32-bit) clock value are considered to be in the past. For a 32 KiHz system clock this allows deadlines up to 18:12:15.999969 H:M:S in the future. (Alarms with 64-bit deadlines could be supported by an additional data structure.) Unlike k_timer the k_alarm data structure does not support automatic rescheduling at a fixed interval. However, the alarm-specific callback is permitted to reschedule the alarm. The original deadline is available in the k_alarm structure for a fixed-interval reschedule that ignores delays in callback invocation. Alternatively, the live clock can be read and the alarm scheduled at a fixed duration after the time of the last callback.
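For concreteness, a sketch of the two rescheduling idioms just described, assuming the k_alarm_handler_t signature and k_alarm_schedule() API introduced below; the period constant and handler names are hypothetical:

/* Hypothetical ~100 ms period on a 32 KiHz system clock. */
#define PERIOD_TICKS (32768U / 10U)

/* Fixed interval: step from the original deadline, so callback
 * latency does not accumulate into the period.
 */
static void fixed_interval_handler(struct k_alarm *alarm, void *ud)
{
	(void)k_alarm_schedule(alarm, alarm->deadline + PERIOD_TICKS, 0);
}

/* Fixed duration after the callback: read the live clock instead. */
static void fixed_delay_handler(struct k_alarm *alarm, void *ud)
{
	(void)k_alarm_schedule(alarm, k_cycle_get_32() + PERIOD_TICKS, 0);
}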
Alarms that are scheduled with a delay that has already passed become ready immediately and are processed in order of scheduling within deadline. Late alarms scheduled within an alarm callback will be processed without delay. Alarms that are ready or scheduled can be cancelled; whether the cancellation took effect before the callback was invoked is indicated by the cancellation return value. Flags allow late-to-set detection, atomic rescheduling of alarms that have not fired, and conditional rescheduling of alarms to fire earlier. Upstream-Status: Pending [probably not acceptable] Signed-off-by: Peter A. Bigot --- include/arch/arm/cortex_m/misc.h | 4 + include/kernel.h | 307 +++++++++++++++++++++++++++++++ include/linker/common-ram.ld | 9 + kernel/CMakeLists.txt | 1 + kernel/Kconfig | 8 + kernel/alarm.c | 251 +++++++++++++++++++++++++++ scripts/gen_kobject_list.py | 1 + scripts/sanitycheck | 2 +- 8 files changed, 582 insertions(+), 1 deletion(-) create mode 100644 kernel/alarm.c diff --git a/include/arch/arm/cortex_m/misc.h b/include/arch/arm/cortex_m/misc.h index 82cedab39bbafa..3bcf8b677d0b25 100644 --- a/include/arch/arm/cortex_m/misc.h +++ b/include/arch/arm/cortex_m/misc.h @@ -24,6 +24,10 @@ extern void k_cpu_idle(void); extern u32_t z_timer_cycle_get_32(void); #define z_arch_k_cycle_get_32() z_timer_cycle_get_32() +#if CONFIG_ALARM +extern u64_t z_timer_cycle_get_64(void); +#endif /* CONFIG_ALARM */ + /** * @brief Explicitly nop operation. */ diff --git a/include/kernel.h b/include/kernel.h index 1acafea7a89e46..892737d6cd4809 100644 --- a/include/kernel.h +++ b/include/kernel.h @@ -1,5 +1,6 @@ /* * Copyright (c) 2016, Wind River Systems, Inc. + * Copyright (c) 2019, Peter Bigot Consulting, LLC * * SPDX-License-Identifier: Apache-2.0 */ @@ -125,6 +126,9 @@ struct k_stack; struct k_mem_slab; struct k_mem_pool; struct k_timer; +#if CONFIG_ALARM +struct k_alarm; +#endif /* CONFIG_ALARM */ struct k_poll_event; struct k_poll_signal; struct k_mem_domain; @@ -1417,6 +1421,309 @@ struct k_timer { #define K_TIMER_INITIALIZER DEPRECATED_MACRO Z_TIMER_INITIALIZER +#if CONFIG_ALARM + +enum k_alarm_state { + /* + * Initial and idle state of an alarm. + * + * Transitions to: + * * SCHEDULED on k_alarm_schedule(); + * * READY on k_alarm_schedule(). + */ + K_ALARM_UNSCHEDULED, + + /* + * State of an alarm that had not yet reached its deadline the + * last time it was inspected. + * + * Transitions to: + * * READY when the system clock determines the alarm deadline has + * been reached; + * * CANCELLED on k_alarm_cancel(). + */ + K_ALARM_SCHEDULED, + + /* + * State of an alarm that has reached its deadline, but has + * not yet had its handler callback invoked. + * + * Transitions to: + * * ACTIVE when the system alarm infrastructure is processing the + * alarm; + * * CANCELLED on k_alarm_cancel(). + */ + K_ALARM_READY, + + /* + * State the alarm is placed in just before the system alarm + * infrastructure invokes its handler_fn. + * + * The alarm is in this state when its handler_fn is invoked. + * + * Transitions to: + * * UNSCHEDULED if its state is still ACTIVE when the + * handler_fn returns; + * * Other states if the handler_fn invokes + * k_alarm_schedule(). + */ + K_ALARM_ACTIVE, + + /* + * State the alarm is placed in just before k_alarm_cancel() + * invokes its cancel_fn. + * + * The alarm is in this state when its cancel_fn is invoked. + * + * Transitions to: + * * UNSCHEDULED if its state is still CANCELLED when the + * cancel_fn returns; + * * Other states if the cancel_fn invokes + * k_alarm_schedule(). + */ + K_ALARM_CANCELLED, +};
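As an illustration of the CANCELLED transitions documented above, a cancel callback may immediately re-arm the alarm; a minimal sketch, with the handler name and re-arm interval hypothetical:

/* Re-arm on cancellation, so the alarm leaves K_ALARM_CANCELLED for
 * K_ALARM_SCHEDULED (or K_ALARM_READY) instead of returning to
 * K_ALARM_UNSCHEDULED.  Runs in the cancelling thread's context.
 */
static void rearm_on_cancel(struct k_alarm *alarm, void *ud)
{
	(void)k_alarm_schedule(alarm, alarm->deadline + 32768U, 0);
}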
+ +/** Treat late-to-set as an error in k_alarm_schedule(). + * + * Normally late-to-set alarms are scheduled to fire as soon as + * possible. When this flag is included such an alarm will not be + * scheduled; instead `k_alarm_schedule()` will fail with + * `-EINVAL`. + */ +#define K_ALARM_FLAG_ERROR_IF_LATE 0x01 + +/** Override an existing scheduled alarm. + * + * This flag overrides the behavior of `k_alarm_schedule()` when + * invoked with an already-scheduled alarm that has not fired; it has + * no effect in other alarm states. + * + * If the flag is present the alarm will be rescheduled to the new + * deadline. `K_ALARM_FLAG_ERROR_IF_LATE` applies to the attempt to + * reschedule. + * + * This flag supersedes `K_ALARM_FLAG_IF_SOONER`. + */ +#define K_ALARM_FLAG_REPLACE 0x02 + +/** Override an existing scheduled alarm if the new deadline is closer. + * + * This flag overrides the behavior of `k_alarm_schedule()` when + * invoked with an already-scheduled alarm that has not fired; it has + * no effect in other alarm states. + * + * If the flag is present the alarm will be rescheduled to the new + * deadline only if that is sooner than the existing deadline; + * otherwise the existing deadline will be left unchanged. + * + * `K_ALARM_FLAG_ERROR_IF_LATE` applies if the alarm is rescheduled. + * + * This flag is superseded by `K_ALARM_FLAG_REPLACE`. + */ +#define K_ALARM_FLAG_IF_SOONER 0x04 + +typedef void (*k_alarm_handler_t)(struct k_alarm *alarm, + void *ud); + +typedef void (*k_alarm_cancel_t)(struct k_alarm *alarm, + void *ud); + +struct k_alarm { + /* links the alarm into the scheduled and ready queues. */ + sys_dnode_t node; + + /* Low 32 bits of the system clock at which the alarm should fire. */ + u32_t deadline; + + /* runs in ISR context */ + k_alarm_handler_t handler_fn; + + /* runs in the context of the thread that calls k_alarm_cancel() */ + k_alarm_cancel_t cancel_fn; + + /* user-specific data passed to handler and cancel functions. */ + void *user_data; + + /* the state of the alarm. */ + enum k_alarm_state state; +}; + +#define Z_ALARM_INITIALIZER(handler, cancel, ud) (struct k_alarm) \ + { \ + .node = {},\ + .deadline = 0, \ + .handler_fn = handler, \ + .cancel_fn = cancel, \ + .user_data = ud, \ + .state = K_ALARM_UNSCHEDULED, \ + } + +#define K_ALARM_DEFINE(name, handler, cancel, user_data) \ + struct k_alarm name \ + __in_section(_k_alarm, static, name) = \ + Z_ALARM_INITIALIZER(handler, cancel, user_data) + +/** Ensure the RTC driving the system clock is configured to generate + * an interrupt for the next scheduled deadline. + * + * This is invoked by the alarm infrastructure whenever the head of + * the scheduled alarm list changes. The implementation should be + * provided by the platform-specific system timer module. + * + * @note This is not user API. + */ +void z_alarm_update_deadline(void); + +/** Determine how long until alarm processing is required. + * + * @note This is not user API. + * + * @param deadlinep pointer to a location to store the absolute + * deadline of the first scheduled alarm. The referenced object is + * only modified if this function returns a positive value. + * + * @retval negative if no alarms are ready or scheduled. + * + * @retval 0 if at least one alarm is in the ready queue. + * + * @retval positive if there is a scheduled alarm. *deadlinep will + * contain the corresponding deadline, which may be in the past. + */ +int k_alarm_next_deadline_(u32_t *deadlinep);
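For orientation, this is how a timer driver might consume k_alarm_next_deadline_() when programming its compare register; a hedged sketch in which set_compare() and MAX_IDLE_DELTA are hypothetical driver-local names (the nRF driver in patch 3 follows this shape):

/* Program the next wakeup from the alarm queues. */
static void program_wakeup(u32_t now)
{
	u32_t deadline;
	int rc = k_alarm_next_deadline_(&deadline);

	if (rc < 0) {
		set_compare(now + MAX_IDLE_DELTA); /* nothing pending */
	} else if (rc == 0) {
		set_compare(now);                  /* ready alarms: fire ASAP */
	} else {
		u32_t delay = deadline - now;

		/* The deadline may already be in the past (signed test). */
		if ((s32_t)delay <= 0) {
			delay = 0;
		} else if (delay > MAX_IDLE_DELTA) {
			delay = MAX_IDLE_DELTA;
		}
		set_compare(now + delay);
	}
}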
+ +/** Move all scheduled alarms that are due at or before @p now to the end + * of the ready list. + * + * This is invoked from the platform-specific timer module interrupt + * handler when the system cycle counter reaches a value relevant to + * the alarm infrastructure. + * + * @note This is not user API. + * + * @retval negative if a non-empty ready list was left unchanged. + * + * @retval zero if there are no ready alarms. + * + * @retval positive the number of alarms added to the ready list. + */ +int k_alarm_split_(u32_t now); + +/** Walk the set of ready alarms invoking each one's handler. + * + * This is invoked from the platform-specific timer module interrupt + * handler after the system clocks have been updated and after any + * k_timer processing has completed. It iteratively invokes the + * handlers for each ready alarm in order of scheduling within + * deadline. + * + * @note This is not user API. + */ +void k_alarm_process_ready_(void); + +/** Schedule the alarm to fire at the corresponding deadline. + * + * Normally an alarm can be scheduled only when its state is + * #K_ALARM_UNSCHEDULED, #K_ALARM_ACTIVE, or #K_ALARM_CANCELLED. The + * @p flags parameter can be used to allow scheduling when in state + * #K_ALARM_SCHEDULED under certain circumstances. + * + * @param alarm the alarm to be scheduled. This must be in one of the + * states described above. + * + * @param deadline the low 32 bits of the system clock time at which + * the alarm should be scheduled. If this is not less than 2^31 ticks + * in the future the deadline will be interpreted to have passed, and + * the alarm will immediately transition to the READY state. + * + * @param flags options that affect the scheduling behavior, including + * `K_ALARM_FLAG_ERROR_IF_LATE`, `K_ALARM_FLAG_REPLACE`, and + * `K_ALARM_FLAG_IF_SOONER`. + * + * @retval positive if the alarm has been scheduled to fire in the future. + * + * @retval zero if the alarm has been added to the ready list. + * + * @retval -EINVAL if @p alarm is null. + * + * @retval -EINVAL if the alarm state is corrupted. + * + * @retval -EBUSY if the alarm is not in UNSCHEDULED, ACTIVE, or + * CANCELLED state. + */ +__syscall int k_alarm_schedule(struct k_alarm *alarm, + u32_t deadline, + u32_t flags); + +int z_impl_k_alarm_schedule(struct k_alarm *alarm, + u32_t deadline, + u32_t flags); + +/** Cancel a scheduled or ready alarm. + * + * If a non-error result is returned the registered cancel handler + * will have been invoked, and if it did not reschedule the alarm the + * alarm is left unscheduled. + * + * @retval positive if the alarm was scheduled but had not reached its + * deadline at the point the cancellation was requested. + * + * @retval 0 if the alarm had reached its deadline but the callback + * had not been invoked at the point where the cancellation was requested. + * + * @retval -EINVAL if the alarm was in any other state than SCHEDULED + * or READY when the cancellation was requested. + */ +__syscall int k_alarm_cancel(struct k_alarm *alarm); + +int z_impl_k_alarm_cancel(struct k_alarm *alarm); + +/** Retrieve the user data pointer associated with the alarm. */ +__syscall void *k_alarm_get_user_data(struct k_alarm *alarm); + +static inline void *z_impl_k_alarm_get_user_data(struct k_alarm *alarm) +{ + return alarm->user_data; +}
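The user-data accessors are the intended way to thread context through to the callbacks; a minimal sketch with a hypothetical context struct:

/* Hypothetical context threaded through user_data. */
struct alarm_ctx {
	u32_t fired;
};

static struct alarm_ctx ctx;

static void counting_handler(struct k_alarm *alarm, void *ud)
{
	struct alarm_ctx *cp = ud; /* same pointer passed at definition */

	cp->fired++;
}

K_ALARM_DEFINE(counting_alarm, counting_handler, NULL, &ctx);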
+ +/** Associate user data with the alarm. + * + * This data is passed along with the alarm to the alarm handler and + * cancel callbacks. + */ +__syscall void k_alarm_set_user_data(struct k_alarm *alarm, + void *user_data); + +static inline void z_impl_k_alarm_set_user_data(struct k_alarm *alarm, + void *user_data) +{ + alarm->user_data = user_data; +} + +/** Initialize a dynamically allocated alarm. + * + * @warning This must be invoked only while the alarm is in an + * unscheduled (or uninitialized) state. + * + * @param alarm pointer to the alarm structure to initialize. + * + * @param handler the handler to be invoked when the alarm deadline is + * reached. (A null pointer may be passed if you don't want the alarm + * to have any useful effect.) + * + * @param cancel the handler to be invoked if the alarm is cancelled. + * Pass a null pointer if no special handling is required at + * cancellation. + * + * @param user_data initializes the alarm callback @c ud parameter. + */ +void k_alarm_init(struct k_alarm *alarm, + k_alarm_handler_t handler, + k_alarm_cancel_t cancel, + void *user_data); + +#endif /* CONFIG_ALARM */ + /** * INTERNAL_HIDDEN @endcond */ diff --git a/include/linker/common-ram.ld b/include/linker/common-ram.ld index 856188ac79a59c..b3b20bf625e0ce 100644 --- a/include/linker/common-ram.ld +++ b/include/linker/common-ram.ld @@ -56,6 +56,15 @@ _k_timer_list_end = .; } GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION) +#ifdef CONFIG_ALARM + SECTION_DATA_PROLOGUE(_k_alarm_area,,SUBALIGN(4)) + { + _k_alarm_list_start = .; + KEEP(*("._k_alarm.static.*")) + _k_alarm_list_end = .; + } GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION) +#endif /* CONFIG_ALARM */ + SECTION_DATA_PROLOGUE(_k_mem_slab_area,,SUBALIGN(4)) { _k_mem_slab_list_start = .; diff --git a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt index 1f04d1dae6fc31..c74980c6d5da9d 100644 --- a/kernel/CMakeLists.txt +++ b/kernel/CMakeLists.txt @@ -37,6 +37,7 @@ set_target_properties( target_sources_ifdef(CONFIG_INT_LATENCY_BENCHMARK kernel PRIVATE int_latency_bench.c) target_sources_ifdef(CONFIG_STACK_CANARIES kernel PRIVATE compiler_stack_protect.c) target_sources_ifdef(CONFIG_SYS_CLOCK_EXISTS kernel PRIVATE timeout.c timer.c) +target_sources_ifdef(CONFIG_ALARM kernel PRIVATE alarm.c) target_sources_ifdef(CONFIG_ATOMIC_OPERATIONS_C kernel PRIVATE atomic_c.c) target_sources_if_kconfig( kernel PRIVATE poll.c) diff --git a/kernel/Kconfig b/kernel/Kconfig index 034e57208f60e1..372995832e630e 100644 --- a/kernel/Kconfig +++ b/kernel/Kconfig @@ -457,6 +457,14 @@ endmenu menu "Other Kernel Object Options" +config ALARM + bool "Kernel alarm infrastructure" + default n + help + This feature enables the k_alarm infrastructure, which + provides schedulable callbacks at the resolution of the system + clock, unmediated by any tick concept.
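As a usage sketch of the infrastructure this option enables (the handler and alarm names are hypothetical; see the kernel.h API above):

static void one_shot_expired(struct k_alarm *alarm, void *ud)
{
	/* handler_fn runs in ISR context: keep it short. */
}

K_ALARM_DEFINE(one_shot, one_shot_expired, NULL, NULL);

void arm_one_second(void)
{
	/* Deadlines are absolute, in low-32-bit system clock ticks. */
	u32_t deadline = k_cycle_get_32() + CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;

	(void)k_alarm_schedule(&one_shot, deadline, 0);
}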
+ config NUM_MBOX_ASYNC_MSGS int "Maximum number of in-flight asynchronous mailbox messages" default 10 diff --git a/kernel/alarm.c b/kernel/alarm.c new file mode 100644 index 00000000000000..1afde0f6008f06 --- /dev/null +++ b/kernel/alarm.c @@ -0,0 +1,251 @@ +/* + * Copyright (c) 2019 Peter Bigot Consulting, LLC + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +extern struct k_alarm _k_alarm_list_start[]; +extern struct k_alarm _k_alarm_list_end[]; + +static struct k_spinlock lock; +static sys_dlist_t scheduled_list = SYS_DLIST_STATIC_INIT(&scheduled_list); +static sys_dlist_t ready_list = SYS_DLIST_STATIC_INIT(&ready_list); + +/** Return a pointer to the k_alarm that contains the node if it has a + * deadline at or before the provided @p when. Return a null pointer if + * the k_alarm is scheduled strictly after the provided @p when. + */ +static ALWAYS_INLINE struct k_alarm *alarm_due_by(sys_dnode_t *node, + u32_t when) +{ + struct k_alarm *cap = CONTAINER_OF(node, struct k_alarm, node); + + if ((s32_t)(cap->deadline - when) > 0) { + return NULL; + } + return cap; +} + +/** Add alarm to the provided list before the first alarm that has a + * later deadline, or at the end if necessary. + * + * Return true if the head of @p list changed as a result of this + * addition. + */ +static bool link_alarm(sys_dlist_t *list, + struct k_alarm *alarm, + u32_t now) +{ + sys_dnode_t *node = &alarm->node; + sys_dnode_t *cp = sys_dlist_peek_head(list); + + while (cp) { + if (!alarm_due_by(cp, alarm->deadline)) { + break; + } + cp = sys_dlist_peek_next_no_check(list, cp); + } + if (cp) { + sys_dlist_insert(cp, node); + } else { + sys_dlist_append(list, node); + } + return sys_dlist_is_head(list, node); +} + +void k_alarm_init(struct k_alarm *alarm, + k_alarm_handler_t handler, + k_alarm_cancel_t cancel, + void *user_data) +{ + *alarm = Z_ALARM_INITIALIZER(handler, cancel, user_data); +} + +int k_alarm_next_deadline_(u32_t *deadline) +{ + int rv = -1; + k_spinlock_key_t key = k_spin_lock(&lock); + + if (!sys_dlist_is_empty(&ready_list)) { + rv = 0; + goto unlock; + } + + sys_dnode_t *hp = sys_dlist_peek_head(&scheduled_list); + + if (hp) { + struct k_alarm *ap = CONTAINER_OF(hp, struct k_alarm, node); + *deadline = ap->deadline; + rv = 1; + } +unlock: + k_spin_unlock(&lock, key); + return rv; +} + +int k_alarm_split_(u32_t now) +{ + int rv = 0; + k_spinlock_key_t key = k_spin_lock(&lock); + sys_dnode_t *cp = sys_dlist_peek_head(&scheduled_list); + + while (cp) { + struct k_alarm *cap = alarm_due_by(cp, now); + + if (!cap) { + break; + } + cap->state = K_ALARM_READY; + ++rv; + cp = sys_dlist_peek_next_no_check(&scheduled_list, cp); + } + if ((rv == 0) + && !sys_dlist_is_empty(&ready_list)) { + rv = -1; + } + + if (cp) { + sys_dlist_split(&ready_list, &scheduled_list, cp); + } else { + sys_dlist_join(&ready_list, &scheduled_list); + } + + k_spin_unlock(&lock, key); + return rv; +} + +void k_alarm_process_ready_(void) +{ + k_spinlock_key_t key = k_spin_lock(&lock); + + do { + sys_dnode_t *cp = sys_dlist_get(&ready_list); + + if (!cp) { + break; + } + + struct k_alarm *cap = CONTAINER_OF(cp, struct k_alarm, node); + + cap->state = K_ALARM_ACTIVE; + if (cap->handler_fn) { + k_spin_unlock(&lock, key); + cap->handler_fn(cap, cap->user_data); + key = k_spin_lock(&lock); + } + if (cap->state == K_ALARM_ACTIVE) { + cap->state = K_ALARM_UNSCHEDULED; + } + } while (true); + k_spin_unlock(&lock, key); +} + +int z_impl_k_alarm_schedule(struct k_alarm *alarm, + u32_t deadline, + u32_t flags) +{ + int rv
= -EINVAL; + bool update_deadline = false; + + if (!alarm) { + goto out; + } + k_spinlock_key_t key = k_spin_lock(&lock); + + bool may_resched = true + && (alarm->state == K_ALARM_SCHEDULED) + && (flags & (K_ALARM_FLAG_REPLACE | K_ALARM_FLAG_IF_SOONER)); + + if (sys_dnode_is_linked(&alarm->node) + && !may_resched) { + rv = -EINVAL; + goto unlock; + } + if ((alarm->state != K_ALARM_UNSCHEDULED) + && (alarm->state != K_ALARM_ACTIVE) + && (alarm->state != K_ALARM_CANCELLED) + && !may_resched) { + rv = -EBUSY; + goto unlock; + } + + u32_t now = k_cycle_get_32(); + s32_t delay = deadline - now; + + if (may_resched) { + if (K_ALARM_FLAG_REPLACE & flags) { + /* Unconditional reschedule */ + } else if ((K_ALARM_FLAG_IF_SOONER & flags) + && (delay >= (s32_t)(alarm->deadline - now))) { + /* Not sooner; keep unchanged */ + rv = 1; + goto unlock; + } + sys_dlist_remove(&alarm->node); + } + + alarm->deadline = deadline; + if (delay > 0) { + alarm->state = K_ALARM_SCHEDULED; + update_deadline = link_alarm(&scheduled_list, alarm, now); + rv = 1; + } else if (K_ALARM_FLAG_ERROR_IF_LATE & flags) { + rv = -EINVAL; + } else { + alarm->state = K_ALARM_READY; + update_deadline = link_alarm(&ready_list, alarm, now); + rv = 0; + } + +unlock: + k_spin_unlock(&lock, key); +out: + if (update_deadline) { + z_alarm_update_deadline(); + } + + return rv; +} + +int z_impl_k_alarm_cancel(struct k_alarm *alarm) +{ + int rv = -EINVAL; + bool update_deadline = false; + + if (!alarm) { + goto out; + } + k_spinlock_key_t key = k_spin_lock(&lock); + + if (alarm->state == K_ALARM_SCHEDULED) { + update_deadline = (&alarm->node + == sys_dlist_peek_head(&scheduled_list)); + rv = 1; + sys_dlist_remove(&alarm->node); + } else if (alarm->state == K_ALARM_READY) { + sys_dlist_remove(&alarm->node); + rv = 0; + } else { + goto unlock; + } + alarm->state = K_ALARM_CANCELLED; + if (alarm->cancel_fn) { + k_spin_unlock(&lock, key); + alarm->cancel_fn(alarm, alarm->user_data); + key = k_spin_lock(&lock); + } + if (alarm->state == K_ALARM_CANCELLED) { + alarm->state = K_ALARM_UNSCHEDULED; + } +unlock: + k_spin_unlock(&lock, key); +out: + if (update_deadline) { + z_alarm_update_deadline(); + } + + return rv; +} diff --git a/scripts/gen_kobject_list.py b/scripts/gen_kobject_list.py index c5559407df47e3..83318c64d352cc 100755 --- a/scripts/gen_kobject_list.py +++ b/scripts/gen_kobject_list.py @@ -84,6 +84,7 @@ ("k_stack", (None, False)), ("k_thread", (None, False)), ("k_timer", (None, False)), + ("k_alarm", ("CONFIG_ALARM", False)), ("_k_thread_stack_element", (None, False)), ("device", (None, False)), ("sys_mutex", (None, True)) diff --git a/scripts/sanitycheck b/scripts/sanitycheck index 662172cda2b47d..90ef3558ee8441 100755 --- a/scripts/sanitycheck +++ b/scripts/sanitycheck @@ -891,7 +891,7 @@ class SizeCalculator: alloc_sections = ["bss", "noinit", "app_bss", "app_noinit", "ccm_bss", "ccm_noinit"] rw_sections = ["datas", "initlevel", "exceptions", "initshell", - "_static_thread_area", "_k_timer_area", + "_static_thread_area", "_k_timer_area", "_k_alarm_area", "_k_mem_slab_area", "_k_mem_pool_area", "sw_isr_table", "_k_sem_area", "_k_mutex_area", "app_shmem_regions", "_k_fifo_area", "_k_lifo_area", "_k_stack_area", From 5b3048975ee9aaec9988b32306f7f226cacd77d6 Mon Sep 17 00:00:00 2001 From: "Peter A. 
Bigot" Date: Thu, 18 Apr 2019 09:21:59 -0500 Subject: [PATCH 3/4] [DNM] drivers: timer: nrf_timalarm Provides an alternative timer infrastructure that is built around k_alarm support, and implements the legacy tick-based timeout API through a k_alarm. Upstream-Status: Inappropriate [conflicts with upstream project] Signed-off-by: Peter A. Bigot --- CODEOWNERS | 1 + drivers/timer/CMakeLists.txt | 1 + drivers/timer/Kconfig | 13 + drivers/timer/nrf_rtc_timalarm.c | 459 +++++++++++++++++++++++++++ soc/arm/nordic_nrf/Kconfig.defconfig | 16 +- 5 files changed, 488 insertions(+), 2 deletions(-) create mode 100644 drivers/timer/nrf_rtc_timalarm.c diff --git a/CODEOWNERS b/CODEOWNERS index f551e7bb917cec..7f630f58661cd1 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -149,6 +149,7 @@ /drivers/timer/altera_avalon_timer_hal.c @wentongwu /drivers/timer/riscv_machine_timer.c @nategraff-sifive @kgugala @pgielda /drivers/timer/litex_timer.c @mateusz-holenko @kgugala @pgielda +/drivers/timer/nrf_rtc_timalarm.c @pabigot /drivers/usb/ @jfischer-phytec-iot @finikorg /drivers/usb/device/usb_dc_stm32.c @ydamigos @loicpoulain /drivers/i2c/i2c_ll_stm32* @ldts @ydamigos diff --git a/drivers/timer/CMakeLists.txt b/drivers/timer/CMakeLists.txt index 88b534c8ab25ee..3c09f2dcd4a54a 100644 --- a/drivers/timer/CMakeLists.txt +++ b/drivers/timer/CMakeLists.txt @@ -6,6 +6,7 @@ zephyr_sources_ifdef(CONFIG_ARCV2_TIMER arcv2_timer0.c) zephyr_sources_if_kconfig( loapic_timer.c) zephyr_sources_ifdef(CONFIG_ALTERA_AVALON_TIMER altera_avalon_timer_hal.c) zephyr_sources_if_kconfig( nrf_rtc_timer.c) +zephyr_sources_if_kconfig( nrf_rtc_timalarm.c) zephyr_sources_if_kconfig( riscv_machine_timer.c) zephyr_sources_if_kconfig( rv32m1_lptmr_timer.c) zephyr_sources_if_kconfig( cortex_m_systick.c) diff --git a/drivers/timer/Kconfig b/drivers/timer/Kconfig index d779edffb21cc0..4b0cb9416c29a4 100644 --- a/drivers/timer/Kconfig +++ b/drivers/timer/Kconfig @@ -111,12 +111,25 @@ config NRF_RTC_TIMER bool "nRF Real Time Counter (NRF_RTC1) Timer" depends on CLOCK_CONTROL depends on SOC_COMPATIBLE_NRF + depends on !NRF_RTC_TIMALARM select TICKLESS_CAPABLE help This module implements a kernel device driver for the nRF Real Time Counter NRF_RTC1 and provides the standard "system clock driver" interfaces. +config NRF_RTC_TIMALARM + bool "nRF RTC Timer and Kernel Alarm" + depends on CLOCK_CONTROL + depends on SOC_COMPATIBLE_NRF + depends on ALARM + select TICKLESS_CAPABLE + help + This module implements a kernel device driver for the nRF Real Time + Counter NRF_RTC1 that provides the low-level k_alarm API, and + adds a k_alarm to implement the standard "system clock driver" + interfaces. + config RISCV_MACHINE_TIMER bool "RISCV Machine Timer" depends on SOC_FAMILY_RISCV_PRIVILEGE diff --git a/drivers/timer/nrf_rtc_timalarm.c b/drivers/timer/nrf_rtc_timalarm.c new file mode 100644 index 00000000000000..d3c72098d3727f --- /dev/null +++ b/drivers/timer/nrf_rtc_timalarm.c @@ -0,0 +1,459 @@ +/* + * Copyright (c) 2016-2017 Nordic Semiconductor ASA + * Copyright (c) 2018 Intel Corporation + * Copyright (c) 2019 Peter Bigot Consulting, LLC + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include +#include +#include +#include + +/* Zephyr RTC resource use: + * - RTC0 is used for Bluetooth. + * - RTC1 is used for the system timer (here); only CC[0] is used. + * - RTC2 is used for Nordic HAL 802.15.4. + */ + +#define RTC NRF_RTC1 + +/* Alarm rework notes. + * + * A counter *wraps* when the counter value increments to zero. + * + * A counter *laps* when the counter value increments back to the + * reference counter value. + * + * The *span* of a counter is the number of counter increments + * required to lap the counter. + * + * The span of a counter is required to be 2^S, i.e. the counter + * values exactly match the values of an S-bit unsigned integer. + * + * The signed difference between two counter values with an S-bit span + * is the 2s-complement interpretation of the unsigned S-bit + * difference between the values. + * + * The implementation here assumes: + * * A 64-bit cycle clock counting at 32 KiHz. + * * Deadlines that are expressed as 32-bit values matching the low 32 + * bits of the cycle clock. + * * The hardware counter is 24-bit. The cycle clock matches the + * hardware counter in its low 24 bits. + * + * Deadlines are in the past if the signed difference between the + * deadline and the cycle clock is non-positive. + * + * The maximum interval between alarm processing is 2^23 ticks, to + * ensure that a timer FLIH delayed by higher-priority tasks will not + * result in the hardware counter lapping. + */
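The past/future test implied by these notes, as a self-contained sketch (mirroring alarm_due_by() in kernel/alarm.c):

/* A deadline with an S-bit span has passed when the signed S-bit
 * difference (deadline - now) is non-positive.  For S = 32:
 */
static inline bool deadline_passed(u32_t now, u32_t deadline)
{
	return (s32_t)(deadline - now) <= 0;
}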
+ +/* RTC counter has 24 valid bits. */ +#define COUNTER_SPAN (1U << 24) + +/* Mask to isolate the valid bits of the counter. */ +#define COUNTER_MASK (COUNTER_SPAN - 1) + +/* RTC requires that a stored compare value be at least 2 ticks in + * advance of the counter value in order to guarantee the compare + * event is detected. Assume that the counter will increment at most + * twice between when it is read and the CC register is updated with + * the value derived from the reading. + */ +#define COUNTER_MIN_DELTA 4U + +/* The system clock infrastructure updates the 64-bit clock base value + * in the FLIH whenever an alarm event occurs. Ensure that there's an + * alarm event at least twice for each counter wrap, to avoid the + * possibility of delayed FLIH execution causing a counter lap to be + * missed. + */ +#define COUNTER_MAX_DELTA (COUNTER_SPAN / 2) + +/* The number of system clock ticks per configured tick. + * + * Note that unless CONFIG_SYS_CLOCK_TICKS_PER_SEC is an integral + * power of 2 the resulting system will not be synchronized to the + * standard understanding of how long 1 s actually takes. + */ +#define SC_PER_TICK (u32_t)((CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC \ + / CONFIG_SYS_CLOCK_TICKS_PER_SEC)) + +/* The maximum number of ticks that we can be sure will, when scaled + * and added to last_tick_sc, produce a value that is interpreted as + * being in the future. The chosen SC maximum is one quarter the 32 + * bit counter span. + * + * This serves as the upper bound for any requested tick-based delay + * in z_clock_set_timeout(). + */ +#define MAX_TICKS ((1U << 30) / SC_PER_TICK) + +static struct k_spinlock lock; + +/* Last checkpointed cycle counter value. + * + * By design this is updated at least once every COUNTER_MAX_DELTA ticks. + */ +static u64_t last_cycles; + +/* Flag indicating that the RTC FLIH is active. This is used to + * bypass compare register updates while the alarm queue is + * potentially in flux. + */ +static bool volatile in_flih; + +/* The number of cycles that must be added to last_cycles to produce + * the current cycle counter value.
 + */ +static ALWAYS_INLINE u32_t cycles_delta_di(void) +{ + u32_t now24 = RTC->COUNTER; + s32_t delta24 = now24 - (COUNTER_MASK & (u32_t)last_cycles); + + if (delta24 < 0) { + delta24 += COUNTER_SPAN; + } + return delta24; +} + +static ALWAYS_INLINE u32_t sysclock_get_32(void) +{ + return last_cycles + cycles_delta_di(); +} + +/* The low 32 bits of the cycle counter. */ +u32_t z_timer_cycle_get_32(void) +{ + k_spinlock_key_t key = k_spin_lock(&lock); + + u32_t ret = sysclock_get_32(); + + k_spin_unlock(&lock, key); + return ret; +} + +/* The full cycle counter. This is not standard API; it should be. */ +u64_t z_timer_cycle_get_64(void) +{ + k_spinlock_key_t key = k_spin_lock(&lock); + + u64_t ret = last_cycles; + + ret += cycles_delta_di(); + + k_spin_unlock(&lock, key); + return ret; +} + +/* In a non-tickless system a tick should occur every SC_PER_TICK + * system clock increments. z_clock_announce() is invoked with 1 at + * each alarm event. + * + * In a tickless system the timeout infrastructure uses an alarm + * deadline that is an integer multiple of SC_PER_TICK, where the + * multiplier is stored in next_tick_delta and the difference in ticks + * is reflected in the value of next_tick_sc. z_clock_announce() is + * invoked with the value that was in next_tick_delta when the alarm + * fires. + */ + +/* Low 32 bits of the system clock at the last tick event. */ +static u32_t last_tick_sc; + +/* Low 32 bits of the system clock at the next scheduled tick. This + * is also the deadline of tick_alarm when it is scheduled/ready. + */ +static u32_t next_tick_sc; + +/* The number of SC_PER_TICK increments expressed by the difference + * between next_tick_sc and last_tick_sc. + */ +static u32_t next_tick_delta; + +/* Flag indicating that the tick alarm has been scheduled to fire as + * soon as possible, and rescheduling it is only going to delay the + * announcement. + */ +static bool tick_asap; + +static void +tick_alarm_handler(struct k_alarm *alarm, + void *ud) +{ + u32_t ntd; + u32_t nts; + k_spinlock_key_t key = k_spin_lock(&lock); + + ntd = next_tick_delta; + if (ntd != 0) { + last_tick_sc = next_tick_sc; + if (IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { + next_tick_delta = 0; + tick_asap = false; + } else { + next_tick_sc += SC_PER_TICK; + nts = next_tick_sc; + } + } + + k_spin_unlock(&lock, key); + + if (ntd != 0) { + if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { + (void)k_alarm_schedule(alarm, nts, 0); + } + z_clock_announce(ntd); + } +} + +static K_ALARM_DEFINE(tick_alarm, tick_alarm_handler, NULL, NULL); + +void z_clock_set_timeout(s32_t ticks, bool idle) +{ + ARG_UNUSED(idle); + +#ifdef CONFIG_TICKLESS_KERNEL + /* Behavior interpreted from the documentation for this + * function: + * + * ticks=K_FOREVER disables the tick alarm. + * + * ticks=INT_MAX enables the tick alarm at the maximum + * possible delay. + * + * It is permitted to timeout early, as long as the wakeup is + * aligned to a tick boundary and the tick is properly + * announced. + * + * The number of ticks to announce is tied to the deadline, + * and must be positive. + * + * A non-positive tick schedules a wakeup for the next tick + * that can be announced. This may produce a deadline that's + * passed (causing immediate callback) if uncounted ticks have + * occurred. + * + * A positive tick schedules for the requested number of ticks + * after the last announced tick. This too may be in the + * past. + */ + + /* Cancel any pending tick alarm: k_alarm_schedule() will not + * replace an already-scheduled alarm unless given a replace + * flag, so the reschedule below must start from an + * unscheduled state.
*/ + (void)k_alarm_cancel(&tick_alarm); + + if (ticks == K_FOREVER) { + /* "no future timer interrupts are expected or + * required" + * + * At this point we have no obligation to maintain the + * tick clock; consequently in tick calculation code + * we are entitled to act as though last_tick_sc is + * within a half 32-bit span of the current time, an + * invariant maintained by MAX_TICKS. + */ + return; + } + + u32_t nts; + k_spinlock_key_t key = k_spin_lock(&lock); + u32_t ntd = (sysclock_get_32() - last_tick_sc) / SC_PER_TICK; + + if (ntd == 0) { + /* Can't announce zero ticks */ + ntd = 1; + } + if (ticks > 0) { + /* Kernel wants a timeout with a positive offset + * relative to the last announced tick. + */ + ntd = MAX(ntd, MIN(ticks, MAX_TICKS)); + } + next_tick_delta = ntd; + next_tick_sc = last_tick_sc + next_tick_delta * SC_PER_TICK; + nts = next_tick_sc; + k_spin_unlock(&lock, key); + + (void)k_alarm_schedule(&tick_alarm, nts, 0); +#endif +} + +u32_t z_clock_elapsed(void) +{ + u32_t rv = 0; +#ifdef CONFIG_TICKLESS_KERNEL + k_spinlock_key_t key = k_spin_lock(&lock); + + rv = (sysclock_get_32() - last_tick_sc) / SC_PER_TICK; + + k_spin_unlock(&lock, key); +#endif + return rv; +} + +/* Implement z_alarm_update_deadline. */ +static void alarm_update_deadline_(u32_t now) +{ + u32_t compare = now; + u32_t deadline = 0; + int rc = k_alarm_next_deadline_(&deadline); + + if (rc < 0) { + /* No ready or scheduled alarms; use maximum delay */ + compare += COUNTER_MAX_DELTA; + } else if (rc > 0) { + /* Next event at deadline. If that's now or in the + * past target a compare of now; otherwise queue an + * event at that deadline or the maximum delta from + * now, whichever is sooner. + */ + u32_t delay = deadline - now; + + if ((s32_t)delay > 0) { + if (delay < COUNTER_MAX_DELTA) { + compare += delay; + } else { + compare += COUNTER_MAX_DELTA; + } + } else { + /* Alarm is (past) due, fire ASAP */ + } + } else { + /* Something is ready, fire ASAP */ + } + + /* compare is at most COUNTER_MAX_DELTA past now. The RTC + * counter should not have advanced more than + * COUNTER_MAX_DELTA - 2 ticks past now. + * + * If the next event is already due and we haven't yet cleared + * the last event, leave it set so we'll re-enter the + * FLIH. + */ + if ((compare == now) + && RTC->EVENTS_COMPARE[0]) { + return; + } + + /* Make sure compare is at least COUNTER_MIN_DELTA past now, + * then clear the COMPARE event and set the compare value. + */ + if (COUNTER_MIN_DELTA > (compare - now)) { + compare = now + COUNTER_MIN_DELTA; + } + RTC->EVENTS_COMPARE[0] = 0; + RTC->CC[0] = compare; +} + +void z_alarm_update_deadline(void) +{ + k_spinlock_key_t key = k_spin_lock(&lock); + + /* + * Skip the update if we get here because somebody scheduled + * an alarm during a timer or alarm callback, because we're + * going to do an update just before we leave the FLIH when we + * can adjust for time that passed while processing those + * callbacks. + */ + if (!in_flih) { + alarm_update_deadline_(sysclock_get_32()); + } + k_spin_unlock(&lock, key); +} + +/* Note: this function has public linkage, and MUST have this + * particular name. The platform architecture itself doesn't care, + * but there is a test (tests/kernel/arm_irq_vector_table) that needs + * to find it so it can set it in a custom vector table. We should + * probably abstract that better at some point (e.g. query and reset + * it by pointer at runtime, maybe?) so we don't have this leaky + * symbol.
 + */ +void rtc1_nrf_isr(void *arg) +{ + ARG_UNUSED(arg); + + bool do_ready = false; + u32_t now; + k_spinlock_key_t key = k_spin_lock(&lock); + + in_flih = true; + if (RTC->EVENTS_COMPARE[0]) { + /* + * Refresh the captured system clock. Transfer all + * alarms due at or before that clock to the ready + * queue. + * + * Note that we don't clear EVENTS_COMPARE here. Both + * timer and alarm callbacks may be invoked after the + * currently-held lock is released; those callbacks + * will take time, and the next scheduled alarm may + * come due before we get back to complete the ISR. + * By leaving EVENTS_COMPARE set we can + * immediately re-enter the FLIH to process alarms + * that became due while we were busy, without having + * to delay 122 us just to be sure the COMPARE event + * is triggered. + */ + last_cycles += cycles_delta_di(); + do_ready = true; + now = last_cycles; + } + + k_spin_unlock(&lock, key); + if (do_ready + && (k_alarm_split_(now) != 0)) { + k_alarm_process_ready_(); + } + + /* Update the alarm COMPARE register for the next scheduled + * alarm event. If it's already due because of time spent in + * callbacks any pending EVENTS_COMPARE will remain set so we + * don't incur the COUNTER_MIN_DELTA delay. + */ + in_flih = false; + z_alarm_update_deadline(); +} + +int z_clock_driver_init(struct device *device) +{ + struct device *clock; + + ARG_UNUSED(device); + + clock = device_get_binding(DT_NORDIC_NRF_CLOCK_0_LABEL "_32K"); + if (!clock) { + return -1; + } + + clock_control_on(clock, (void *)CLOCK_CONTROL_NRF_K32SRC); + + nrf_rtc_prescaler_set(RTC, 0); + nrf_rtc_task_trigger(RTC, NRF_RTC_TASK_CLEAR); + + RTC->EVENTS_COMPARE[0] = 0; + RTC->INTENSET = (RTC_INTENSET_COMPARE0_Set + << RTC_INTENSET_COMPARE0_Pos); + RTC->CC[0] = COUNTER_MAX_DELTA; + + IRQ_CONNECT(RTC1_IRQn, 1, rtc1_nrf_isr, 0, 0); + NVIC_ClearPendingIRQ(RTC1_IRQn); + irq_enable(RTC1_IRQn); + + nrf_rtc_task_trigger(RTC, NRF_RTC_TASK_START); + + if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) { + next_tick_delta = 1; + next_tick_sc = SC_PER_TICK; + k_alarm_schedule(&tick_alarm, SC_PER_TICK, 0); + } + + return 0; +} diff --git a/soc/arm/nordic_nrf/Kconfig.defconfig b/soc/arm/nordic_nrf/Kconfig.defconfig index f3756b4e3bbfc7..58d70dd7c431f8 100644 --- a/soc/arm/nordic_nrf/Kconfig.defconfig +++ b/soc/arm/nordic_nrf/Kconfig.defconfig @@ -7,15 +7,27 @@ if SOC_FAMILY_NRF +config ALARM + default y + source "soc/arm/nordic_nrf/*/Kconfig.defconfig.series" if SYS_CLOCK_EXISTS # If the kernel has timer support, enable both clock control and timer config CLOCK_CONTROL - default y + default y + +config ALARM + default y + +config NRF_RTC_TIMALARM + depends on ALARM + default y if ALARM config NRF_RTC_TIMER - default y + depends on !ALARM + default y + endif # SYS_CLOCK_EXISTS config SYS_CLOCK_HW_CYCLES_PER_SEC From 9c8ecef757b7e4afe998bea89ec358f9683a6b0c Mon Sep 17 00:00:00 2001 From: "Peter A. Bigot" Date: Sun, 26 May 2019 09:54:12 -0500 Subject: [PATCH 4/4] [DNM] kernel: unconditionally declare k_alarm struct The syscall infrastructure generates wrappers for the k_alarm API even on platforms where it's not enabled, which causes build errors. Declare the required data structure when the feature is not enabled. Upstream-Status: Inappropriate [alarm must support all targets] Signed-off-by: Peter A. Bigot
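To make the fix concrete: with only a declaration in scope, pointer-typed uses such as the generated wrapper signatures still compile when CONFIG_ALARM=n. An illustrative (not generated) sketch:

/* Illustrative only: a declaration suffices for pointer use. */
struct k_alarm;

int maybe_cancel(struct k_alarm *alarm)
{
#ifdef CONFIG_ALARM
	return k_alarm_cancel(alarm);
#else
	ARG_UNUSED(alarm);
	return -ENOTSUP;
#endif
}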
--- include/kernel.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/kernel.h b/include/kernel.h index 892737d6cd4809..8d2f74c28fac1d 100644 --- a/include/kernel.h +++ b/include/kernel.h @@ -1722,6 +1722,11 @@ void k_alarm_init(struct k_alarm *alarm, k_alarm_cancel_t cancel, void *user_data); +#else /* CONFIG_ALARM */ + +/* Declare struct to satisfy syscall infrastructure */ +struct k_alarm; + #endif /* CONFIG_ALARM */ /**