From b6bff6f557ecf8b92131471d5668e7b90a196fab Mon Sep 17 00:00:00 2001 From: Bret Ambrose Date: Tue, 12 Nov 2024 09:45:08 -0800 Subject: [PATCH] Event loop public api (#691) Co-authored-by: Bret Ambrose --- include/aws/io/event_loop.h | 411 ++++---------------- include/aws/io/private/event_loop_impl.h | 299 ++++++++++++++ include/aws/testing/io_testing_channel.h | 29 +- source/bsd/kqueue_event_loop.c | 4 +- source/channel.c | 1 + source/event_loop.c | 164 ++++---- source/exponential_backoff_retry_strategy.c | 5 +- source/linux/epoll_event_loop.c | 7 +- source/posix/pipe.c | 1 + source/posix/socket.c | 1 + source/s2n/s2n_tls_channel_handler.c | 11 +- source/windows/iocp/iocp_event_loop.c | 1 + source/windows/iocp/pipe.c | 1 + source/windows/iocp/socket.c | 1 + tests/alpn_handler_test.c | 1 + tests/byo_crypto_test.c | 6 +- tests/channel_test.c | 6 +- tests/default_host_resolver_test.c | 60 ++- tests/event_loop_test.c | 31 +- tests/exponential_backoff_retry_test.c | 25 +- tests/future_test.c | 1 + tests/pipe_test.c | 1 + tests/pkcs11_test.c | 6 +- tests/socket_handler_test.c | 15 +- tests/socket_test.c | 16 +- tests/standard_retry_test.c | 8 +- tests/tls_handler_test.c | 19 +- 27 files changed, 655 insertions(+), 476 deletions(-) create mode 100644 include/aws/io/private/event_loop_impl.h diff --git a/include/aws/io/event_loop.h b/include/aws/io/event_loop.h index 58041a4c7..098e9428e 100644 --- a/include/aws/io/event_loop.h +++ b/include/aws/io/event_loop.h @@ -6,80 +6,27 @@ * SPDX-License-Identifier: Apache-2.0. */ -#include -#include -#include - #include AWS_PUSH_SANE_WARNING_LEVEL -enum aws_io_event_type { - AWS_IO_EVENT_TYPE_READABLE = 1, - AWS_IO_EVENT_TYPE_WRITABLE = 2, - AWS_IO_EVENT_TYPE_REMOTE_HANG_UP = 4, - AWS_IO_EVENT_TYPE_CLOSED = 8, - AWS_IO_EVENT_TYPE_ERROR = 16, -}; - struct aws_event_loop; +struct aws_event_loop_group; +struct aws_shutdown_callback_options; struct aws_task; -struct aws_thread_options; - -#if AWS_USE_IO_COMPLETION_PORTS - -struct aws_overlapped; - -typedef void(aws_event_loop_on_completion_fn)( - struct aws_event_loop *event_loop, - struct aws_overlapped *overlapped, - int status_code, - size_t num_bytes_transferred); - -/** - * The aws_win32_OVERLAPPED struct is layout-compatible with OVERLAPPED as defined in . It is used - * here to avoid pulling in a dependency on which would also bring along a lot of bad macros, such - * as redefinitions of GetMessage and GetObject. Note that the OVERLAPPED struct layout in the Windows SDK can - * never be altered without breaking binary compatibility for every existing third-party executable, so there - * is no need to worry about keeping this definition in sync. - */ -struct aws_win32_OVERLAPPED { - uintptr_t Internal; - uintptr_t InternalHigh; - union { - struct { - uint32_t Offset; - uint32_t OffsetHigh; - } s; - void *Pointer; - } u; - void *hEvent; -}; /** - * Use aws_overlapped when a handle connected to the event loop needs an OVERLAPPED struct. - * OVERLAPPED structs are needed to make OS-level async I/O calls. - * When the I/O completes, the assigned aws_event_loop_on_completion_fn is called from the event_loop's thread. - * While the I/O is pending, it is not safe to modify or delete aws_overlapped. - * Call aws_overlapped_init() before first use. If the aws_overlapped will be used multiple times, call - * aws_overlapped_reset() or aws_overlapped_init() between uses. 
+ * @internal */ -struct aws_overlapped { - struct aws_win32_OVERLAPPED overlapped; - aws_event_loop_on_completion_fn *on_completion; - void *user_data; -}; - -#else /* !AWS_USE_IO_COMPLETION_PORTS */ - typedef void(aws_event_loop_on_event_fn)( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, void *user_data); -#endif /* AWS_USE_IO_COMPLETION_PORTS */ - +/** + * @internal + */ struct aws_event_loop_vtable { void (*destroy)(struct aws_event_loop *event_loop); int (*run)(struct aws_event_loop *event_loop); @@ -88,203 +35,49 @@ struct aws_event_loop_vtable { void (*schedule_task_now)(struct aws_event_loop *event_loop, struct aws_task *task); void (*schedule_task_future)(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos); void (*cancel_task)(struct aws_event_loop *event_loop, struct aws_task *task); -#if AWS_USE_IO_COMPLETION_PORTS int (*connect_to_io_completion_port)(struct aws_event_loop *event_loop, struct aws_io_handle *handle); -#else int (*subscribe_to_io_events)( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, aws_event_loop_on_event_fn *on_event, void *user_data); -#endif int (*unsubscribe_from_io_events)(struct aws_event_loop *event_loop, struct aws_io_handle *handle); void (*free_io_event_resources)(void *user_data); bool (*is_on_callers_thread)(struct aws_event_loop *event_loop); }; -struct aws_event_loop { - struct aws_event_loop_vtable *vtable; - struct aws_allocator *alloc; - aws_io_clock_fn *clock; - struct aws_hash_table local_data; - struct aws_atomic_var current_load_factor; - uint64_t latest_tick_start; - size_t current_tick_latency_sum; - struct aws_atomic_var next_flush_time; - void *impl_data; -}; - -struct aws_event_loop_local_object; -typedef void(aws_event_loop_on_local_object_removed_fn)(struct aws_event_loop_local_object *); - -struct aws_event_loop_local_object { - const void *key; - void *object; - aws_event_loop_on_local_object_removed_fn *on_object_removed; -}; - -struct aws_event_loop_options { - aws_io_clock_fn *clock; - struct aws_thread_options *thread_options; -}; - -typedef struct aws_event_loop *(aws_new_event_loop_fn)(struct aws_allocator *alloc, - const struct aws_event_loop_options *options, - void *new_loop_user_data); - -struct aws_event_loop_group { - struct aws_allocator *allocator; - struct aws_array_list event_loops; - struct aws_ref_count ref_count; - struct aws_shutdown_callback_options shutdown_options; -}; - -AWS_EXTERN_C_BEGIN - -#ifdef AWS_USE_IO_COMPLETION_PORTS -/** - * Prepares aws_overlapped for use, and sets a function to call when the overlapped operation completes. - */ -AWS_IO_API -void aws_overlapped_init( - struct aws_overlapped *overlapped, - aws_event_loop_on_completion_fn *on_completion, - void *user_data); - /** - * Prepares aws_overlapped for re-use without changing the assigned aws_event_loop_on_completion_fn. - * Call aws_overlapped_init(), instead of aws_overlapped_reset(), to change the aws_event_loop_on_completion_fn. + * Event loop group configuration options */ -AWS_IO_API -void aws_overlapped_reset(struct aws_overlapped *overlapped); +struct aws_event_loop_group_options { -/** - * Casts an aws_overlapped pointer for use as a LPOVERLAPPED parameter to Windows API functions - */ -AWS_IO_API -struct _OVERLAPPED *aws_overlapped_to_windows_overlapped(struct aws_overlapped *overlapped); -#endif /* AWS_USE_IO_COMPLETION_PORTS */ + /** + * How many event loops that event loop group should contain. 
For most group types, this implies + * the creation and management of an analagous amount of managed threads + */ + uint16_t loop_count; -/** - * Creates an instance of the default event loop implementation for the current architecture and operating system. - */ -AWS_IO_API -struct aws_event_loop *aws_event_loop_new_default(struct aws_allocator *alloc, aws_io_clock_fn *clock); + /** + * Optional callback to invoke when the event loop group finishes destruction. + */ + const struct aws_shutdown_callback_options *shutdown_options; -/** - * Creates an instance of the default event loop implementation for the current architecture and operating system using - * extendable options. - */ -AWS_IO_API -struct aws_event_loop *aws_event_loop_new_default_with_options( - struct aws_allocator *alloc, - const struct aws_event_loop_options *options); - -/** - * Invokes the destroy() fn for the event loop implementation. - * If the event loop is still in a running state, this function will block waiting on the event loop to shutdown. - * If you do not want this function to block, call aws_event_loop_stop() manually first. - * If the event loop is shared by multiple threads then destroy must be called by exactly one thread. All other threads - * must ensure their API calls to the event loop happen-before the call to destroy. - */ -AWS_IO_API -void aws_event_loop_destroy(struct aws_event_loop *event_loop); - -/** - * Initializes common event-loop data structures. - * This is only called from the *new() function of event loop implementations. - */ -AWS_IO_API -int aws_event_loop_init_base(struct aws_event_loop *event_loop, struct aws_allocator *alloc, aws_io_clock_fn *clock); - -/** - * Common cleanup code for all implementations. - * This is only called from the *destroy() function of event loop implementations. - */ -AWS_IO_API -void aws_event_loop_clean_up_base(struct aws_event_loop *event_loop); - -/** - * Fetches an object from the event-loop's data store. Key will be taken as the memory address of the memory pointed to - * by key. This function is not thread safe and should be called inside the event-loop's thread. - */ -AWS_IO_API -int aws_event_loop_fetch_local_object( - struct aws_event_loop *event_loop, - void *key, - struct aws_event_loop_local_object *obj); - -/** - * Puts an item object the event-loop's data store. Key will be taken as the memory address of the memory pointed to by - * key. The lifetime of item must live until remove or a put item overrides it. This function is not thread safe and - * should be called inside the event-loop's thread. - */ -AWS_IO_API -int aws_event_loop_put_local_object(struct aws_event_loop *event_loop, struct aws_event_loop_local_object *obj); - -/** - * Removes an object from the event-loop's data store. Key will be taken as the memory address of the memory pointed to - * by key. If removed_item is not null, the removed item will be moved to it if it exists. Otherwise, the default - * deallocation strategy will be used. This function is not thread safe and should be called inside the event-loop's - * thread. - */ -AWS_IO_API -int aws_event_loop_remove_local_object( - struct aws_event_loop *event_loop, - void *key, - struct aws_event_loop_local_object *removed_obj); - -/** - * Triggers the running of the event loop. This function must not block. The event loop is not active until this - * function is invoked. This function can be called again on an event loop after calling aws_event_loop_stop() and - * aws_event_loop_wait_for_stop_completion(). 
- */ -AWS_IO_API -int aws_event_loop_run(struct aws_event_loop *event_loop); - -/** - * Triggers the event loop to stop, but does not wait for the loop to stop completely. - * This function may be called from outside or inside the event loop thread. It is safe to call multiple times. - * This function is called from destroy(). - * - * If you do not call destroy(), an event loop can be run again by calling stop(), wait_for_stop_completion(), run(). - */ -AWS_IO_API -int aws_event_loop_stop(struct aws_event_loop *event_loop); + /** + * Optional configuration to control how the event loop group's threads bind to CPU groups + */ + const uint16_t *cpu_group; -/** - * For event-loop implementations to use for providing metrics info to the base event-loop. This enables the - * event-loop load balancer to take into account load when vending another event-loop to a caller. - * - * Call this function at the beginning of your event-loop tick: after wake-up, but before processing any IO or tasks. - */ -AWS_IO_API -void aws_event_loop_register_tick_start(struct aws_event_loop *event_loop); - -/** - * For event-loop implementations to use for providing metrics info to the base event-loop. This enables the - * event-loop load balancer to take into account load when vending another event-loop to a caller. - * - * Call this function at the end of your event-loop tick: after processing IO and tasks. - */ -AWS_IO_API -void aws_event_loop_register_tick_end(struct aws_event_loop *event_loop); - -/** - * Returns the current load factor (however that may be calculated). If the event-loop is not invoking - * aws_event_loop_register_tick_start() and aws_event_loop_register_tick_end(), this value will always be 0. - */ -AWS_IO_API -size_t aws_event_loop_get_load_factor(struct aws_event_loop *event_loop); + /** + * Override for the clock function that event loops should use. Defaults to the system's high resolution + * timer. + * + * Do not bind this value to managed code; it is only used in timing-sensitive tests. + */ + aws_io_clock_fn *clock_override; +}; -/** - * Blocks until the event loop stops completely. - * If you want to call aws_event_loop_run() again, you must call this after aws_event_loop_stop(). - * It is not safe to call this function from inside the event loop thread. - */ -AWS_IO_API -int aws_event_loop_wait_for_stop_completion(struct aws_event_loop *event_loop); +AWS_EXTERN_C_BEGIN /** * The event loop will schedule the task and run it on the event loop thread as soon as possible. @@ -319,105 +112,63 @@ void aws_event_loop_schedule_task_future( AWS_IO_API void aws_event_loop_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task); -#if AWS_USE_IO_COMPLETION_PORTS - /** - * Associates an aws_io_handle with the event loop's I/O Completion Port. - * - * The handle must use aws_overlapped for all async operations requiring an OVERLAPPED struct. - * When the operation completes, the aws_overlapped's completion function will run on the event loop thread. - * Note that completion functions will not be invoked while the event loop is stopped. Users should wait for all async - * operations on connected handles to complete before cleaning up or destroying the event loop. - * - * A handle may only be connected to one event loop in its lifetime. + * Returns true if the event loop's thread is the same thread that called this function, otherwise false. 
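As a usage sketch of the task-scheduling entry points that stay public in this header: the task helpers (aws_task_init, aws_timestamp_convert) come from aws-c-common, and the function and variable names below are illustrative, not part of this patch.

    #include <aws/common/clock.h>
    #include <aws/common/task.h>
    #include <aws/io/event_loop.h>

    /* Runs on the event loop's thread unless the task was cancelled. */
    static void s_example_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) {
        (void)task;
        (void)arg;
        if (status != AWS_TASK_STATUS_RUN_READY) {
            return; /* cancelled, e.g. during event loop shutdown */
        }
        /* ... do work on the event loop thread ... */
    }

    static void s_schedule_in_one_second(struct aws_event_loop *loop, struct aws_task *task) {
        aws_task_init(task, s_example_task_fn, NULL /* user data */, "example_task");

        /* Timestamps are nanoseconds on the event loop's own clock. */
        uint64_t now_ns = 0;
        aws_event_loop_current_clock_time(loop, &now_ns);
        uint64_t delay_ns = aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL);

        /* Use aws_event_loop_schedule_task_now() instead to run as soon as possible. */
        aws_event_loop_schedule_task_future(loop, task, now_ns + delay_ns);
    }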
*/ AWS_IO_API -int aws_event_loop_connect_handle_to_io_completion_port( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle); - -#else /* !AWS_USE_IO_COMPLETION_PORTS */ +bool aws_event_loop_thread_is_callers_thread(struct aws_event_loop *event_loop); /** - * Subscribes on_event to events on the event-loop for handle. events is a bitwise concatenation of the events that were - * received. The definition for these values can be found in aws_io_event_type. Currently, only - * AWS_IO_EVENT_TYPE_READABLE and AWS_IO_EVENT_TYPE_WRITABLE are honored. You always are registered for error conditions - * and closure. This function may be called from outside or inside the event loop thread. However, the unsubscribe - * function must be called inside the event-loop's thread. + * Gets the current timestamp for the event loop's clock, in nanoseconds. This function is thread-safe. */ AWS_IO_API -int aws_event_loop_subscribe_to_io_events( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - int events, - aws_event_loop_on_event_fn *on_event, - void *user_data); - -#endif /* AWS_USE_IO_COMPLETION_PORTS */ +int aws_event_loop_current_clock_time(const struct aws_event_loop *event_loop, uint64_t *time_nanos); /** - * Unsubscribes handle from event-loop notifications. - * This function is not thread safe and should be called inside the event-loop's thread. - * - * NOTE: if you are using io completion ports, this is a risky call. We use it in places, but only when we're certain - * there's no pending events. If you want to use it, it's your job to make sure you don't have pending events before - * calling it. + * Creation function for event loop groups. */ AWS_IO_API -int aws_event_loop_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle); +struct aws_event_loop_group *aws_event_loop_group_new( + struct aws_allocator *allocator, + const struct aws_event_loop_group_options *options); /** - * Cleans up resources (user_data) associated with the I/O eventing subsystem for a given handle. This should only - * ever be necessary in the case where you are cleaning up an event loop during shutdown and its thread has already - * been joined. + * Increments the reference count on the event loop group, allowing the caller to take a reference to it. + * + * Returns the same event loop group passed in. */ AWS_IO_API -void aws_event_loop_free_io_event_resources(struct aws_event_loop *event_loop, struct aws_io_handle *handle); +struct aws_event_loop_group *aws_event_loop_group_acquire(struct aws_event_loop_group *el_group); /** - * Returns true if the event loop's thread is the same thread that called this function, otherwise false. + * Decrements an event loop group's ref count. When the ref count drops to zero, the event loop group will be + * destroyed. */ AWS_IO_API -bool aws_event_loop_thread_is_callers_thread(struct aws_event_loop *event_loop); +void aws_event_loop_group_release(struct aws_event_loop_group *el_group); /** - * Gets the current timestamp for the event loop's clock, in nanoseconds. This function is thread-safe. + * Returns the event loop at a particular index. If the index is out of bounds, null is returned. 
*/ AWS_IO_API -int aws_event_loop_current_clock_time(struct aws_event_loop *event_loop, uint64_t *time_nanos); +struct aws_event_loop *aws_event_loop_group_get_loop_at(struct aws_event_loop_group *el_group, size_t index); /** - * Creates an event loop group, with clock, number of loops to manage, and the function to call for creating a new - * event loop. + * Gets the number of event loops managed by an event loop group. */ AWS_IO_API -struct aws_event_loop_group *aws_event_loop_group_new( - struct aws_allocator *alloc, - aws_io_clock_fn *clock, - uint16_t el_count, - aws_new_event_loop_fn *new_loop_fn, - void *new_loop_user_data, - const struct aws_shutdown_callback_options *shutdown_options); +size_t aws_event_loop_group_get_loop_count(const struct aws_event_loop_group *el_group); -/** Creates an event loop group, with clock, number of loops to manage, the function to call for creating a new - * event loop, and also pins all loops to hw threads on the same cpu_group (e.g. NUMA nodes). Note: - * If el_count exceeds the number of hw threads in the cpu_group it will be ignored on the assumption that if you - * care about NUMA, you don't want hyper-threads doing your IO and you especially don't want IO on a different node. +/** + * Fetches the next loop for use. The purpose is to enable load balancing across loops. You should not depend on how + * this load balancing is done as it is subject to change in the future. Currently it uses the "best-of-two" algorithm + * based on the load factor of each loop. */ AWS_IO_API -struct aws_event_loop_group *aws_event_loop_group_new_pinned_to_cpu_group( - struct aws_allocator *alloc, - aws_io_clock_fn *clock, - uint16_t el_count, - uint16_t cpu_group, - aws_new_event_loop_fn *new_loop_fn, - void *new_loop_user_data, - const struct aws_shutdown_callback_options *shutdown_options); +struct aws_event_loop *aws_event_loop_group_get_next_loop(struct aws_event_loop_group *el_group); /** - * Initializes an event loop group with platform defaults. If max_threads == 0, then the - * loop count will be the number of available processors on the machine / 2 (to exclude hyper-threads). - * Otherwise, max_threads will be the number of event loops in the group. + * @deprecated - use aws_event_loop_group_new() instead */ AWS_IO_API struct aws_event_loop_group *aws_event_loop_group_new_default( @@ -425,14 +176,8 @@ struct aws_event_loop_group *aws_event_loop_group_new_default( uint16_t max_threads, const struct aws_shutdown_callback_options *shutdown_options); -/** Creates an event loop group, with clock, number of loops to manage, the function to call for creating a new - * event loop, and also pins all loops to hw threads on the same cpu_group (e.g. NUMA nodes). Note: - * If el_count exceeds the number of hw threads in the cpu_group it will be clamped to the number of hw threads - * on the assumption that if you care about NUMA, you don't want hyper-threads doing your IO and you especially - * don't want IO on a different node. 
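To make the new construction path concrete, here is a minimal sketch of building a group from aws_event_loop_group_options and borrowing a loop from it (error handling elided; aws_default_allocator(), aws_io_library_init() and aws_io_library_clean_up() are the usual aws-c-common/aws-c-io bootstrap calls, as used by the test changes further below).

    #include <aws/common/allocator.h>
    #include <aws/io/event_loop.h>
    #include <aws/io/io.h>

    int main(void) {
        struct aws_allocator *allocator = aws_default_allocator();
        aws_io_library_init(allocator);

        /* Unset fields fall back to defaults: a loop_count of 0 derives a count from the
         * available processors, no CPU-group pinning, high-resolution clock. */
        struct aws_event_loop_group_options elg_options = {
            .loop_count = 2,
        };
        struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);

        /* The group load-balances across its loops; hand the result to a channel, socket, etc. */
        struct aws_event_loop *loop = aws_event_loop_group_get_next_loop(el_group);
        (void)loop;

        /* Groups are ref-counted; releasing the last reference destroys the group. */
        aws_event_loop_group_release(el_group);

        aws_io_library_clean_up();
        return 0;
    }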
- * - * If max_threads == 0, then the - * loop count will be the number of available processors in the cpu_group / 2 (to exclude hyper-threads) +/** + * @deprecated - use aws_event_loop_group_new() instead */ AWS_IO_API struct aws_event_loop_group *aws_event_loop_group_new_default_pinned_to_cpu_group( @@ -442,35 +187,49 @@ struct aws_event_loop_group *aws_event_loop_group_new_default_pinned_to_cpu_grou const struct aws_shutdown_callback_options *shutdown_options); /** - * Increments the reference count on the event loop group, allowing the caller to take a reference to it. + * @internal - Don't use outside of testing. * - * Returns the same event loop group passed in. + * Returns the opaque internal user data of an event loop. Can be cast into a specific implementation by + * privileged consumers. */ AWS_IO_API -struct aws_event_loop_group *aws_event_loop_group_acquire(struct aws_event_loop_group *el_group); +void *aws_event_loop_get_impl(struct aws_event_loop *event_loop); /** - * Decrements an event loop group's ref count. When the ref count drops to zero, the event loop group will be - * destroyed. + * @internal - Don't use outside of testing. + * + * Initializes the base structure used by all event loop implementations with test-oriented overrides. */ AWS_IO_API -void aws_event_loop_group_release(struct aws_event_loop_group *el_group); - -AWS_IO_API -struct aws_event_loop *aws_event_loop_group_get_loop_at(struct aws_event_loop_group *el_group, size_t index); +struct aws_event_loop *aws_event_loop_new_base( + struct aws_allocator *allocator, + aws_io_clock_fn *clock, + struct aws_event_loop_vtable *vtable, + void *impl); +/** + * @internal - Don't use outside of testing. + * + * Common cleanup code for all implementations. + * This is only called from the *destroy() function of event loop implementations. + */ AWS_IO_API -size_t aws_event_loop_group_get_loop_count(struct aws_event_loop_group *el_group); +void aws_event_loop_clean_up_base(struct aws_event_loop *event_loop); /** - * Fetches the next loop for use. The purpose is to enable load balancing across loops. You should not depend on how - * this load balancing is done as it is subject to change in the future. Currently it uses the "best-of-two" algorithm - * based on the load factor of each loop. + * @internal - Don't use outside of testing. + * + * Invokes the destroy() fn for the event loop implementation. + * If the event loop is still in a running state, this function will block waiting on the event loop to shutdown. + * If you do not want this function to block, call aws_event_loop_stop() manually first. + * If the event loop is shared by multiple threads then destroy must be called by exactly one thread. All other threads + * must ensure their API calls to the event loop happen-before the call to destroy. */ AWS_IO_API -struct aws_event_loop *aws_event_loop_group_get_next_loop(struct aws_event_loop_group *el_group); +void aws_event_loop_destroy(struct aws_event_loop *event_loop); AWS_EXTERN_C_END + AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_EVENT_LOOP_H */ diff --git a/include/aws/io/private/event_loop_impl.h b/include/aws/io/private/event_loop_impl.h new file mode 100644 index 000000000..4eb2f6230 --- /dev/null +++ b/include/aws/io/private/event_loop_impl.h @@ -0,0 +1,299 @@ +#ifndef AWS_IO_EVENT_LOOP_IMPL_H +#define AWS_IO_EVENT_LOOP_IMPL_H + +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include + +#include +#include +#include +#include + +AWS_PUSH_SANE_WARNING_LEVEL + +struct aws_event_loop; +struct aws_overlapped; + +typedef void(aws_event_loop_on_completion_fn)( + struct aws_event_loop *event_loop, + struct aws_overlapped *overlapped, + int status_code, + size_t num_bytes_transferred); + +/** + * The aws_win32_OVERLAPPED struct is layout-compatible with OVERLAPPED as defined in . It is used + * here to avoid pulling in a dependency on which would also bring along a lot of bad macros, such + * as redefinitions of GetMessage and GetObject. Note that the OVERLAPPED struct layout in the Windows SDK can + * never be altered without breaking binary compatibility for every existing third-party executable, so there + * is no need to worry about keeping this definition in sync. + */ +struct aws_win32_OVERLAPPED { + uintptr_t Internal; + uintptr_t InternalHigh; + union { + struct { + uint32_t Offset; + uint32_t OffsetHigh; + } s; + void *Pointer; + } u; + void *hEvent; +}; + +/** + * Use aws_overlapped when a handle connected to the event loop needs an OVERLAPPED struct. + * OVERLAPPED structs are needed to make OS-level async I/O calls. + * When the I/O completes, the assigned aws_event_loop_on_completion_fn is called from the event_loop's thread. + * While the I/O is pending, it is not safe to modify or delete aws_overlapped. + * Call aws_overlapped_init() before first use. If the aws_overlapped will be used multiple times, call + * aws_overlapped_reset() or aws_overlapped_init() between uses. + */ +struct aws_overlapped { + struct aws_win32_OVERLAPPED overlapped; + aws_event_loop_on_completion_fn *on_completion; + void *user_data; +}; + +enum aws_io_event_type { + AWS_IO_EVENT_TYPE_READABLE = 1, + AWS_IO_EVENT_TYPE_WRITABLE = 2, + AWS_IO_EVENT_TYPE_REMOTE_HANG_UP = 4, + AWS_IO_EVENT_TYPE_CLOSED = 8, + AWS_IO_EVENT_TYPE_ERROR = 16, +}; + +struct aws_event_loop { + struct aws_event_loop_vtable *vtable; + struct aws_allocator *alloc; + aws_io_clock_fn *clock; + struct aws_hash_table local_data; + struct aws_atomic_var current_load_factor; + uint64_t latest_tick_start; + size_t current_tick_latency_sum; + struct aws_atomic_var next_flush_time; + void *impl_data; +}; + +struct aws_event_loop_local_object; +typedef void(aws_event_loop_on_local_object_removed_fn)(struct aws_event_loop_local_object *); + +struct aws_event_loop_local_object { + const void *key; + void *object; + aws_event_loop_on_local_object_removed_fn *on_object_removed; +}; + +struct aws_event_loop_options { + aws_io_clock_fn *clock; + struct aws_thread_options *thread_options; +}; + +typedef struct aws_event_loop *(aws_new_event_loop_fn)(struct aws_allocator *alloc, + const struct aws_event_loop_options *options, + void *new_loop_user_data); + +struct aws_event_loop_group { + struct aws_allocator *allocator; + struct aws_array_list event_loops; + struct aws_ref_count ref_count; + struct aws_shutdown_callback_options shutdown_options; +}; + +AWS_EXTERN_C_BEGIN + +#ifdef AWS_USE_IO_COMPLETION_PORTS + +/** + * Prepares aws_overlapped for use, and sets a function to call when the overlapped operation completes. + */ +AWS_IO_API +void aws_overlapped_init( + struct aws_overlapped *overlapped, + aws_event_loop_on_completion_fn *on_completion, + void *user_data); + +/** + * Prepares aws_overlapped for re-use without changing the assigned aws_event_loop_on_completion_fn. + * Call aws_overlapped_init(), instead of aws_overlapped_reset(), to change the aws_event_loop_on_completion_fn. 
+ */ +AWS_IO_API +void aws_overlapped_reset(struct aws_overlapped *overlapped); + +/** + * Casts an aws_overlapped pointer for use as a LPOVERLAPPED parameter to Windows API functions + */ +AWS_IO_API +struct _OVERLAPPED *aws_overlapped_to_windows_overlapped(struct aws_overlapped *overlapped); + +/** + * Associates an aws_io_handle with the event loop's I/O Completion Port. + * + * The handle must use aws_overlapped for all async operations requiring an OVERLAPPED struct. + * When the operation completes, the aws_overlapped's completion function will run on the event loop thread. + * Note that completion functions will not be invoked while the event loop is stopped. Users should wait for all async + * operations on connected handles to complete before cleaning up or destroying the event loop. + * + * A handle may only be connected to one event loop in its lifetime. + */ +AWS_IO_API +int aws_event_loop_connect_handle_to_io_completion_port( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle); + +#else + +/** + * Subscribes on_event to events on the event-loop for handle. events is a bitwise concatenation of the events that were + * received. The definition for these values can be found in aws_io_event_type. Currently, only + * AWS_IO_EVENT_TYPE_READABLE and AWS_IO_EVENT_TYPE_WRITABLE are honored. You always are registered for error conditions + * and closure. This function may be called from outside or inside the event loop thread. However, the unsubscribe + * function must be called inside the event-loop's thread. + */ +AWS_IO_API +int aws_event_loop_subscribe_to_io_events( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle, + int events, + aws_event_loop_on_event_fn *on_event, + void *user_data); + +#endif /* AWS_USE_IO_COMPLETION_PORTS */ + +/** + * Creates an instance of the default event loop implementation for the current architecture and operating system. + */ +AWS_IO_API +struct aws_event_loop *aws_event_loop_new_default(struct aws_allocator *alloc, aws_io_clock_fn *clock); + +/** + * Creates an instance of the default event loop implementation for the current architecture and operating system using + * extendable options. + */ +AWS_IO_API +struct aws_event_loop *aws_event_loop_new_default_with_options( + struct aws_allocator *alloc, + const struct aws_event_loop_options *options); + +/** + * Initializes common event-loop data structures. + * This is only called from the *new() function of event loop implementations. + */ +AWS_IO_API +int aws_event_loop_init_base(struct aws_event_loop *event_loop, struct aws_allocator *alloc, aws_io_clock_fn *clock); + +/** + * Fetches an object from the event-loop's data store. Key will be taken as the memory address of the memory pointed to + * by key. This function is not thread safe and should be called inside the event-loop's thread. + */ +AWS_IO_API +int aws_event_loop_fetch_local_object( + struct aws_event_loop *event_loop, + void *key, + struct aws_event_loop_local_object *obj); + +/** + * Puts an item object the event-loop's data store. Key will be taken as the memory address of the memory pointed to by + * key. The lifetime of item must live until remove or a put item overrides it. This function is not thread safe and + * should be called inside the event-loop's thread. + */ +AWS_IO_API +int aws_event_loop_put_local_object(struct aws_event_loop *event_loop, struct aws_event_loop_local_object *obj); + +/** + * Removes an object from the event-loop's data store. 
Key will be taken as the memory address of the memory pointed to + * by key. If removed_item is not null, the removed item will be moved to it if it exists. Otherwise, the default + * deallocation strategy will be used. This function is not thread safe and should be called inside the event-loop's + * thread. + */ +AWS_IO_API +int aws_event_loop_remove_local_object( + struct aws_event_loop *event_loop, + void *key, + struct aws_event_loop_local_object *removed_obj); + +/** + * Triggers the running of the event loop. This function must not block. The event loop is not active until this + * function is invoked. This function can be called again on an event loop after calling aws_event_loop_stop() and + * aws_event_loop_wait_for_stop_completion(). + */ +AWS_IO_API +int aws_event_loop_run(struct aws_event_loop *event_loop); + +/** + * Triggers the event loop to stop, but does not wait for the loop to stop completely. + * This function may be called from outside or inside the event loop thread. It is safe to call multiple times. + * This function is called from destroy(). + * + * If you do not call destroy(), an event loop can be run again by calling stop(), wait_for_stop_completion(), run(). + */ +AWS_IO_API +int aws_event_loop_stop(struct aws_event_loop *event_loop); + +/** + * For event-loop implementations to use for providing metrics info to the base event-loop. This enables the + * event-loop load balancer to take into account load when vending another event-loop to a caller. + * + * Call this function at the beginning of your event-loop tick: after wake-up, but before processing any IO or tasks. + */ +AWS_IO_API +void aws_event_loop_register_tick_start(struct aws_event_loop *event_loop); + +/** + * For event-loop implementations to use for providing metrics info to the base event-loop. This enables the + * event-loop load balancer to take into account load when vending another event-loop to a caller. + * + * Call this function at the end of your event-loop tick: after processing IO and tasks. + */ +AWS_IO_API +void aws_event_loop_register_tick_end(struct aws_event_loop *event_loop); + +/** + * Returns the current load factor (however that may be calculated). If the event-loop is not invoking + * aws_event_loop_register_tick_start() and aws_event_loop_register_tick_end(), this value will always be 0. + */ +AWS_IO_API +size_t aws_event_loop_get_load_factor(struct aws_event_loop *event_loop); + +/** + * Blocks until the event loop stops completely. + * If you want to call aws_event_loop_run() again, you must call this after aws_event_loop_stop(). + * It is not safe to call this function from inside the event loop thread. + */ +AWS_IO_API +int aws_event_loop_wait_for_stop_completion(struct aws_event_loop *event_loop); + +/** + * Unsubscribes handle from event-loop notifications. + * This function is not thread safe and should be called inside the event-loop's thread. + * + * NOTE: if you are using io completion ports, this is a risky call. We use it in places, but only when we're certain + * there's no pending events. If you want to use it, it's your job to make sure you don't have pending events before + * calling it. + */ +AWS_IO_API +int aws_event_loop_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle); + +/** + * Cleans up resources (user_data) associated with the I/O eventing subsystem for a given handle. 
This should only + * ever be necessary in the case where you are cleaning up an event loop during shutdown and its thread has already + * been joined. + */ +AWS_IO_API +void aws_event_loop_free_io_event_resources(struct aws_event_loop *event_loop, struct aws_io_handle *handle); + +AWS_IO_API +struct aws_event_loop_group *aws_event_loop_group_new_internal( + struct aws_allocator *allocator, + const struct aws_event_loop_group_options *options, + aws_new_event_loop_fn *new_loop_fn, + void *new_loop_user_data); + +AWS_EXTERN_C_END + +AWS_POP_SANE_WARNING_LEVEL + +#endif /* AWS_IO_EVENT_LOOP_IMPL_H */ diff --git a/include/aws/testing/io_testing_channel.h b/include/aws/testing/io_testing_channel.h index 192a269b0..c202168cb 100644 --- a/include/aws/testing/io_testing_channel.h +++ b/include/aws/testing/io_testing_channel.h @@ -9,10 +9,12 @@ #include #include #include +// #include #include #include struct testing_loop { + struct aws_allocator *allocator; struct aws_task_scheduler scheduler; bool mock_on_callers_thread; }; @@ -33,7 +35,7 @@ static int s_testing_loop_wait_for_stop_completion(struct aws_event_loop *event_ } static void s_testing_loop_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task) { - struct testing_loop *testing_loop = (struct testing_loop *)event_loop->impl_data; + struct testing_loop *testing_loop = (struct testing_loop *)aws_event_loop_get_impl(event_loop); aws_task_scheduler_schedule_now(&testing_loop->scheduler, task); } @@ -42,26 +44,27 @@ static void s_testing_loop_schedule_task_future( struct aws_task *task, uint64_t run_at_nanos) { - struct testing_loop *testing_loop = (struct testing_loop *)event_loop->impl_data; + struct testing_loop *testing_loop = (struct testing_loop *)aws_event_loop_get_impl(event_loop); aws_task_scheduler_schedule_future(&testing_loop->scheduler, task, run_at_nanos); } static void s_testing_loop_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task) { - struct testing_loop *testing_loop = (struct testing_loop *)event_loop->impl_data; + struct testing_loop *testing_loop = (struct testing_loop *)aws_event_loop_get_impl(event_loop); aws_task_scheduler_cancel_task(&testing_loop->scheduler, task); } static bool s_testing_loop_is_on_callers_thread(struct aws_event_loop *event_loop) { - struct testing_loop *testing_loop = (struct testing_loop *)event_loop->impl_data; + struct testing_loop *testing_loop = (struct testing_loop *)aws_event_loop_get_impl(event_loop); return testing_loop->mock_on_callers_thread; } static void s_testing_loop_destroy(struct aws_event_loop *event_loop) { - struct testing_loop *testing_loop = (struct testing_loop *)event_loop->impl_data; + struct testing_loop *testing_loop = (struct testing_loop *)aws_event_loop_get_impl(event_loop); + struct aws_allocator *allocator = testing_loop->allocator; aws_task_scheduler_clean_up(&testing_loop->scheduler); - aws_mem_release(event_loop->alloc, testing_loop); + aws_mem_release(allocator, testing_loop); aws_event_loop_clean_up_base(event_loop); - aws_mem_release(event_loop->alloc, event_loop); + aws_mem_release(allocator, event_loop); } static struct aws_event_loop_vtable s_testing_loop_vtable = { @@ -76,18 +79,14 @@ static struct aws_event_loop_vtable s_testing_loop_vtable = { }; static struct aws_event_loop *s_testing_loop_new(struct aws_allocator *allocator, aws_io_clock_fn clock) { - struct aws_event_loop *event_loop = - (struct aws_event_loop *)aws_mem_acquire(allocator, sizeof(struct aws_event_loop)); - aws_event_loop_init_base(event_loop, 
allocator, clock); - struct testing_loop *testing_loop = (struct testing_loop *)aws_mem_calloc(allocator, 1, sizeof(struct testing_loop)); + aws_task_scheduler_init(&testing_loop->scheduler, allocator); testing_loop->mock_on_callers_thread = true; - event_loop->impl_data = testing_loop; - event_loop->vtable = &s_testing_loop_vtable; + testing_loop->allocator = allocator; - return event_loop; + return aws_event_loop_new_base(allocator, clock, &s_testing_loop_vtable, testing_loop); } typedef void(testing_channel_handler_on_shutdown_fn)( @@ -396,7 +395,7 @@ static inline int testing_channel_init( AWS_ZERO_STRUCT(*testing); testing->loop = s_testing_loop_new(allocator, options->clock_fn); - testing->loop_impl = (struct testing_loop *)testing->loop->impl_data; + testing->loop_impl = (struct testing_loop *)aws_event_loop_get_impl(testing->loop); struct aws_channel_options args = { .on_setup_completed = s_testing_channel_on_setup_completed, diff --git a/source/bsd/kqueue_event_loop.c b/source/bsd/kqueue_event_loop.c index 33a517e7b..e0f8ed63b 100644 --- a/source/bsd/kqueue_event_loop.c +++ b/source/bsd/kqueue_event_loop.c @@ -5,14 +5,14 @@ #include -#include - #include #include #include #include #include #include +#include +#include #if defined(__FreeBSD__) || defined(__NetBSD__) # define __BSD_VISIBLE 1 diff --git a/source/channel.c b/source/channel.c index 55903fc1d..3e3d33932 100644 --- a/source/channel.c +++ b/source/channel.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #ifdef _MSC_VER diff --git a/source/event_loop.c b/source/event_loop.c index 1e7aef676..2e0861964 100644 --- a/source/event_loop.c +++ b/source/event_loop.c @@ -5,6 +5,9 @@ #include +#include +#include + #include #include #include @@ -72,30 +75,32 @@ static void s_aws_event_loop_group_shutdown_async(struct aws_event_loop_group *e aws_thread_launch(&cleanup_thread, s_event_loop_destroy_async_thread_fn, el_group, &thread_options); } -static struct aws_event_loop_group *s_event_loop_group_new( - struct aws_allocator *alloc, - aws_io_clock_fn *clock, - uint16_t el_count, - uint16_t cpu_group, - bool pin_threads, +struct aws_event_loop_group *aws_event_loop_group_new_internal( + struct aws_allocator *allocator, + const struct aws_event_loop_group_options *options, aws_new_event_loop_fn *new_loop_fn, - void *new_loop_user_data, - const struct aws_shutdown_callback_options *shutdown_options) { - AWS_ASSERT(new_loop_fn); + void *new_loop_user_data) { + AWS_FATAL_ASSERT(new_loop_fn); + + aws_io_clock_fn *clock = options->clock_override; + if (!clock) { + clock = aws_high_res_clock_get_ticks; + } size_t group_cpu_count = 0; struct aws_cpu_info *usable_cpus = NULL; + bool pin_threads = options->cpu_group != NULL; if (pin_threads) { + uint16_t cpu_group = *options->cpu_group; group_cpu_count = aws_get_cpu_count_for_group(cpu_group); - if (!group_cpu_count) { + // LOG THIS aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } - usable_cpus = aws_mem_calloc(alloc, group_cpu_count, sizeof(struct aws_cpu_info)); - + usable_cpus = aws_mem_calloc(allocator, group_cpu_count, sizeof(struct aws_cpu_info)); if (usable_cpus == NULL) { return NULL; } @@ -103,16 +108,23 @@ static struct aws_event_loop_group *s_event_loop_group_new( aws_get_cpu_ids_for_group(cpu_group, usable_cpus, group_cpu_count); } - struct aws_event_loop_group *el_group = aws_mem_calloc(alloc, 1, sizeof(struct aws_event_loop_group)); + struct aws_event_loop_group *el_group = aws_mem_calloc(allocator, 1, sizeof(struct aws_event_loop_group)); if (el_group == 
NULL) { return NULL; } - el_group->allocator = alloc; + el_group->allocator = allocator; aws_ref_count_init( &el_group->ref_count, el_group, (aws_simple_completion_callback *)s_aws_event_loop_group_shutdown_async); - if (aws_array_list_init_dynamic(&el_group->event_loops, alloc, el_count, sizeof(struct aws_event_loop *))) { + uint16_t el_count = options->loop_count; + if (el_count == 0) { + uint16_t processor_count = (uint16_t)aws_system_info_processor_count(); + /* cut them in half to avoid using hyper threads for the IO work. */ + el_count = processor_count > 1 ? processor_count / 2 : processor_count; + } + + if (aws_array_list_init_dynamic(&el_group->event_loops, allocator, el_count, sizeof(struct aws_event_loop *))) { goto on_error; } @@ -121,7 +133,7 @@ static struct aws_event_loop_group *s_event_loop_group_new( if (!pin_threads || (i < group_cpu_count && !usable_cpus[i].suspected_hyper_thread)) { struct aws_thread_options thread_options = *aws_default_thread_options(); - struct aws_event_loop_options options = { + struct aws_event_loop_options el_options = { .clock = clock, .thread_options = &thread_options, }; @@ -138,8 +150,7 @@ static struct aws_event_loop_group *s_event_loop_group_new( } thread_options.name = aws_byte_cursor_from_c_str(thread_name); - struct aws_event_loop *loop = new_loop_fn(alloc, &options, new_loop_user_data); - + struct aws_event_loop *loop = new_loop_fn(allocator, &el_options, new_loop_user_data); if (!loop) { goto on_error; } @@ -155,12 +166,12 @@ static struct aws_event_loop_group *s_event_loop_group_new( } } - if (shutdown_options != NULL) { - el_group->shutdown_options = *shutdown_options; + if (options->shutdown_options != NULL) { + el_group->shutdown_options = *options->shutdown_options; } if (pin_threads) { - aws_mem_release(alloc, usable_cpus); + aws_mem_release(allocator, usable_cpus); } return el_group; @@ -169,7 +180,7 @@ on_error:; /* cache the error code to prevent any potential side effects */ int cached_error_code = aws_last_error(); - aws_mem_release(alloc, usable_cpus); + aws_mem_release(allocator, usable_cpus); s_aws_event_loop_group_shutdown_sync(el_group); s_event_loop_group_thread_exit(el_group); @@ -178,20 +189,6 @@ on_error:; return NULL; } -struct aws_event_loop_group *aws_event_loop_group_new( - struct aws_allocator *alloc, - aws_io_clock_fn *clock, - uint16_t el_count, - aws_new_event_loop_fn *new_loop_fn, - void *new_loop_user_data, - const struct aws_shutdown_callback_options *shutdown_options) { - - AWS_ASSERT(new_loop_fn); - AWS_ASSERT(el_count); - - return s_event_loop_group_new(alloc, clock, el_count, 0, false, new_loop_fn, new_loop_user_data, shutdown_options); -} - static struct aws_event_loop *s_default_new_event_loop( struct aws_allocator *allocator, const struct aws_event_loop_options *options, @@ -201,49 +198,11 @@ static struct aws_event_loop *s_default_new_event_loop( return aws_event_loop_new_default_with_options(allocator, options); } -struct aws_event_loop_group *aws_event_loop_group_new_default( - struct aws_allocator *alloc, - uint16_t max_threads, - const struct aws_shutdown_callback_options *shutdown_options) { - if (!max_threads) { - uint16_t processor_count = (uint16_t)aws_system_info_processor_count(); - /* cut them in half to avoid using hyper threads for the IO work. */ - max_threads = processor_count > 1 ? 
processor_count / 2 : processor_count; - } - - return aws_event_loop_group_new( - alloc, aws_high_res_clock_get_ticks, max_threads, s_default_new_event_loop, NULL, shutdown_options); -} - -struct aws_event_loop_group *aws_event_loop_group_new_pinned_to_cpu_group( - struct aws_allocator *alloc, - aws_io_clock_fn *clock, - uint16_t el_count, - uint16_t cpu_group, - aws_new_event_loop_fn *new_loop_fn, - void *new_loop_user_data, - const struct aws_shutdown_callback_options *shutdown_options) { - AWS_ASSERT(new_loop_fn); - AWS_ASSERT(el_count); - - return s_event_loop_group_new( - alloc, clock, el_count, cpu_group, true, new_loop_fn, new_loop_user_data, shutdown_options); -} - -struct aws_event_loop_group *aws_event_loop_group_new_default_pinned_to_cpu_group( - struct aws_allocator *alloc, - uint16_t max_threads, - uint16_t cpu_group, - const struct aws_shutdown_callback_options *shutdown_options) { - - if (!max_threads) { - uint16_t processor_count = (uint16_t)aws_system_info_processor_count(); - /* cut them in half to avoid using hyper threads for the IO work. */ - max_threads = processor_count > 1 ? processor_count / 2 : processor_count; - } +struct aws_event_loop_group *aws_event_loop_group_new( + struct aws_allocator *allocator, + const struct aws_event_loop_group_options *options) { - return aws_event_loop_group_new_pinned_to_cpu_group( - alloc, aws_high_res_clock_get_ticks, max_threads, cpu_group, s_default_new_event_loop, NULL, shutdown_options); + return aws_event_loop_group_new_internal(allocator, options, s_default_new_event_loop, NULL); } struct aws_event_loop_group *aws_event_loop_group_acquire(struct aws_event_loop_group *el_group) { @@ -260,7 +219,7 @@ void aws_event_loop_group_release(struct aws_event_loop_group *el_group) { } } -size_t aws_event_loop_group_get_loop_count(struct aws_event_loop_group *el_group) { +size_t aws_event_loop_group_get_loop_count(const struct aws_event_loop_group *el_group) { return aws_array_list_length(&el_group->event_loops); } @@ -524,7 +483,52 @@ bool aws_event_loop_thread_is_callers_thread(struct aws_event_loop *event_loop) return event_loop->vtable->is_on_callers_thread(event_loop); } -int aws_event_loop_current_clock_time(struct aws_event_loop *event_loop, uint64_t *time_nanos) { +int aws_event_loop_current_clock_time(const struct aws_event_loop *event_loop, uint64_t *time_nanos) { AWS_ASSERT(event_loop->clock); return event_loop->clock(time_nanos); } + +struct aws_event_loop_group *aws_event_loop_group_new_default( + struct aws_allocator *alloc, + uint16_t max_threads, + const struct aws_shutdown_callback_options *shutdown_options) { + + struct aws_event_loop_group_options elg_options = { + .loop_count = max_threads, + .shutdown_options = shutdown_options, + }; + + return aws_event_loop_group_new(alloc, &elg_options); +} + +struct aws_event_loop_group *aws_event_loop_group_new_default_pinned_to_cpu_group( + struct aws_allocator *alloc, + uint16_t max_threads, + uint16_t cpu_group, + const struct aws_shutdown_callback_options *shutdown_options) { + + struct aws_event_loop_group_options elg_options = { + .loop_count = max_threads, + .shutdown_options = shutdown_options, + .cpu_group = &cpu_group, + }; + + return aws_event_loop_group_new(alloc, &elg_options); +} + +void *aws_event_loop_get_impl(struct aws_event_loop *event_loop) { + return event_loop->impl_data; +} + +struct aws_event_loop *aws_event_loop_new_base( + struct aws_allocator *allocator, + aws_io_clock_fn *clock, + struct aws_event_loop_vtable *vtable, + void *impl) { + struct 
aws_event_loop *event_loop = aws_mem_acquire(allocator, sizeof(struct aws_event_loop)); + aws_event_loop_init_base(event_loop, allocator, clock); + event_loop->impl_data = impl; + event_loop->vtable = vtable; + + return event_loop; +} diff --git a/source/exponential_backoff_retry_strategy.c b/source/exponential_backoff_retry_strategy.c index cf2472269..2110cbd46 100644 --- a/source/exponential_backoff_retry_strategy.c +++ b/source/exponential_backoff_retry_strategy.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -55,7 +56,7 @@ static void s_exponential_retry_destroy(struct aws_retry_strategy *retry_strateg if (completion_callback != NULL) { completion_callback(completion_user_data); } - aws_ref_count_release(&el_group->ref_count); + aws_event_loop_group_release(el_group); } } @@ -361,7 +362,7 @@ struct aws_retry_strategy *aws_retry_strategy_new_exponential_backoff( aws_atomic_init_int(&exponential_backoff_strategy->base.ref_count, 1); exponential_backoff_strategy->config = *config; exponential_backoff_strategy->config.el_group = - aws_ref_count_acquire(&exponential_backoff_strategy->config.el_group->ref_count); + aws_event_loop_group_acquire(exponential_backoff_strategy->config.el_group); if (!exponential_backoff_strategy->config.generate_random && !exponential_backoff_strategy->config.generate_random_impl) { diff --git a/source/linux/epoll_event_loop.c b/source/linux/epoll_event_loop.c index 094a7836a..a99d5a8cf 100644 --- a/source/linux/epoll_event_loop.c +++ b/source/linux/epoll_event_loop.c @@ -3,17 +3,16 @@ * SPDX-License-Identifier: Apache-2.0. */ -#include - #include #include #include #include #include #include -#include - +#include #include +#include +#include #include diff --git a/source/posix/pipe.c b/source/posix/pipe.c index f727b021c..449ab1318 100644 --- a/source/posix/pipe.c +++ b/source/posix/pipe.c @@ -6,6 +6,7 @@ #include #include +#include #ifdef __GLIBC__ # define __USE_GNU diff --git a/source/posix/socket.c b/source/posix/socket.c index cdd69d2d0..49e18f47e 100644 --- a/source/posix/socket.c +++ b/source/posix/socket.c @@ -13,6 +13,7 @@ #include #include +#include #include #include diff --git a/source/s2n/s2n_tls_channel_handler.c b/source/s2n/s2n_tls_channel_handler.c index e9560608e..8326543e8 100644 --- a/source/s2n/s2n_tls_channel_handler.c +++ b/source/s2n/s2n_tls_channel_handler.c @@ -5,21 +5,20 @@ #include #include +#include #include - +#include +#include +#include #include #include #include #include +#include #include #include #include -#include -#include -#include -#include - #include #ifdef AWS_S2N_INSOURCE_PATH # include diff --git a/source/windows/iocp/iocp_event_loop.c b/source/windows/iocp/iocp_event_loop.c index 313344ab9..1d0801e4b 100644 --- a/source/windows/iocp/iocp_event_loop.c +++ b/source/windows/iocp/iocp_event_loop.c @@ -11,6 +11,7 @@ #include #include +#include #include diff --git a/source/windows/iocp/pipe.c b/source/windows/iocp/pipe.c index 04145c679..a9e2185e5 100644 --- a/source/windows/iocp/pipe.c +++ b/source/windows/iocp/pipe.c @@ -7,6 +7,7 @@ #include #include +#include #include #include diff --git a/source/windows/iocp/socket.c b/source/windows/iocp/socket.c index 755950f0c..7286bd6ba 100644 --- a/source/windows/iocp/socket.c +++ b/source/windows/iocp/socket.c @@ -26,6 +26,7 @@ below, clang-format doesn't work (at least on my version) with the c-style comme #include #include #include +#include #include #include diff --git a/tests/alpn_handler_test.c b/tests/alpn_handler_test.c index 5d83bad4e..fa6d88e27 
100644
--- a/tests/alpn_handler_test.c
+++ b/tests/alpn_handler_test.c
@@ -5,6 +5,7 @@
 #include
 #include
+#include
 #include
 #include
diff --git a/tests/byo_crypto_test.c b/tests/byo_crypto_test.c
index 878889646..1414f8652 100644
--- a/tests/byo_crypto_test.c
+++ b/tests/byo_crypto_test.c
@@ -54,7 +54,11 @@ static struct byo_crypto_common_tester c_tester;
 static int s_byo_crypto_common_tester_init(struct aws_allocator *allocator, struct byo_crypto_common_tester *tester) {
     AWS_ZERO_STRUCT(*tester);
     aws_io_library_init(allocator);
-    tester->el_group = aws_event_loop_group_new_default(allocator, 0, NULL);
+
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 0,
+    };
+    tester->el_group = aws_event_loop_group_new(allocator, &elg_options);
     struct aws_mutex mutex = AWS_MUTEX_INIT;
     struct aws_condition_variable condition_variable = AWS_CONDITION_VARIABLE_INIT;
     tester->mutex = mutex;
diff --git a/tests/channel_test.c b/tests/channel_test.c
index 9a730a351..8fc530f99 100644
--- a/tests/channel_test.c
+++ b/tests/channel_test.c
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -684,7 +685,10 @@ static int s_test_channel_connect_some_hosts_timeout(struct aws_allocator *alloc
         .shutdown = false,
     };

-    struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new(allocator, &elg_options);

     /* resolve our s3 test bucket and an EC2 host with an ACL that blackholes the connection */
     const struct aws_string *addr1_ipv4 = NULL;
diff --git a/tests/default_host_resolver_test.c b/tests/default_host_resolver_test.c
index 2d0178a73..f47b346bf 100644
--- a/tests/default_host_resolver_test.c
+++ b/tests/default_host_resolver_test.c
@@ -96,7 +96,10 @@ static int s_test_default_with_ipv6_lookup_fn(struct aws_allocator *allocator, v

     aws_io_library_init(allocator);

-    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);

     struct aws_host_resolver_default_options resolver_options = {
         .el_group = el_group,
@@ -189,7 +192,10 @@ static int s_test_default_host_resolver_ipv6_address_variations_fn(struct aws_al
     };

-    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);

     struct aws_host_resolver_default_options resolver_options = {
         .el_group = el_group,
@@ -263,7 +269,10 @@ static int s_test_default_with_ipv4_only_lookup_fn(struct aws_allocat

     aws_io_library_init(allocator);

-    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);

     struct aws_host_resolver_default_options resolver_options = {
         .el_group = el_group,
@@ -333,7 +342,10 @@ static int s_test_default_with_multiple_lookups_fn(struct aws_allocat

     aws_io_library_init(allocator);

-    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);

     struct aws_host_resolver_default_options resolver_options = {
         .el_group = el_group,
@@ -460,7 +472,10 @@ static int s_test_resolver_ttls_fn(struct aws_allocator *allocator, void *ctx) {
     s_set_time(0);

-    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);

     struct aws_host_resolver_default_options resolver_options = {
         .el_group = el_group, .max_entries = 10, .system_clock_override_fn = s_clock_fn};
@@ -672,7 +687,10 @@ static int s_test_resolver_connect_failure_recording_fn(struct aws_allocator *al

     aws_io_library_init(allocator);

-    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);

     struct aws_host_resolver_default_options resolver_options = {
         .el_group = el_group,
@@ -864,7 +882,10 @@ static int s_test_resolver_ttl_refreshes_on_resolve_fn(struct aws_allocator *all

     aws_io_library_init(allocator);

-    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);

     struct aws_host_resolver_default_options resolver_options = {
         .el_group = el_group,
@@ -1044,7 +1065,10 @@ static int s_test_resolver_ipv4_address_lookup_fn(struct aws_allocato

     aws_io_library_init(allocator);

-    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);

     struct aws_host_resolver_default_options resolver_options = {
         .el_group = el_group,
@@ -1105,7 +1129,10 @@ static int s_test_resolver_purge_host_cache(struct aws_allocator *allocator, voi
     (void)ctx;
     aws_io_library_init(allocator);

-    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);

     struct aws_host_resolver_default_options resolver_options = {
         .el_group = el_group,
@@ -1220,7 +1247,10 @@ static int s_test_resolver_purge_cache(struct aws_allocator *allocator, void *ct
     (void)ctx;
     aws_io_library_init(allocator);

-    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);

     struct aws_host_resolver_default_options resolver_options = {
         .el_group = el_group,
@@ -1369,7 +1399,10 @@ static int s_test_resolver_ipv6_address_lookup_fn(struct aws_allocato

     aws_io_library_init(allocator);

-    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);

     struct aws_host_resolver_default_options resolver_options = {
         .el_group = el_group,
@@ -1431,7 +1464,10 @@ static int s_test_resolver_low_frequency_starvation_fn(struct aws_allocator *all

     aws_io_library_init(allocator);

-    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);

     struct aws_host_resolver_default_options resolver_options = {
         .el_group = el_group,
diff --git a/tests/event_loop_test.c b/tests/event_loop_test.c
index e86448c8b..92b739156 100644
--- a/tests/event_loop_test.c
+++ b/tests/event_loop_test.c
@@ -8,9 +8,9 @@
 #include
 #include
 #include
-#include
-
 #include
+#include
+#include
 #include

 struct task_args {
@@ -1041,7 +1041,10 @@ static int test_event_loop_group_setup_and_shutdown(struct aws_allocator *alloca
     (void)ctx;
     aws_io_library_init(allocator);

-    struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new_default(allocator, 0, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 0,
+    };
+    struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new(allocator, &elg_options);

     size_t cpu_count = aws_system_info_processor_count();
     size_t el_count = aws_event_loop_group_get_loop_count(event_loop_group);
@@ -1074,10 +1077,16 @@ static int test_numa_aware_event_loop_group_setup_and_shutdown(struct aws_alloca
     size_t cpus_for_group = aws_get_cpu_count_for_group(0);
     size_t el_count = 1;

-    /* pass UINT16_MAX here to check the boundary conditions on numa cpu detection. It should never create more threads
-     * than hw cpus available */
-    struct aws_event_loop_group *event_loop_group =
-        aws_event_loop_group_new_default_pinned_to_cpu_group(allocator, UINT16_MAX, 0, NULL);
+    uint16_t cpu_group = 0;
+    struct aws_event_loop_group_options elg_options = {
+        /*
+         * pass UINT16_MAX here to check the boundary conditions on numa cpu detection. It should never create more
+         * threads than hw cpus available
+         */
+        .loop_count = UINT16_MAX,
+        .cpu_group = &cpu_group,
+    };
+    struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new(allocator, &elg_options);

     el_count = aws_event_loop_group_get_loop_count(event_loop_group);
@@ -1154,8 +1163,12 @@ static int test_event_loop_group_setup_and_shutdown_async(struct aws_allocator *
     async_shutdown_options.shutdown_callback_user_data = &task_args;
     async_shutdown_options.shutdown_callback_fn = s_async_shutdown_complete_callback;

-    struct aws_event_loop_group *event_loop_group =
-        aws_event_loop_group_new_default(allocator, 0, &async_shutdown_options);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 0,
+        .shutdown_options = &async_shutdown_options,
+    };
+
+    struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new(allocator, &elg_options);

     struct aws_event_loop *event_loop = aws_event_loop_group_get_next_loop(event_loop_group);
diff --git a/tests/exponential_backoff_retry_test.c b/tests/exponential_backoff_retry_test.c
index a3bf7bde0..779a4f50f 100644
--- a/tests/exponential_backoff_retry_test.c
+++ b/tests/exponential_backoff_retry_test.c
@@ -66,7 +66,10 @@ static int s_test_exponential_backoff_retry_too_many_retries_for_jitter_mode(

     aws_io_library_init(allocator);

-    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);

     struct aws_exponential_backoff_retry_options config = {
         .max_retries = 3,
         .jitter_mode = jitter_mode,
@@ -157,7 +160,10 @@ static int s_test_exponential_backoff_retry_client_errors_do_not_count_fn(struct

     aws_io_library_init(allocator);

-    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);

     struct aws_exponential_backoff_retry_options config = {
         .el_group = el_group,
         .max_retries = 3,
@@ -201,7 +207,10 @@ static int s_test_exponential_backoff_retry_no_jitter_time_taken_fn(struct aws_a

     aws_io_library_init(allocator);

-    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);

     struct aws_exponential_backoff_retry_options config = {
         .max_retries = 3,
         .jitter_mode = AWS_EXPONENTIAL_BACKOFF_JITTER_NONE,
@@ -253,7 +262,10 @@ static int s_test_exponential_max_backoff_retry_no_jitter_fn(struct aws_allocato

     aws_io_library_init(allocator);

-    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);

     struct aws_exponential_backoff_retry_options config = {
         .max_retries = 3,
         .jitter_mode = AWS_EXPONENTIAL_BACKOFF_JITTER_NONE,
@@ -310,7 +322,10 @@ static int s_test_exponential_backoff_retry_invalid_options_fn(struct aws_alloca

     aws_io_library_init(allocator);

-    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);

     struct aws_exponential_backoff_retry_options config = {
         .max_retries = 64,
         .el_group = el_group,
diff --git a/tests/future_test.c b/tests/future_test.c
index 1ac94b551..795d30bb5 100644
--- a/tests/future_test.c
+++ b/tests/future_test.c
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include
 #include

 #include "future_test.h"
diff --git a/tests/pipe_test.c b/tests/pipe_test.c
index 053c5aefd..f15f4da33 100644
--- a/tests/pipe_test.c
+++ b/tests/pipe_test.c
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include
 #include

 enum pipe_loop_setup {
diff --git a/tests/pkcs11_test.c b/tests/pkcs11_test.c
index 792ed5fa4..4af9d0fb0 100644
--- a/tests/pkcs11_test.c
+++ b/tests/pkcs11_test.c
@@ -1653,8 +1653,10 @@ static int s_test_pkcs11_tls_negotiation_succeeds_common(
     ASSERT_SUCCESS(aws_mutex_init(&s_tls_tester.synced.mutex));
     ASSERT_SUCCESS(aws_condition_variable_init(&s_tls_tester.synced.cvar));

-    struct aws_event_loop_group *event_loop_group =
-        aws_event_loop_group_new_default(allocator, 1, NULL /*shutdown_opts*/);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new(allocator, &elg_options);
     ASSERT_NOT_NULL(event_loop_group);

     struct aws_host_resolver_default_options resolver_opts = {
diff --git a/tests/socket_handler_test.c b/tests/socket_handler_test.c
index 85c7c7c39..ee7290d4e 100644
--- a/tests/socket_handler_test.c
+++ b/tests/socket_handler_test.c
@@ -4,6 +4,7 @@
  */
 #include
 #include
+#include
 #include
 #include
 #include
@@ -59,7 +60,10 @@ static int s_socket_common_tester_init(struct aws_allocator *allocator, struct s
     AWS_ZERO_STRUCT(*tester);
     aws_io_library_init(allocator);

-    tester->el_group = aws_event_loop_group_new_default(allocator, 0, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 0,
+    };
+    tester->el_group = aws_event_loop_group_new(allocator, &elg_options);

     struct aws_host_resolver_default_options resolver_options = {
         .el_group = tester->el_group,
@@ -1006,8 +1010,13 @@ static int s_socket_common_tester_statistics_init(
     aws_io_library_init(allocator);

     AWS_ZERO_STRUCT(*tester);
-    tester->el_group =
-        aws_event_loop_group_new(allocator, s_statistic_test_clock_fn, 1, s_default_new_event_loop, NULL, NULL);
+
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+        .clock_override = s_statistic_test_clock_fn,
+    };
+    tester->el_group = aws_event_loop_group_new_internal(allocator, &elg_options, s_default_new_event_loop, NULL);
+
     struct aws_mutex mutex = AWS_MUTEX_INIT;
     struct aws_condition_variable condition_variable = AWS_CONDITION_VARIABLE_INIT;
     tester->mutex = mutex;
diff --git a/tests/socket_test.c b/tests/socket_test.c
index 097d6ef66..e01834a75 100644
--- a/tests/socket_test.c
+++ b/tests/socket_test.c
@@ -12,6 +12,7 @@
 #include
 #include
+#include
 #include

 #ifdef _MSC_VER
@@ -624,7 +625,10 @@ static int s_test_connect_timeout(struct aws_allocator *allocator, void *ctx) {

     aws_io_library_init(allocator);

-    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);
     struct aws_event_loop *event_loop = aws_event_loop_group_get_next_loop(el_group);

     ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error()));
@@ -704,7 +708,10 @@ static int s_test_connect_timeout_cancelation(struct aws_allocator *allocator, v

     aws_io_library_init(allocator);

-    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);
     struct aws_event_loop *event_loop = aws_event_loop_group_get_next_loop(el_group);

     ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error()));
@@ -1147,7 +1154,10 @@ static int s_cleanup_before_connect_or_timeout_doesnt_explode(struct aws_allocat

     aws_io_library_init(allocator);

-    struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+    };
+    struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options);
     struct aws_event_loop *event_loop = aws_event_loop_group_get_next_loop(el_group);

     ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error()));
diff --git a/tests/standard_retry_test.c b/tests/standard_retry_test.c
index bb62de691..11991a3e0 100644
--- a/tests/standard_retry_test.c
+++ b/tests/standard_retry_test.c
@@ -8,6 +8,7 @@
 #include
 #include
+#include

 #include

@@ -49,7 +50,12 @@ static int s_fixture_setup(struct aws_allocator *allocator, void *ctx) {
         .shutdown_callback_user_data = ctx,
     };

-    test_data->el_group = aws_event_loop_group_new_default(allocator, 1, &shutdown_options);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+        .shutdown_options = &shutdown_options,
+    };
+    test_data->el_group = aws_event_loop_group_new(allocator, &elg_options);
+
     ASSERT_NOT_NULL(test_data->el_group);

     struct aws_standard_retry_options retry_options = {
         .initial_bucket_capacity = 15,
diff --git a/tests/tls_handler_test.c b/tests/tls_handler_test.c
index a55d26077..7b1a68c32 100644
--- a/tests/tls_handler_test.c
+++ b/tests/tls_handler_test.c
@@ -10,6 +10,7 @@
 # include
 # include
 # include
+# include
 # include
 # include
@@ -177,7 +178,10 @@ static int s_tls_common_tester_init(struct aws_allocator *allocator, struct tls_
     aws_atomic_store_int(&tester->current_time_ns, 0);
     aws_atomic_store_ptr(&tester->stats_handler, NULL);

-    tester->el_group = aws_event_loop_group_new_default(allocator, 0, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 0,
+    };
+    tester->el_group = aws_event_loop_group_new(allocator, &elg_options);

     struct aws_host_resolver_default_options resolver_options = {
         .el_group = tester->el_group,
@@ -532,7 +536,10 @@ static int s_tls_channel_server_client_tester_init(struct aws_allocat
     AWS_ZERO_STRUCT(s_server_client_tester);
     ASSERT_SUCCESS(aws_mutex_init(&s_server_client_tester.server_mutex));
     ASSERT_SUCCESS(aws_condition_variable_init(&s_server_client_tester.server_condition_variable));
-    s_server_client_tester.client_el_group = aws_event_loop_group_new_default(allocator, 0, NULL);
+
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 0,
+    };
+    s_server_client_tester.client_el_group = aws_event_loop_group_new(allocator, &elg_options);

     ASSERT_SUCCESS(s_tls_rw_args_init(
         &s_server_client_tester.server_rw_args,
@@ -1904,8 +1912,11 @@ static int s_tls_common_tester_statistics_init(struct aws_allocator *allocator,
     aws_atomic_store_int(&tester->current_time_ns, 0);
     aws_atomic_store_ptr(&tester->stats_handler, NULL);

-    tester->el_group =
-        aws_event_loop_group_new(allocator, s_statistic_test_clock_fn, 1, s_default_new_event_loop, NULL, NULL);
+    struct aws_event_loop_group_options elg_options = {
+        .loop_count = 1,
+        .clock_override = s_statistic_test_clock_fn,
+    };
+    tester->el_group = aws_event_loop_group_new_internal(allocator, &elg_options, s_default_new_event_loop, NULL);

     struct aws_host_resolver_default_options resolver_options = {
         .el_group = tester->el_group,