From 852014d35fdb543649fe10384f5360be0466cfe0 Mon Sep 17 00:00:00 2001 From: Justin Boswell Date: Thu, 17 Oct 2019 14:54:00 -0700 Subject: [PATCH] Imported ring buffer allocator from node CRT (#527) * Imported ring buffer allocator from node, fixed bugs, added unit test * Implemented realloc as an assert to prevent use --- include/aws/common/ring_buffer.h | 11 ++++++ source/ring_buffer.c | 61 ++++++++++++++++++++++++++++++++ tests/CMakeLists.txt | 1 + tests/ring_buffer_test.c | 37 +++++++++++++++++++ 4 files changed, 110 insertions(+) diff --git a/include/aws/common/ring_buffer.h b/include/aws/common/ring_buffer.h index 9b63e8e34..64544d74d 100644 --- a/include/aws/common/ring_buffer.h +++ b/include/aws/common/ring_buffer.h @@ -101,6 +101,17 @@ AWS_COMMON_API bool aws_ring_buffer_buf_belongs_to_pool( const struct aws_ring_buffer *ring_buffer, const struct aws_byte_buf *buf); +/** + * Initializes the supplied allocator to be based on the provided ring buffer. Allocations must be allocated + * and freed in the same order, or the ring buffer will assert. + */ +AWS_COMMON_API int aws_ring_buffer_allocator_init(struct aws_allocator *allocator, struct aws_ring_buffer *ring_buffer); + +/** + * Cleans up a ring buffer allocator instance. Does not clean up the ring buffer. 
+ */ +AWS_COMMON_API void aws_ring_buffer_allocator_clean_up(struct aws_allocator *allocator); + #ifndef AWS_NO_STATIC_IMPL # include <aws/common/ring_buffer.inl> #endif /* AWS_NO_STATIC_IMPL */ diff --git a/source/ring_buffer.c b/source/ring_buffer.c index 1d750304e..ff649f2d7 100644 --- a/source/ring_buffer.c +++ b/source/ring_buffer.c @@ -268,3 +268,64 @@ bool aws_ring_buffer_buf_belongs_to_pool(const struct aws_ring_buffer *ring_buff AWS_POSTCONDITION(aws_byte_buf_is_valid(buf)); return rval; } + +/* Ring buffer allocator implementation */ +static void *s_ring_buffer_mem_acquire(struct aws_allocator *allocator, size_t size) { + struct aws_ring_buffer *buffer = allocator->impl; + struct aws_byte_buf buf; + AWS_ZERO_STRUCT(buf); + /* allocate extra space for the size */ + if (aws_ring_buffer_acquire(buffer, size + sizeof(size_t), &buf)) { + return NULL; + } + /* store the size ahead of the allocation */ + *((size_t *)buf.buffer) = buf.capacity; + return buf.buffer + sizeof(size_t); +} + +static void s_ring_buffer_mem_release(struct aws_allocator *allocator, void *ptr) { + /* back up to where the size is stored */ + const void *addr = ((uint8_t *)ptr - sizeof(size_t)); + const size_t size = *((size_t *)addr); + + struct aws_byte_buf buf = aws_byte_buf_from_array(addr, size); + buf.allocator = allocator; + + struct aws_ring_buffer *buffer = allocator->impl; + aws_ring_buffer_release(buffer, &buf); +} + +static void *s_ring_buffer_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) { + void *mem = s_ring_buffer_mem_acquire(allocator, num * size); + if (!mem) { + return NULL; + } + memset(mem, 0, num * size); + return mem; +} + +static void *s_ring_buffer_mem_realloc(struct aws_allocator *allocator, void *ptr, size_t old_size, size_t new_size) { + (void)allocator; + (void)ptr; + (void)old_size; + (void)new_size; + AWS_FATAL_ASSERT(!"ring_buffer_allocator does not support realloc, as it breaks allocation ordering"); + return NULL; +} + +int aws_ring_buffer_allocator_init(struct
aws_allocator *allocator, struct aws_ring_buffer *ring_buffer) { + if (allocator == NULL || ring_buffer == NULL) { + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + allocator->impl = ring_buffer; + allocator->mem_acquire = s_ring_buffer_mem_acquire; + allocator->mem_release = s_ring_buffer_mem_release; + allocator->mem_calloc = s_ring_buffer_mem_calloc; + allocator->mem_realloc = s_ring_buffer_mem_realloc; + return AWS_OP_SUCCESS; +} + +void aws_ring_buffer_allocator_clean_up(struct aws_allocator *allocator) { + AWS_ZERO_STRUCT(*allocator); +} diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 9f6b97789..ee24db97d 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -335,6 +335,7 @@ add_test_case(ring_buffer_acquire_up_to_test) add_test_case(ring_buffer_acquire_tail_always_chases_head_test) add_test_case(ring_buffer_acquire_multi_threaded_test) add_test_case(ring_buffer_acquire_up_to_multi_threaded_test) +add_test_case(ring_buffer_allocator_test) add_test_case(test_logging_filter_at_AWS_LL_NONE_s_logf_all_levels) add_test_case(test_logging_filter_at_AWS_LL_FATAL_s_logf_all_levels) diff --git a/tests/ring_buffer_test.c b/tests/ring_buffer_test.c index f6562f6c7..d2ecd9034 100644 --- a/tests/ring_buffer_test.c +++ b/tests/ring_buffer_test.c @@ -398,3 +398,40 @@ static int s_test_acquire_up_to_multi_threaded(struct aws_allocator *allocator, } AWS_TEST_CASE(ring_buffer_acquire_up_to_multi_threaded_test, s_test_acquire_up_to_multi_threaded) + +#define RING_BUFFER_ALLOCATOR_CAPACITY (16 * 1024) + +static int s_test_ring_buffer_allocator(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + struct aws_ring_buffer ring_buffer; + struct aws_allocator rb_allocator; + ASSERT_SUCCESS(aws_ring_buffer_init(&ring_buffer, allocator, RING_BUFFER_ALLOCATOR_CAPACITY)); + ASSERT_SUCCESS(aws_ring_buffer_allocator_init(&rb_allocator, &ring_buffer)); + + for (int cycles = 0; cycles < 10; ++cycles) { + size_t total_size = 0; + const size_t 
chunk_size = (cycles + 1) * 16; + int chunk_count = 0; + AWS_VARIABLE_LENGTH_ARRAY(void *, chunks, RING_BUFFER_ALLOCATOR_CAPACITY / (16 + sizeof(size_t))); + while ((total_size + chunk_size) <= RING_BUFFER_ALLOCATOR_CAPACITY) { + void *chunk = aws_mem_calloc(&rb_allocator, 1, chunk_size); + if (chunk == NULL) { + ASSERT_TRUE(chunk_count > 0); + break; + } + chunks[chunk_count++] = chunk; + total_size += chunk_size; + } + + for (int idx = 0; idx < chunk_count; ++idx) { + aws_mem_release(&rb_allocator, chunks[idx]); + } + } + + aws_ring_buffer_allocator_clean_up(&rb_allocator); + aws_ring_buffer_clean_up(&ring_buffer); + + return AWS_OP_SUCCESS; +} + +AWS_TEST_CASE(ring_buffer_allocator_test, s_test_ring_buffer_allocator);