Imported ring buffer allocator from node CRT #527

Merged: 8 commits, Oct 17, 2019
11 changes: 11 additions & 0 deletions include/aws/common/ring_buffer.h
@@ -101,6 +101,17 @@ AWS_COMMON_API bool aws_ring_buffer_buf_belongs_to_pool(
const struct aws_ring_buffer *ring_buffer,
const struct aws_byte_buf *buf);

/**
 * Initializes the supplied allocator to allocate from the provided ring buffer. Allocations must be freed
 * in the same order they were acquired, or the ring buffer will assert.
*/
AWS_COMMON_API int aws_ring_buffer_allocator_init(struct aws_allocator *allocator, struct aws_ring_buffer *ring_buffer);

/**
* Cleans up a ring buffer allocator instance. Does not clean up the ring buffer.
*/
AWS_COMMON_API void aws_ring_buffer_allocator_clean_up(struct aws_allocator *allocator);

#ifndef AWS_NO_STATIC_IMPL
# include <aws/common/ring_buffer.inl>
#endif /* AWS_NO_STATIC_IMPL */
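For orientation before the implementation below, here is a minimal usage sketch of the new API (a hypothetical s_demo function, not part of this PR; NULL checks on the acquired pointers are elided for brevity). A ring buffer backs an aws_allocator, allocations flow through the standard aws_mem_acquire/aws_mem_release entry points, and releases happen in acquisition order as the header comment requires:

    #include <aws/common/common.h>
    #include <aws/common/ring_buffer.h>

    /* Hypothetical usage sketch, not part of this PR. */
    static int s_demo(struct aws_allocator *backing) {
        struct aws_ring_buffer ring_buffer;
        struct aws_allocator rb_allocator;

        if (aws_ring_buffer_init(&ring_buffer, backing, 4096)) {
            return AWS_OP_ERR;
        }
        if (aws_ring_buffer_allocator_init(&rb_allocator, &ring_buffer)) {
            aws_ring_buffer_clean_up(&ring_buffer);
            return AWS_OP_ERR;
        }

        /* allocate through the standard allocator entry points... */
        void *first = aws_mem_acquire(&rb_allocator, 128);
        void *second = aws_mem_acquire(&rb_allocator, 256);

        /* ...and release in the order the blocks were acquired */
        aws_mem_release(&rb_allocator, first);
        aws_mem_release(&rb_allocator, second);

        aws_ring_buffer_allocator_clean_up(&rb_allocator);
        aws_ring_buffer_clean_up(&ring_buffer);
        return AWS_OP_SUCCESS;
    }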
61 changes: 61 additions & 0 deletions source/ring_buffer.c
@@ -268,3 +268,64 @@ bool aws_ring_buffer_buf_belongs_to_pool(const struct aws_ring_buffer *ring_buff
AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
return rval;
}

/* Ring buffer allocator implementation */
static void *s_ring_buffer_mem_acquire(struct aws_allocator *allocator, size_t size) {
struct aws_ring_buffer *buffer = allocator->impl;
struct aws_byte_buf buf;
AWS_ZERO_STRUCT(buf);
/* allocate extra space for the size */
if (aws_ring_buffer_acquire(buffer, size + sizeof(size_t), &buf)) {
return NULL;
}
/* store the size ahead of the allocation */
*((size_t *)buf.buffer) = buf.capacity;
return buf.buffer + sizeof(size_t);
}

static void s_ring_buffer_mem_release(struct aws_allocator *allocator, void *ptr) {
/* back up to where the size is stored */
const void *addr = ((uint8_t *)ptr - sizeof(size_t));
const size_t size = *((size_t *)addr);

struct aws_byte_buf buf = aws_byte_buf_from_array(addr, size);
buf.allocator = allocator;

struct aws_ring_buffer *buffer = allocator->impl;
aws_ring_buffer_release(buffer, &buf);
}
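To make the size-prefix scheme above concrete: each acquisition reserves sizeof(size_t) extra bytes at the front of the block and stashes buf.capacity there, so mem_release can rebuild the exact aws_byte_buf the ring buffer originally handed out. A sketch of the layout (illustration only, under the assumption that aws_ring_buffer_acquire returns a block of exactly the requested capacity):

    /*
     * Block layout used by the allocator (illustration only):
     *
     *   +---------------------+-----------------------------+
     *   | size_t: block size  | caller's bytes (size)       |
     *   +---------------------+-----------------------------+
     *   ^ buf.buffer           ^ pointer returned to caller
     *
     * The stored value is buf.capacity == size + sizeof(size_t), which is
     * what s_ring_buffer_mem_release reads back to reconstruct the byte_buf
     * before calling aws_ring_buffer_release().
     */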

static void *s_ring_buffer_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
void *mem = s_ring_buffer_mem_acquire(allocator, num * size);
if (!mem) {
return NULL;
}
memset(mem, 0, num * size);
return mem;
}
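One caveat worth noting: num * size above can wrap around size_t for very large inputs. An overflow-checked variant might look like the following sketch (illustration only; the PR as written multiplies directly, s_ring_buffer_mem_calloc_checked is a hypothetical name, and SIZE_MAX comes from <stdint.h>):

    /* Hypothetical overflow-checked variant, not part of this PR. */
    static void *s_ring_buffer_mem_calloc_checked(struct aws_allocator *allocator, size_t num, size_t size) {
        if (size != 0 && num > SIZE_MAX / size) {
            return NULL; /* num * size would overflow size_t */
        }
        void *mem = s_ring_buffer_mem_acquire(allocator, num * size);
        if (!mem) {
            return NULL;
        }
        memset(mem, 0, num * size);
        return mem;
    }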

static void *s_ring_buffer_mem_realloc(struct aws_allocator *allocator, void *ptr, size_t old_size, size_t new_size) {
(void)allocator;
(void)ptr;
(void)old_size;
(void)new_size;
AWS_FATAL_ASSERT(false && "ring_buffer_allocator does not support realloc, as it breaks allocation ordering");
return NULL;
}

int aws_ring_buffer_allocator_init(struct aws_allocator *allocator, struct aws_ring_buffer *ring_buffer) {
if (allocator == NULL || ring_buffer == NULL) {
return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
}

allocator->impl = ring_buffer;
allocator->mem_acquire = s_ring_buffer_mem_acquire;
allocator->mem_release = s_ring_buffer_mem_release;
allocator->mem_calloc = s_ring_buffer_mem_calloc;
allocator->mem_realloc = s_ring_buffer_mem_realloc;
return AWS_OP_SUCCESS;
}

void aws_ring_buffer_allocator_clean_up(struct aws_allocator *allocator) {
AWS_ZERO_STRUCT(*allocator);
}
1 change: 1 addition & 0 deletions tests/CMakeLists.txt
@@ -335,6 +335,7 @@ add_test_case(ring_buffer_acquire_up_to_test)
add_test_case(ring_buffer_acquire_tail_always_chases_head_test)
add_test_case(ring_buffer_acquire_multi_threaded_test)
add_test_case(ring_buffer_acquire_up_to_multi_threaded_test)
add_test_case(ring_buffer_allocator_test)

add_test_case(test_logging_filter_at_AWS_LL_NONE_s_logf_all_levels)
add_test_case(test_logging_filter_at_AWS_LL_FATAL_s_logf_all_levels)
37 changes: 37 additions & 0 deletions tests/ring_buffer_test.c
@@ -398,3 +398,40 @@ static int s_test_acquire_up_to_multi_threaded(struct aws_allocator *allocator,
}

AWS_TEST_CASE(ring_buffer_acquire_up_to_multi_threaded_test, s_test_acquire_up_to_multi_threaded)

#define RING_BUFFER_ALLOCATOR_CAPACITY (16 * 1024)

static int s_test_ring_buffer_allocator(struct aws_allocator *allocator, void *ctx) {
(void)ctx;
struct aws_ring_buffer ring_buffer;
struct aws_allocator rb_allocator;
ASSERT_SUCCESS(aws_ring_buffer_init(&ring_buffer, allocator, RING_BUFFER_ALLOCATOR_CAPACITY));
ASSERT_SUCCESS(aws_ring_buffer_allocator_init(&rb_allocator, &ring_buffer));

for (int cycles = 0; cycles < 10; ++cycles) {
size_t total_size = 0;
const size_t chunk_size = (cycles + 1) * 16;
int chunk_count = 0;
AWS_VARIABLE_LENGTH_ARRAY(void *, chunks, RING_BUFFER_ALLOCATOR_CAPACITY / (16 + sizeof(size_t)));
while ((total_size + chunk_size) <= RING_BUFFER_ALLOCATOR_CAPACITY) {
void *chunk = aws_mem_calloc(&rb_allocator, 1, chunk_size);
if (chunk == NULL) {
ASSERT_TRUE(chunk_count > 0);
break;
}
chunks[chunk_count++] = chunk;
total_size += chunk_size;
}

for (int idx = 0; idx < chunk_count; ++idx) {
aws_mem_release(&rb_allocator, chunks[idx]);
}
}

aws_ring_buffer_allocator_clean_up(&rb_allocator);
aws_ring_buffer_clean_up(&ring_buffer);

return AWS_OP_SUCCESS;
}

AWS_TEST_CASE(ring_buffer_allocator_test, s_test_ring_buffer_allocator)