From d3d845a7596ea41c5119640a0f61a864ee8243f0 Mon Sep 17 00:00:00 2001
From: Michael Graeb
Date: Thu, 12 Oct 2023 18:57:53 +0000
Subject: [PATCH] no one knows what "special files" means including two-days-ago me

---
 include/aws/common/byte_buf.h |  4 ++--
 source/file.c                 | 15 +++++++--------
 tests/file_test.c             | 18 ++++++++++--------
 3 files changed, 19 insertions(+), 18 deletions(-)

diff --git a/include/aws/common/byte_buf.h b/include/aws/common/byte_buf.h
index dc41fc1c4..6fc5c3ff9 100644
--- a/include/aws/common/byte_buf.h
+++ b/include/aws/common/byte_buf.h
@@ -137,11 +137,11 @@ int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocat
 
 /**
  * Same as aws_byte_buf_init_from_file(), but for reading "special files" like /proc/cpuinfo.
- * This files don't accurately report their size, so size_hint is used as initial buffer size,
+ * These files don't accurately report their size, so size_hint is used as initial buffer size,
  * and the buffer grows until the whole file is read.
  */
 AWS_COMMON_API
-int aws_byte_buf_init_from_special_file(
+int aws_byte_buf_init_from_file_with_size_hint(
     struct aws_byte_buf *out_buf,
     struct aws_allocator *alloc,
     const char *filename,

diff --git a/source/file.c b/source/file.c
index 35b90fceb..f17d9d07a 100644
--- a/source/file.c
+++ b/source/file.c
@@ -11,16 +11,15 @@
 #include
 
-/* For "special files", there's no point querying file size before reading.
+/* For "special files", the OS often lies about size.
  * For example, on Amazon Linux 2:
  * /proc/cpuinfo: size is 0, but contents are several KB of data.
  * /sys/devices/virtual/dmi/id/product_name: size is 4096, but contents are "c5.2xlarge"
  *
- * Therefore, let users pass a hint for the buffer's initial size,
- * and grow the buffer as necessary as we read until EOF.
+ * Therefore, we may need to grow the buffer as we read until EOF.
 * This is the min/max step size for growth.
 */
-#define MIN_BUFFER_GROWTH_READING_SPECIAL_FILES 32
-#define MAX_BUFFER_GROWTH_READING_SPECIAL_FILES 4096
+#define MIN_BUFFER_GROWTH_READING_FILES 32
+#define MAX_BUFFER_GROWTH_READING_FILES 4096
 
 FILE *aws_fopen(const char *file_path, const char *mode) {
     if (!file_path || strlen(file_path) == 0) {
@@ -90,8 +89,8 @@ static int s_byte_buf_init_from_file(
         /* Expand buffer if necessary (at a reasonable rate) */
         if (out_buf->len == out_buf->capacity) {
            size_t additional_capacity = out_buf->capacity;
-            additional_capacity = aws_max_size(MIN_BUFFER_GROWTH_READING_SPECIAL_FILES, additional_capacity);
-            additional_capacity = aws_min_size(MAX_BUFFER_GROWTH_READING_SPECIAL_FILES, additional_capacity);
+            additional_capacity = aws_max_size(MIN_BUFFER_GROWTH_READING_FILES, additional_capacity);
+            additional_capacity = aws_min_size(MAX_BUFFER_GROWTH_READING_FILES, additional_capacity);
            if (aws_byte_buf_reserve_relative(out_buf, additional_capacity)) {
                AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to grow buffer for file:'%s'", filename);
                goto error;
@@ -146,7 +145,7 @@ int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocat
     return s_byte_buf_init_from_file(out_buf, alloc, filename, true /*use_file_size_as_hint*/, 0 /*size_hint*/);
 }
 
-int aws_byte_buf_init_from_special_file(
+int aws_byte_buf_init_from_file_with_size_hint(
     struct aws_byte_buf *out_buf,
     struct aws_allocator *alloc,
     const char *filename,

diff --git a/tests/file_test.c b/tests/file_test.c
index 55e7b476a..6a3f4fe8b 100644
--- a/tests/file_test.c
+++ b/tests/file_test.c
@@ -462,29 +462,29 @@ static int s_create_file_then_read_it(struct aws_allocator *allocator, struct aw
     ASSERT_SUCCESS(s_check_byte_buf_from_file(&buf, contents));
     aws_byte_buf_clean_up(&buf);
 
-    /* now we check aws_byte_buf_init_from_special_file() with different size_hints */
+    /* now check aws_byte_buf_init_from_file_with_size_hint() with different size_hints */
 
     /* size_hint more than big enough */
     size_t size_hint = contents.len * 2;
-    ASSERT_SUCCESS(aws_byte_buf_init_from_special_file(&buf, allocator, filename, size_hint));
+    ASSERT_SUCCESS(aws_byte_buf_init_from_file_with_size_hint(&buf, allocator, filename, size_hint));
     ASSERT_SUCCESS(s_check_byte_buf_from_file(&buf, contents));
     aws_byte_buf_clean_up(&buf);
 
     /* size_hint not big enough for null-terminator */
     size_hint = contents.len;
-    ASSERT_SUCCESS(aws_byte_buf_init_from_special_file(&buf, allocator, filename, size_hint));
+    ASSERT_SUCCESS(aws_byte_buf_init_from_file_with_size_hint(&buf, allocator, filename, size_hint));
     ASSERT_SUCCESS(s_check_byte_buf_from_file(&buf, contents));
     aws_byte_buf_clean_up(&buf);
 
     /* size_hint 0 */
     size_hint = 0;
-    ASSERT_SUCCESS(aws_byte_buf_init_from_special_file(&buf, allocator, filename, size_hint));
+    ASSERT_SUCCESS(aws_byte_buf_init_from_file_with_size_hint(&buf, allocator, filename, size_hint));
     ASSERT_SUCCESS(s_check_byte_buf_from_file(&buf, contents));
     aws_byte_buf_clean_up(&buf);
 
     /* size_hint 1 */
     size_hint = 1;
-    ASSERT_SUCCESS(aws_byte_buf_init_from_special_file(&buf, allocator, filename, size_hint));
+    ASSERT_SUCCESS(aws_byte_buf_init_from_file_with_size_hint(&buf, allocator, filename, size_hint));
     ASSERT_SUCCESS(s_check_byte_buf_from_file(&buf, contents));
     aws_byte_buf_clean_up(&buf);
 
@@ -502,11 +502,13 @@ static int s_read_special_file(struct aws_allocator *allocator, const char *file
     }
 
     struct aws_byte_buf buf;
-    ASSERT_SUCCESS(aws_byte_buf_init_from_special_file(&buf, allocator, filename, 128));
+    ASSERT_SUCCESS(aws_byte_buf_init_from_file(&buf, allocator, filename));
     ASSERT_TRUE(buf.capacity > buf.len, "Buffer should end with null-terminator");
     ASSERT_UINT_EQUALS(0, buf.buffer[buf.len], "Buffer should end with null-terminator");
 
-    if (strcmp("/dev/null", filename) != 0) {
+    if (strcmp("/dev/null", filename) == 0) {
+        ASSERT_UINT_EQUALS(0, buf.len, "expected /dev/null to be empty");
+    } else {
         ASSERT_TRUE(buf.len > 0, "expected special file to have data");
     }
 
@@ -530,7 +532,7 @@ static int s_test_byte_buf_init_from_file(struct aws_allocator *allocator, void
     ASSERT_SUCCESS(s_create_file_then_read_it(allocator, aws_byte_cursor_from_buf(&big_rando)));
     aws_byte_buf_clean_up(&big_rando);
 
-    /* test aws_byte_buf_init_from_special_file() on actual "special files" (if they exist) */
+    /* test some "special files" (if they exist) */
     ASSERT_SUCCESS(s_read_special_file(allocator, "/proc/cpuinfo"));
     ASSERT_SUCCESS(s_read_special_file(allocator, "/proc/net/tcp"));
     ASSERT_SUCCESS(s_read_special_file(allocator, "/sys/devices/virtual/dmi/id/sys_vendor"));
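
For reference, a minimal usage sketch of the renamed function (not part of the patch). It assumes the usual aws-c-common setup via aws_common_library_init() with the default allocator, and the 4096 size_hint is an arbitrary starting capacity; per the header comment above, the buffer simply grows until the whole file is read.

    #include <aws/common/byte_buf.h>
    #include <aws/common/common.h>

    #include <stdio.h>

    int main(void) {
        struct aws_allocator *alloc = aws_default_allocator();
        aws_common_library_init(alloc);

        /* /proc/cpuinfo reports a size of 0, so the hint seeds the initial
         * capacity and the buffer grows as needed until EOF. */
        struct aws_byte_buf buf;
        if (aws_byte_buf_init_from_file_with_size_hint(&buf, alloc, "/proc/cpuinfo", 4096 /*size_hint*/)) {
            fprintf(stderr, "read failed: %s\n", aws_error_name(aws_last_error()));
            aws_common_library_clean_up();
            return 1;
        }

        /* Contents are null-terminated, but the terminator is not counted in buf.len. */
        printf("read %zu bytes\n", buf.len);

        aws_byte_buf_clean_up(&buf);
        aws_common_library_clean_up();
        return 0;
    }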