From 9416dca8bf62ae84e45d4371fba711bb421d1fc8 Mon Sep 17 00:00:00 2001
From: Protobuf Team Bot
Date: Tue, 24 Jan 2023 18:51:56 -0800
Subject: [PATCH] Automated rollback of commit 6d09e2c80a6481ddebd5265a83b01a62b240a667.

PiperOrigin-RevId: 504432163
---
 src/google/protobuf/arena.cc              | 70 +++++++++++------------
 src/google/protobuf/port.h                | 14 -----
 src/google/protobuf/repeated_field.h      | 10 +---
 src/google/protobuf/repeated_ptr_field.cc |  4 +-
 src/google/protobuf/serial_arena.h        |  9 ++-
 src/google/protobuf/thread_safe_arena.h   |  2 +-
 6 files changed, 45 insertions(+), 64 deletions(-)

diff --git a/src/google/protobuf/arena.cc b/src/google/protobuf/arena.cc
index 7f57ea86899e..25ddcc3f1a32 100644
--- a/src/google/protobuf/arena.cc
+++ b/src/google/protobuf/arena.cc
@@ -81,8 +81,8 @@ ArenaBlock* SentryArenaBlock() {
 
 }  // namespace
 
-static SizedPtr AllocateMemory(const AllocationPolicy* policy_ptr,
-                               size_t last_size, size_t min_bytes) {
+static SerialArena::Memory AllocateMemory(const AllocationPolicy* policy_ptr,
+                                          size_t last_size, size_t min_bytes) {
   AllocationPolicy policy;  // default policy
   if (policy_ptr) policy = *policy_ptr;
   size_t size;
@@ -98,10 +98,13 @@ static SizedPtr AllocateMemory(const AllocationPolicy* policy_ptr,
                   SerialArena::kBlockHeaderSize);
   size = std::max(size, SerialArena::kBlockHeaderSize + min_bytes);
 
+  void* mem;
   if (policy.block_alloc == nullptr) {
-    return AllocateAtLeast(size);
+    mem = ::operator new(size);
+  } else {
+    mem = policy.block_alloc(size);
   }
-  return {policy.block_alloc(size), size};
+  return {mem, size};
 }
 
 class GetDeallocator {
@@ -110,18 +113,18 @@ class GetDeallocator {
       : dealloc_(policy ? policy->block_dealloc : nullptr),
         space_allocated_(space_allocated) {}
 
-  void operator()(SizedPtr mem) const {
+  void operator()(SerialArena::Memory mem) const {
 #ifdef ADDRESS_SANITIZER
     // This memory was provided by the underlying allocator as unpoisoned,
     // so return it in an unpoisoned state.
-    ASAN_UNPOISON_MEMORY_REGION(mem.p, mem.n);
+    ASAN_UNPOISON_MEMORY_REGION(mem.ptr, mem.size);
 #endif  // ADDRESS_SANITIZER
     if (dealloc_) {
-      dealloc_(mem.p, mem.n);
+      dealloc_(mem.ptr, mem.size);
     } else {
-      internal::SizedDelete(mem.p, mem.n);
+      internal::SizedDelete(mem.ptr, mem.size);
     }
-    *space_allocated_ += mem.n;
+    *space_allocated_ += mem.size;
   }
 
  private:
@@ -165,19 +168,20 @@ void SerialArena::Init(ArenaBlock* b, size_t offset) {
   cached_blocks_ = nullptr;
 }
 
-SerialArena* SerialArena::New(SizedPtr mem, ThreadSafeArena& parent) {
-  GOOGLE_ABSL_DCHECK_LE(kBlockHeaderSize + ThreadSafeArena::kSerialArenaSize, mem.n);
+SerialArena* SerialArena::New(Memory mem, ThreadSafeArena& parent) {
+  GOOGLE_ABSL_DCHECK_LE(kBlockHeaderSize + ThreadSafeArena::kSerialArenaSize,
+                        mem.size);
 
   ThreadSafeArenaStats::RecordAllocateStats(parent.arena_stats_.MutableStats(),
-                                            /*used=*/0, /*allocated=*/mem.n,
+                                            /*used=*/0, /*allocated=*/mem.size,
                                             /*wasted=*/0);
-  auto b = new (mem.p) ArenaBlock{nullptr, mem.n};
+  auto b = new (mem.ptr) ArenaBlock{nullptr, mem.size};
   return new (b->Pointer(kBlockHeaderSize)) SerialArena(b, parent);
 }
 
 template <typename Deallocator>
-SizedPtr SerialArena::Free(Deallocator deallocator) {
+SerialArena::Memory SerialArena::Free(Deallocator deallocator) {
   ArenaBlock* b = head();
-  SizedPtr mem = {b, b->size};
+  Memory mem = {b, b->size};
   while (b->next) {
     b = b->next;  // We must first advance before deleting this block
     deallocator(mem);
@@ -232,12 +236,12 @@ void SerialArena::AllocateNewBlock(size_t n) {
   // exclusive access to a cacheline. Hence we write it in terms of a
   // regular add.
   space_allocated_.store(
-      space_allocated_.load(std::memory_order_relaxed) + mem.n,
+      space_allocated_.load(std::memory_order_relaxed) + mem.size,
       std::memory_order_relaxed);
 
   ThreadSafeArenaStats::RecordAllocateStats(parent_.arena_stats_.MutableStats(),
                                             /*used=*/used,
-                                            /*allocated=*/mem.n, wasted);
-  auto* new_head = new (mem.p) ArenaBlock{old_head, mem.n};
+                                            /*allocated=*/mem.size, wasted);
+  auto* new_head = new (mem.ptr) ArenaBlock{old_head, mem.size};
   set_ptr(new_head->Pointer(kBlockHeaderSize));
   limit_ = new_head->Limit();
   // Previous writes must take effect before writing new head.
@@ -486,7 +490,7 @@ ArenaBlock* ThreadSafeArena::FirstBlock(void* buf, size_t size,
   GOOGLE_ABSL_DCHECK_EQ(reinterpret_cast<uintptr_t>(buf) & 7, 0u);
 
-  SizedPtr mem;
+  SerialArena::Memory mem;
   if (buf == nullptr || size < kBlockHeaderSize + kAllocPolicySize) {
     mem = AllocateMemory(&policy, 0, kAllocPolicySize);
   } else {
@@ -495,7 +499,7 @@ ArenaBlock* ThreadSafeArena::FirstBlock(void* buf, size_t size,
     alloc_policy_.set_is_user_owned_initial_block(true);
   }
 
-  return new (mem.p) ArenaBlock{nullptr, mem.n};
+  return new (mem.ptr) ArenaBlock{nullptr, mem.size};
 }
 
 void ThreadSafeArena::InitializeWithPolicy(const AllocationPolicy& policy) {
@@ -562,14 +566,10 @@ ThreadSafeArena::SerialArenaChunk* ThreadSafeArena::NewSerialArenaChunk(
       static_cast<uint32_t>(next_bytes - kHeaderSize) / kEntrySize;
   // Growth based on bytes needs to be adjusted by AllocSize.
   next_bytes = SerialArenaChunk::AllocSize(next_capacity);
+  void* mem;
+  mem = ::operator new(next_bytes);
 
-  // If we allocate bigger memory than requested, we should expand
-  // size to use that extra space, and add extra entries permitted
-  // by the extra space.
-  SizedPtr mem = AllocateAtLeast(next_bytes);
-  next_capacity = static_cast<uint32_t>(mem.n - kHeaderSize) / kEntrySize;
-  GOOGLE_ABSL_DCHECK_LE(SerialArenaChunk::AllocSize(next_capacity), mem.n);
-  return new (mem.p) SerialArenaChunk{next_capacity, id, serial};
+  return new (mem) SerialArenaChunk{next_capacity, id, serial};
 }
 
 // Tries to reserve an entry by atomic fetch_add. If the head chunk is already
@@ -627,15 +627,15 @@ ThreadSafeArena::~ThreadSafeArena() {
   if (alloc_policy_.is_user_owned_initial_block()) {
 #ifdef ADDRESS_SANITIZER
     // Unpoison the initial block, now that it's going back to the user.
-    ASAN_UNPOISON_MEMORY_REGION(mem.p, mem.n);
+    ASAN_UNPOISON_MEMORY_REGION(mem.ptr, mem.size);
 #endif  // ADDRESS_SANITIZER
-    space_allocated += mem.n;
-  } else if (mem.n > 0) {
+    space_allocated += mem.size;
+  } else if (mem.size > 0) {
     GetDeallocator(alloc_policy_.get(), &space_allocated)(mem);
   }
 }
 
-SizedPtr ThreadSafeArena::Free(size_t* space_allocated) {
+SerialArena::Memory ThreadSafeArena::Free(size_t* space_allocated) {
   auto deallocator = GetDeallocator(alloc_policy_.get(), space_allocated);
 
   WalkSerialArenaChunk([deallocator](SerialArenaChunk* chunk) {
@@ -647,8 +647,8 @@ SizedPtr ThreadSafeArena::Free(size_t* space_allocated) {
       SerialArena* serial = it->load(std::memory_order_relaxed);
       GOOGLE_ABSL_DCHECK_NE(serial, nullptr);
       // Always frees the first block of "serial" as it cannot be user-provided.
-      SizedPtr mem = serial->Free(deallocator);
-      GOOGLE_ABSL_DCHECK_NE(mem.p, nullptr);
+      SerialArena::Memory mem = serial->Free(deallocator);
+      GOOGLE_ABSL_DCHECK_NE(mem.ptr, nullptr);
       deallocator(mem);
     }
 
@@ -670,7 +670,7 @@ uint64_t ThreadSafeArena::Reset() {
   // allocated, always reuse the first block for the first arena.
   size_t space_allocated = 0;
   auto mem = Free(&space_allocated);
-  space_allocated += mem.n;
+  space_allocated += mem.size;
 
   // Reset the first arena with the first block. This avoids redundant
   // free / allocation and re-allocating for AllocationPolicy. Adjust offset if
@@ -680,7 +680,7 @@ uint64_t ThreadSafeArena::Reset() {
     size_t offset = alloc_policy_.get() == nullptr
                         ? kBlockHeaderSize
                         : kBlockHeaderSize + kAllocPolicySize;
-    first_arena_.Init(new (mem.p) ArenaBlock{nullptr, mem.n}, offset);
+    first_arena_.Init(new (mem.ptr) ArenaBlock{nullptr, mem.size}, offset);
   } else {
     first_arena_.Init(SentryArenaBlock(), 0);
   }
diff --git a/src/google/protobuf/port.h b/src/google/protobuf/port.h
index 7c8ce83da6c1..2fbca5dc6852 100644
--- a/src/google/protobuf/port.h
+++ b/src/google/protobuf/port.h
@@ -57,20 +57,6 @@ class MessageLite;
 
 namespace internal {
 
-// See comments on `AllocateAtLeast` for information on size returning new.
-struct SizedPtr {
-  void* p;
-  size_t n;
-};
-
-// Allocates at least `size` bytes. This function follows the c++ language
-// proposal from D0901R10 (http://wg21.link/D0901R10) and will be implemented
-// in terms of the new operator new semantics when available. The allocated
-// memory should be released by a call to `SizedDelete` or `::operator delete`.
-inline SizedPtr AllocateAtLeast(size_t size) {
-  return {::operator new(size), size};
-}
-
 inline void SizedDelete(void* p, size_t size) {
 #if defined(__cpp_sized_deallocation)
   ::operator delete(p, size);
diff --git a/src/google/protobuf/repeated_field.h b/src/google/protobuf/repeated_field.h
index 5fc9e77848ef..e90ba4f623e2 100644
--- a/src/google/protobuf/repeated_field.h
+++ b/src/google/protobuf/repeated_field.h
@@ -957,15 +957,7 @@ PROTOBUF_NOINLINE void RepeatedField<Element>::GrowNoAnnotate(int current_size,
   size_t bytes =
       kRepHeaderSize + sizeof(Element) * static_cast<size_t>(new_size);
   if (arena == nullptr) {
-    GOOGLE_ABSL_DCHECK_LE((bytes - kRepHeaderSize) / sizeof(Element),
-                          static_cast<size_t>(std::numeric_limits<int>::max()))
-        << "Requested size is too large to fit element count into int.";
-    internal::SizedPtr res = internal::AllocateAtLeast(bytes);
-    size_t num_available =
-        std::min((res.n - kRepHeaderSize) / sizeof(Element),
-                 static_cast<size_t>(std::numeric_limits<int>::max()));
-    new_size = static_cast<int>(num_available);
-    new_rep = static_cast<Rep*>(res.p);
+    new_rep = static_cast<Rep*>(::operator new(bytes));
   } else {
     new_rep = reinterpret_cast<Rep*>(Arena::CreateArray<char>(arena, bytes));
   }
diff --git a/src/google/protobuf/repeated_ptr_field.cc b/src/google/protobuf/repeated_ptr_field.cc
index b1476e08d8ec..025b0875773b 100644
--- a/src/google/protobuf/repeated_ptr_field.cc
+++ b/src/google/protobuf/repeated_ptr_field.cc
@@ -65,9 +65,7 @@ void** RepeatedPtrFieldBase::InternalExtend(int extend_amount) {
       << "Requested size is too large to fit into size_t.";
   size_t bytes = kRepHeaderSize + sizeof(old_rep->elements[0]) * new_size;
   if (arena == nullptr) {
-    internal::SizedPtr res = internal::AllocateAtLeast(bytes);
-    new_size = (res.n - kRepHeaderSize) / sizeof(old_rep->elements[0]);
-    rep_ = reinterpret_cast<Rep*>(res.p);
+    rep_ = reinterpret_cast<Rep*>(::operator new(bytes));
   } else {
     rep_ = reinterpret_cast<Rep*>(Arena::CreateArray<char>(arena, bytes));
   }
diff --git a/src/google/protobuf/serial_arena.h b/src/google/protobuf/serial_arena.h
index 6c0908d98e1a..19d0e70dd160 100644
--- a/src/google/protobuf/serial_arena.h
+++ b/src/google/protobuf/serial_arena.h
@@ -105,6 +105,11 @@ struct FirstSerialArena {
 // used.
 class PROTOBUF_EXPORT SerialArena {
  public:
+  struct Memory {
+    void* ptr;
+    size_t size;
+  };
+
   void CleanupList();
   uint64_t SpaceAllocated() const {
     return space_allocated_.load(std::memory_order_relaxed);
   }
@@ -310,10 +315,10 @@ class PROTOBUF_EXPORT SerialArena {
   // future allocations.
   // The `parent` arena must outlive the serial arena, which is guaranteed
   // because the parent manages the lifetime of the serial arenas.
-  static SerialArena* New(SizedPtr mem, ThreadSafeArena& parent);
+  static SerialArena* New(SerialArena::Memory mem, ThreadSafeArena& parent);
   // Free SerialArena returning the memory passed in to New
   template <typename Deallocator>
-  SizedPtr Free(Deallocator deallocator);
+  Memory Free(Deallocator deallocator);
 
   // Members are declared here to track sizeof(SerialArena) and hotness
   // centrally. They are (roughly) laid out in descending order of hotness.
diff --git a/src/google/protobuf/thread_safe_arena.h b/src/google/protobuf/thread_safe_arena.h
index 1da9d091e693..b30d85aa2f71 100644
--- a/src/google/protobuf/thread_safe_arena.h
+++ b/src/google/protobuf/thread_safe_arena.h
@@ -222,7 +222,7 @@ class PROTOBUF_EXPORT ThreadSafeArena {
   // Releases all memory except the first block which it returns. The first
   // block might be owned by the user and thus need some extra checks before
   // deleting.
-  SizedPtr Free(size_t* space_allocated);
+  SerialArena::Memory Free(size_t* space_allocated);
 
 #ifdef _MSC_VER
 #pragma warning(disable : 4324)
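
Note (not part of the upstream patch): the rollback above replaces the size-returning
allocation helper (`SizedPtr` / `AllocateAtLeast`, removed from port.h) with plain
::operator new, dropping the D0901R10-style hook that was meant to let callers use the
allocator's actual block size. A minimal sketch of the two idioms, reusing the
pre-rollback names; the standalone demo() caller is hypothetical and only illustrates
the contract:

  #include <cstddef>
  #include <new>

  struct SizedPtr {
    void* p;   // start of the allocation
    size_t n;  // usable size; may exceed the request under P0901-style operator new
  };

  // Pre-rollback idiom: report the size actually allocated so callers (e.g. the
  // RepeatedField growth path) can grow into any slack instead of wasting it.
  inline SizedPtr AllocateAtLeast(size_t size) {
    return {::operator new(size), size};
  }

  // Post-rollback idiom: the requested size is all the caller gets to know.
  inline void* AllocateExactly(size_t size) { return ::operator new(size); }

  void demo() {
    SizedPtr block = AllocateAtLeast(128);
    ::operator delete(block.p, block.n);  // sized delete, mirrors internal::SizedDelete

    void* raw = AllocateExactly(128);
    ::operator delete(raw, static_cast<size_t>(128));
  }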