Automated rollback of commit 6d09e2c.
PiperOrigin-RevId: 504432163
protobuf-github-bot authored and copybara-github committed Jan 25, 2023
1 parent 5bbc6fc commit 9416dca
Showing 6 changed files with 45 additions and 64 deletions.

src/google/protobuf/arena.cc (35 additions, 35 deletions)

@@ -81,8 +81,8 @@ ArenaBlock* SentryArenaBlock() {
 
 }  // namespace
 
-static SizedPtr AllocateMemory(const AllocationPolicy* policy_ptr,
-                               size_t last_size, size_t min_bytes) {
+static SerialArena::Memory AllocateMemory(const AllocationPolicy* policy_ptr,
+                                          size_t last_size, size_t min_bytes) {
   AllocationPolicy policy;  // default policy
   if (policy_ptr) policy = *policy_ptr;
   size_t size;
@@ -98,10 +98,13 @@ static SizedPtr AllocateMemory(const AllocationPolicy* policy_ptr,
                        SerialArena::kBlockHeaderSize);
   size = std::max(size, SerialArena::kBlockHeaderSize + min_bytes);
 
+  void* mem;
   if (policy.block_alloc == nullptr) {
-    return AllocateAtLeast(size);
+    mem = ::operator new(size);
+  } else {
+    mem = policy.block_alloc(size);
   }
-  return {policy.block_alloc(size), size};
+  return {mem, size};
 }
 
 class GetDeallocator {
@@ -110,18 +113,18 @@ class GetDeallocator {
       : dealloc_(policy ? policy->block_dealloc : nullptr),
         space_allocated_(space_allocated) {}
 
-  void operator()(SizedPtr mem) const {
+  void operator()(SerialArena::Memory mem) const {
 #ifdef ADDRESS_SANITIZER
     // This memory was provided by the underlying allocator as unpoisoned,
     // so return it in an unpoisoned state.
-    ASAN_UNPOISON_MEMORY_REGION(mem.p, mem.n);
+    ASAN_UNPOISON_MEMORY_REGION(mem.ptr, mem.size);
 #endif  // ADDRESS_SANITIZER
     if (dealloc_) {
-      dealloc_(mem.p, mem.n);
+      dealloc_(mem.ptr, mem.size);
     } else {
-      internal::SizedDelete(mem.p, mem.n);
+      internal::SizedDelete(mem.ptr, mem.size);
     }
-    *space_allocated_ += mem.n;
+    *space_allocated_ += mem.size;
   }
 
  private:
@@ -165,19 +168,20 @@ void SerialArena::Init(ArenaBlock* b, size_t offset) {
   cached_blocks_ = nullptr;
 }
 
-SerialArena* SerialArena::New(SizedPtr mem, ThreadSafeArena& parent) {
-  GOOGLE_ABSL_DCHECK_LE(kBlockHeaderSize + ThreadSafeArena::kSerialArenaSize, mem.n);
+SerialArena* SerialArena::New(Memory mem, ThreadSafeArena& parent) {
+  GOOGLE_ABSL_DCHECK_LE(kBlockHeaderSize + ThreadSafeArena::kSerialArenaSize,
+                        mem.size);
   ThreadSafeArenaStats::RecordAllocateStats(parent.arena_stats_.MutableStats(),
-                                            /*used=*/0, /*allocated=*/mem.n,
+                                            /*used=*/0, /*allocated=*/mem.size,
                                             /*wasted=*/0);
-  auto b = new (mem.p) ArenaBlock{nullptr, mem.n};
+  auto b = new (mem.ptr) ArenaBlock{nullptr, mem.size};
   return new (b->Pointer(kBlockHeaderSize)) SerialArena(b, parent);
 }
 
 template <typename Deallocator>
-SizedPtr SerialArena::Free(Deallocator deallocator) {
+SerialArena::Memory SerialArena::Free(Deallocator deallocator) {
   ArenaBlock* b = head();
-  SizedPtr mem = {b, b->size};
+  Memory mem = {b, b->size};
   while (b->next) {
     b = b->next;  // We must first advance before deleting this block
     deallocator(mem);
@@ -232,12 +236,12 @@ void SerialArena::AllocateNewBlock(size_t n) {
   // exclusive access to a cacheline. Hence we write it in terms of a
   // regular add.
   space_allocated_.store(
-      space_allocated_.load(std::memory_order_relaxed) + mem.n,
+      space_allocated_.load(std::memory_order_relaxed) + mem.size,
       std::memory_order_relaxed);
   ThreadSafeArenaStats::RecordAllocateStats(parent_.arena_stats_.MutableStats(),
                                             /*used=*/used,
-                                            /*allocated=*/mem.n, wasted);
-  auto* new_head = new (mem.p) ArenaBlock{old_head, mem.n};
+                                            /*allocated=*/mem.size, wasted);
+  auto* new_head = new (mem.ptr) ArenaBlock{old_head, mem.size};
   set_ptr(new_head->Pointer(kBlockHeaderSize));
   limit_ = new_head->Limit();
   // Previous writes must take effect before writing new head.
@@ -486,7 +490,7 @@ ArenaBlock* ThreadSafeArena::FirstBlock(void* buf, size_t size,
 
   GOOGLE_ABSL_DCHECK_EQ(reinterpret_cast<uintptr_t>(buf) & 7, 0u);
 
-  SizedPtr mem;
+  SerialArena::Memory mem;
   if (buf == nullptr || size < kBlockHeaderSize + kAllocPolicySize) {
     mem = AllocateMemory(&policy, 0, kAllocPolicySize);
   } else {
@@ -495,7 +499,7 @@
     alloc_policy_.set_is_user_owned_initial_block(true);
   }
 
-  return new (mem.p) ArenaBlock{nullptr, mem.n};
+  return new (mem.ptr) ArenaBlock{nullptr, mem.size};
 }
 
 void ThreadSafeArena::InitializeWithPolicy(const AllocationPolicy& policy) {
@@ -562,14 +566,10 @@ ThreadSafeArena::SerialArenaChunk* ThreadSafeArena::NewSerialArenaChunk(
       static_cast<uint32_t>(next_bytes - kHeaderSize) / kEntrySize;
   // Growth based on bytes needs to be adjusted by AllocSize.
   next_bytes = SerialArenaChunk::AllocSize(next_capacity);
+  void* mem;
+  mem = ::operator new(next_bytes);
 
-  // If we allocate bigger memory than requested, we should expand
-  // size to use that extra space, and add extra entries permitted
-  // by the extra space.
-  SizedPtr mem = AllocateAtLeast(next_bytes);
-  next_capacity = static_cast<uint32_t>(mem.n - kHeaderSize) / kEntrySize;
-  GOOGLE_ABSL_DCHECK_LE(SerialArenaChunk::AllocSize(next_capacity), mem.n);
-  return new (mem.p) SerialArenaChunk{next_capacity, id, serial};
+  return new (mem) SerialArenaChunk{next_capacity, id, serial};
 }
 
 // Tries to reserve an entry by atomic fetch_add. If the head chunk is already
@@ -627,15 +627,15 @@ ThreadSafeArena::~ThreadSafeArena() {
   if (alloc_policy_.is_user_owned_initial_block()) {
 #ifdef ADDRESS_SANITIZER
     // Unpoison the initial block, now that it's going back to the user.
-    ASAN_UNPOISON_MEMORY_REGION(mem.p, mem.n);
+    ASAN_UNPOISON_MEMORY_REGION(mem.ptr, mem.size);
 #endif  // ADDRESS_SANITIZER
-    space_allocated += mem.n;
-  } else if (mem.n > 0) {
+    space_allocated += mem.size;
+  } else if (mem.size > 0) {
     GetDeallocator(alloc_policy_.get(), &space_allocated)(mem);
   }
 }
 
-SizedPtr ThreadSafeArena::Free(size_t* space_allocated) {
+SerialArena::Memory ThreadSafeArena::Free(size_t* space_allocated) {
   auto deallocator = GetDeallocator(alloc_policy_.get(), space_allocated);
 
   WalkSerialArenaChunk([deallocator](SerialArenaChunk* chunk) {
@@ -647,8 +647,8 @@ SizedPtr ThreadSafeArena::Free(size_t* space_allocated) {
       SerialArena* serial = it->load(std::memory_order_relaxed);
       GOOGLE_ABSL_DCHECK_NE(serial, nullptr);
       // Always frees the first block of "serial" as it cannot be user-provided.
-      SizedPtr mem = serial->Free(deallocator);
-      GOOGLE_ABSL_DCHECK_NE(mem.p, nullptr);
+      SerialArena::Memory mem = serial->Free(deallocator);
+      GOOGLE_ABSL_DCHECK_NE(mem.ptr, nullptr);
       deallocator(mem);
     }
 
@@ -670,7 +670,7 @@ uint64_t ThreadSafeArena::Reset() {
   // allocated, always reuse the first block for the first arena.
   size_t space_allocated = 0;
   auto mem = Free(&space_allocated);
-  space_allocated += mem.n;
+  space_allocated += mem.size;
 
   // Reset the first arena with the first block. This avoids redundant
   // free / allocation and re-allocating for AllocationPolicy. Adjust offset if
@@ -680,7 +680,7 @@
     size_t offset = alloc_policy_.get() == nullptr
                         ? kBlockHeaderSize
                         : kBlockHeaderSize + kAllocPolicySize;
-    first_arena_.Init(new (mem.p) ArenaBlock{nullptr, mem.n}, offset);
+    first_arena_.Init(new (mem.ptr) ArenaBlock{nullptr, mem.size}, offset);
   } else {
     first_arena_.Init(SentryArenaBlock(), 0);
  }
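
The net effect in arena.cc is that block allocation once again reports exactly the number of bytes that were requested, rather than whatever the underlying allocator actually handed back. Below is a minimal standalone sketch of the restored allocation path, not the full implementation: Memory mirrors the SerialArena::Memory pair re-introduced in serial_arena.h, and BlockAlloc stands in for AllocationPolicy::block_alloc, with all other policy plumbing omitted.

#include <cstddef>
#include <new>

// Pointer plus the byte count that was requested for it, mirroring the
// restored SerialArena::Memory.
struct Memory {
  void* ptr;
  std::size_t size;
};

// Stand-in for AllocationPolicy::block_alloc (illustrative only).
using BlockAlloc = void* (*)(std::size_t);

// Restored shape of AllocateMemory: the requested size is echoed back
// unchanged, with no attempt to learn how much the allocator really reserved.
Memory AllocateBlock(BlockAlloc block_alloc, std::size_t size) {
  void* mem;
  if (block_alloc == nullptr) {
    mem = ::operator new(size);
  } else {
    mem = block_alloc(size);
  }
  return {mem, size};
}

Because mem.size is always the requested value after this rollback, callers can no longer grow a block's usable capacity from surplus bytes the allocator may have provided.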

src/google/protobuf/port.h (0 additions, 14 deletions)

@@ -57,20 +57,6 @@ class MessageLite;
 namespace internal {
 
 
-// See comments on `AllocateAtLeast` for information on size returning new.
-struct SizedPtr {
-  void* p;
-  size_t n;
-};
-
-// Allocates at least `size` bytes. This function follows the c++ language
-// proposal from D0901R10 (http://wg21.link/D0901R10) and will be implemented
-// in terms of the new operator new semantics when available. The allocated
-// memory should be released by a call to `SizedDelete` or `::operator delete`.
-inline SizedPtr AllocateAtLeast(size_t size) {
-  return {::operator new(size), size};
-}
-
 inline void SizedDelete(void* p, size_t size) {
 #if defined(__cpp_sized_deallocation)
   ::operator delete(p, size);
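
The deleted helper was the library's hook for size-returning allocation: per the removed comment, AllocateAtLeast follows the D0901R10 proposal, under which an allocator may reserve more bytes than requested and report the true amount so callers can use the surplus. The sketch below reproduces the deleted SizedPtr/AllocateAtLeast pair and adds a purely hypothetical caller to show how the reported size would be consumed; it is an illustration, not protobuf's API after this commit.

#include <cstddef>
#include <new>

struct SizedPtr {  // as deleted from port.h
  void* p;
  std::size_t n;
};

inline SizedPtr AllocateAtLeast(std::size_t size) {
  // Today this simply echoes the request; under a true size-returning
  // operator new, n could be larger than size.
  return {::operator new(size), size};
}

// Hypothetical caller: derives capacity from n rather than from the request,
// so it would automatically benefit if the allocator over-provided.
inline std::size_t UsableBytes(std::size_t requested, std::size_t header) {
  SizedPtr res = AllocateAtLeast(requested);
  std::size_t capacity = res.n - header;
  ::operator delete(res.p);  // the real code would pair this with SizedDelete
  return capacity;
}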

src/google/protobuf/repeated_field.h (1 addition, 9 deletions)

@@ -957,15 +957,7 @@ PROTOBUF_NOINLINE void RepeatedField<Element>::GrowNoAnnotate(int current_size,
   size_t bytes =
       kRepHeaderSize + sizeof(Element) * static_cast<size_t>(new_size);
   if (arena == nullptr) {
-    GOOGLE_ABSL_DCHECK_LE((bytes - kRepHeaderSize) / sizeof(Element),
-                          static_cast<size_t>(std::numeric_limits<int>::max()))
-        << "Requested size is too large to fit element count into int.";
-    internal::SizedPtr res = internal::AllocateAtLeast(bytes);
-    size_t num_available =
-        std::min((res.n - kRepHeaderSize) / sizeof(Element),
-                 static_cast<size_t>(std::numeric_limits<int>::max()));
-    new_size = static_cast<int>(num_available);
-    new_rep = static_cast<Rep*>(res.p);
+    new_rep = static_cast<Rep*>(::operator new(bytes));
   } else {
     new_rep = reinterpret_cast<Rep*>(Arena::CreateArray<char>(arena, bytes));
   }
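
The deleted lines in GrowNoAnnotate did more than allocate: they converted however many bytes AllocateAtLeast reported into extra element capacity, clamped so the element count still fits in an int. A small sketch of just that arithmetic, with the header and element sizes passed as parameters for illustration (the real code uses kRepHeaderSize and sizeof(Element)):

#include <algorithm>
#include <cstddef>
#include <limits>

// How many whole elements fit in allocated_bytes once the Rep header is paid
// for, clamped to the largest value representable as an int.
inline int UsableElementCount(std::size_t allocated_bytes,
                              std::size_t header_bytes,
                              std::size_t element_size) {
  std::size_t num_available =
      std::min((allocated_bytes - header_bytes) / element_size,
               static_cast<std::size_t>(std::numeric_limits<int>::max()));
  return static_cast<int>(num_available);
}

After the rollback, new_size keeps the requested value, so any bytes the allocator over-provides are simply left unused.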

src/google/protobuf/repeated_ptr_field.cc (1 addition, 3 deletions)

@@ -65,9 +65,7 @@ void** RepeatedPtrFieldBase::InternalExtend(int extend_amount) {
       << "Requested size is too large to fit into size_t.";
   size_t bytes = kRepHeaderSize + sizeof(old_rep->elements[0]) * new_size;
   if (arena == nullptr) {
-    internal::SizedPtr res = internal::AllocateAtLeast(bytes);
-    new_size = (res.n - kRepHeaderSize) / sizeof(old_rep->elements[0]);
-    rep_ = reinterpret_cast<Rep*>(res.p);
+    rep_ = reinterpret_cast<Rep*>(::operator new(bytes));
   } else {
     rep_ = reinterpret_cast<Rep*>(Arena::CreateArray<char>(arena, bytes));
   }

src/google/protobuf/serial_arena.h (7 additions, 2 deletions)

@@ -105,6 +105,11 @@ struct FirstSerialArena {
 // used.
 class PROTOBUF_EXPORT SerialArena {
  public:
+  struct Memory {
+    void* ptr;
+    size_t size;
+  };
+
   void CleanupList();
   uint64_t SpaceAllocated() const {
     return space_allocated_.load(std::memory_order_relaxed);
@@ -310,10 +315,10 @@ class PROTOBUF_EXPORT SerialArena {
   // future allocations.
   // The `parent` arena must outlive the serial arena, which is guaranteed
   // because the parent manages the lifetime of the serial arenas.
-  static SerialArena* New(SizedPtr mem, ThreadSafeArena& parent);
+  static SerialArena* New(SerialArena::Memory mem, ThreadSafeArena& parent);
   // Free SerialArena returning the memory passed in to New
   template <typename Deallocator>
-  SizedPtr Free(Deallocator deallocator);
+  Memory Free(Deallocator deallocator);
 
   // Members are declared here to track sizeof(SerialArena) and hotness
   // centrally. They are (roughly) laid out in descending order of hotness.

src/google/protobuf/thread_safe_arena.h (1 addition, 1 deletion)

@@ -222,7 +222,7 @@ class PROTOBUF_EXPORT ThreadSafeArena {
   // Releases all memory except the first block which it returns. The first
   // block might be owned by the user and thus need some extra checks before
   // deleting.
-  SizedPtr Free(size_t* space_allocated);
+  SerialArena::Memory Free(size_t* space_allocated);
 
 #ifdef _MSC_VER
 #pragma warning(disable : 4324)
