Skip to content

Commit

Permalink
Optimize BitMapObjectPool::ForEachActiveObject for code size (project…
Browse files Browse the repository at this point in the history
  • Loading branch information
kghost authored and Nikita committed Sep 23, 2021
1 parent 16beb1e commit ce7381e
Show file tree
Hide file tree
Showing 2 changed files with 52 additions and 27 deletions.
28 changes: 28 additions & 0 deletions src/lib/support/Pool.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -73,4 +73,32 @@ void StaticAllocatorBitmap::Deallocate(void * element)
mAllocated--;
}

size_t StaticAllocatorBitmap::IndexOf(void * element)
{
    // Distance in bytes from the start of the element storage to `element`.
    const std::ptrdiff_t byteOffset = static_cast<uint8_t *>(element) - static_cast<uint8_t *>(mElements);
    assert(byteOffset >= 0);
    // The pointer must land exactly on an element boundary.
    assert(static_cast<size_t>(byteOffset) % mElementSize == 0);
    // Convert the byte distance into a zero-based slot index.
    const size_t slot = static_cast<size_t>(byteOffset) / mElementSize;
    assert(slot < Capacity());
    return slot;
}

bool StaticAllocatorBitmap::ForEachActiveObjectInner(void * context, Lambda lambda)
{
for (size_t word = 0; word * kBitChunkSize < Capacity(); ++word)
{
auto & usage = mUsage[word];
auto value = usage.load(std::memory_order_relaxed);
for (size_t offset = 0; offset < kBitChunkSize && offset + word * kBitChunkSize < Capacity(); ++offset)
{
if ((value & (kBit1 << offset)) != 0)
{
if (!lambda(context, At(word * kBitChunkSize + offset)))
return false;
}
}
}
return true;
}

} // namespace chip
51 changes: 24 additions & 27 deletions src/lib/support/Pool.h
Original file line number Diff line number Diff line change
Expand Up @@ -65,15 +65,10 @@ class StaticAllocatorBitmap : public StaticAllocatorBase

protected:
// Returns a pointer to the storage slot for element `index` within the
// contiguous element buffer (no bounds check is performed here).
void * At(size_t index) { return static_cast<uint8_t *>(mElements) + mElementSize * index; }
size_t IndexOf(void * element)
{
std::ptrdiff_t diff = static_cast<uint8_t *>(element) - static_cast<uint8_t *>(mElements);
assert(diff >= 0);
assert(static_cast<size_t>(diff) % mElementSize == 0);
auto index = static_cast<size_t>(diff) / mElementSize;
assert(index < Capacity());
return index;
}
// Maps an element pointer back to its zero-based slot index; asserts that the
// pointer lies inside the pool and on an element boundary (see Pool.cpp).
size_t IndexOf(void * element);

// Type-erased callback: (context, element) -> keep-iterating flag. Lets the
// bit-scanning loop live in non-template code in the .cpp so it can be shared
// by all instantiations (code-size optimization, per this commit's title).
using Lambda = bool (*)(void *, void *);
bool ForEachActiveObjectInner(void * context, Lambda lambda);

private:
void * mElements;
Expand Down Expand Up @@ -119,33 +114,35 @@ class BitMapObjectPool : public StaticAllocatorBitmap
* @brief
* Run a functor for each active object in the pool
*
* @param function The functor of type `bool (*)(T*)`; return false to break the iteration
* @return bool Returns false if the iteration was broken out of early
*
* @note This function is not thread-safe. Make sure all usage of the
* pool is protected by a lock, or else avoid using this function.
*/
template <typename F>
bool ForEachActiveObject(F f)
template <typename Function>
bool ForEachActiveObject(Function && function)
{
for (size_t word = 0; word * kBitChunkSize < Capacity(); ++word)
{
auto & usage = mUsage[word];
auto value = usage.load(std::memory_order_relaxed);
for (size_t offset = 0; offset < kBitChunkSize && offset + word * kBitChunkSize < Capacity(); ++offset)
{
if ((value & (kBit1 << offset)) != 0)
{
if (!f(static_cast<T *>(At(word * kBitChunkSize + offset))))
return false;
}
}
}
return true;
LambdaProxy<Function> proxy(std::forward<Function>(function));
return ForEachActiveObjectInner(&proxy, &LambdaProxy<Function>::Call);
}

private:
// Adapter that erases the concrete functor type: it stores the functor and
// exposes a static, non-template-dependent-signature entry point (`Call`)
// matching the `Lambda` function-pointer type expected by
// ForEachActiveObjectInner. The proxy itself is passed as the `context`.
template <typename Function>
class LambdaProxy
{
public:
// Takes ownership of the functor by move.
LambdaProxy(Function && function) : mFunction(std::move(function)) {}
// Trampoline: recovers the proxy from `context` and invokes the stored
// functor on `target`, cast back to the pool's element type T.
static bool Call(void * context, void * target)
{
return static_cast<LambdaProxy *>(context)->mFunction(static_cast<T *>(target));
}

private:
Function mFunction;
};

std::atomic<tBitChunkType> mUsage[(N + kBitChunkSize - 1) / kBitChunkSize];
alignas(alignof(T)) uint8_t mMemory[N * sizeof(T)];
};
Expand Down

0 comments on commit ce7381e

Please sign in to comment.