namespace Buffer_Namespace {

TieredCpuBufferMgr::TieredCpuBufferMgr(const int device_id,
                                       const size_t total_size,
                                       CudaMgr_Namespace::CudaMgr* cuda_mgr,
                                       const size_t min_slab_size,
                                       const size_t max_slab_size,
                                       const size_t default_slab_size,
                                       const size_t page_size,
                                       const CpuTierSizeVector& cpu_tier_sizes,
                                       AbstractBufferMgr* parent_mgr)
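// Hedged sketch of the constructor body (not necessarily the shipped code): DramArena and
// PMemArena are assumed Arena subclasses, the forwarding to the CpuBufferMgr base is assumed,
// and cpu_tier_sizes is assumed to hold one byte limit per CpuTier. Each tier gets its own
// Arena paired with its limit, appended in tier order so addSlab() below prefers DRAM before
// spilling to the next tier.
    : CpuBufferMgr(device_id,
                   total_size,
                   cuda_mgr,
                   min_slab_size,
                   max_slab_size,
                   default_slab_size,
                   page_size,
                   parent_mgr) {
  CHECK_EQ(cpu_tier_sizes.size(), numCpuTiers);
  allocators_.emplace_back(
      std::make_unique<DramArena>(default_slab_size + kArenaBlockOverhead),
      cpu_tier_sizes[static_cast<size_t>(CpuTier::DRAM)]);
  allocators_.emplace_back(
      std::make_unique<PMemArena>(default_slab_size + kArenaBlockOverhead),
      cpu_tier_sizes[static_cast<size_t>(CpuTier::PMEM)]);
}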

void TieredCpuBufferMgr::addSlab(const size_t slab_size) {
  auto allocated_slab = false;
  CpuTier last_tier;
  // Walk the tiers in priority order and take the first allocator with room for the slab.
  for (auto allocator_type : {CpuTier::DRAM, CpuTier::PMEM}) {
    last_tier = allocator_type;
    auto& [allocator, allocator_limit] = allocators_.at(allocator_type);
    if (allocator_limit >= allocator->bytesUsed() + slab_size) {
      try {
        slabs_.back() = reinterpret_cast<int8_t*>(allocator->allocate(slab_size));
      } catch (std::bad_alloc&) {
        throw FailedToCreateSlab(slab_size);
      }
      allocated_slab = true;
      break;
    }
  }
  // (remainder of addSlab omitted in this excerpt)
}
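
// Minimal companion sketch (consistent with the declarations listed at the end of this
// excerpt, though the real body may differ): each slab is expected to be mapped to the Arena
// that backs it in slab_to_allocator_map_, and getAllocatorForSlab() looks that Arena up
// again by slab index.
Arena* TieredCpuBufferMgr::getAllocatorForSlab(int32_t slab_num) const {
  return slab_to_allocator_map_.at(slab_num);
}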

  // Summarize each allocator's byte limit and current usage for logging and debugging.
  size_t allocator_num = 0;
  std::stringstream ss;
  ss << "TieredCpuBufferMgr:\n";
  for (auto& [allocator, allocator_limit] : allocators_) {
    ss << "  allocator[" << allocator_num++ << "]\n    limit = " << allocator_limit
       << "\n    used = " << allocator->bytesUsed() << "\n";
  }
// Declarations referenced above (signatures only; definitions live in their own headers):
constexpr size_t kArenaBlockOverhead;
constexpr size_t numCpuTiers;
using CpuTierSizeVector = std::vector<size_t>;
std::string tier_to_string(CpuTier tier);
template <typename K, typename V, typename comp>
V& get_from_map(std::map<K, V, comp>& map, const K& key);
Arena* getAllocatorForSlab(int32_t slab_num) const;
void addSlab(const size_t slab_size) override;
void freeAllMem() override;
void initializeMem() override;
std::map<int32_t, Arena*> slab_to_allocator_map_;
std::vector<std::pair<std::unique_ptr<Arena>, size_t>> allocators_;
std::vector<int8_t*> slabs_;
std::vector<BufferList> slab_segments_;
const size_t default_slab_size_;
TieredCpuBufferMgr(const int device_id,
                   const size_t total_size,
                   CudaMgr_Namespace::CudaMgr* cuda_mgr,
                   const size_t min_slab_size,
                   const size_t max_slab_size,
                   const size_t default_slab_size,
                   const size_t page_size,
                   const CpuTierSizeVector& cpu_tier_sizes,
                   AbstractBufferMgr* parent_mgr = nullptr);
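
// Hedged sketch of the tier enumeration behind CpuTier, numCpuTiers, and tier_to_string()
// (the names DRAM/PMEM and the exact values are assumptions; the real definitions are not
// part of this excerpt):
enum CpuTier : size_t { DRAM = 0, PMEM = 1 };
constexpr size_t numCpuTiers{2};

std::string tier_to_string(const CpuTier tier) {
  switch (tier) {
    case CpuTier::DRAM:
      return "DRAM";
    case CpuTier::PMEM:
      return "PMEM";
    default:
      return "<UNKNOWN>";
  }
}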