OmniSciDB  a5dc49c757
TieredCpuBufferMgr.cpp
/*
 * Copyright 2022 HEAVY.AI, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "DataMgr/BufferMgr/CpuBufferMgr/TieredCpuBufferMgr.h"
#include "CudaMgr/CudaMgr.h"
#include "Shared/misc.h"

#include <iostream>

namespace {
std::string tier_to_string(CpuTier tier) {
  switch (tier) {
    case DRAM:
      return "DRAM";
    case PMEM:
      return "PMEM";
    default:
      return "<UNKNOWN>";
  }
}
}  // namespace

namespace Buffer_Namespace {

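// The constructor sets up one arena allocator per CPU tier, in priority order
// (DRAM first, then PMEM). Each tier is capped by the corresponding entry in
// cpu_tier_sizes; addSlab() later walks the tiers in this same order.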
TieredCpuBufferMgr::TieredCpuBufferMgr(const int device_id,
                                       const size_t total_size,
                                       CudaMgr_Namespace::CudaMgr* cuda_mgr,
                                       const size_t min_slab_size,
                                       const size_t max_slab_size,
                                       const size_t default_slab_size,
                                       const size_t page_size,
                                       const CpuTierSizeVector& cpu_tier_sizes,
                                       AbstractBufferMgr* parent_mgr)
    : CpuBufferMgr(device_id,
                   total_size,
                   cuda_mgr,
                   min_slab_size,
                   max_slab_size,
                   default_slab_size,
                   page_size,
                   parent_mgr) {
  CHECK(cpu_tier_sizes.size() == numCpuTiers);
  allocators_.emplace_back(
      std::make_unique<DramArena>(default_slab_size_ + kArenaBlockOverhead),
      cpu_tier_sizes[CpuTier::DRAM]);
  allocators_.emplace_back(
      std::make_unique<PMemArena>(default_slab_size_ + kArenaBlockOverhead),
      cpu_tier_sizes[CpuTier::PMEM]);
}

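// Illustrative construction, not taken from this file: the tier caps, slab
// sizes, and null cuda_mgr below are placeholder values chosen only to show how
// cpu_tier_sizes is expected to be indexed by CpuTier.
//
//   CpuTierSizeVector tier_sizes(numCpuTiers);
//   tier_sizes[CpuTier::DRAM] = 16UL * 1024 * 1024 * 1024;  // 16 GB DRAM cap
//   tier_sizes[CpuTier::PMEM] = 64UL * 1024 * 1024 * 1024;  // 64 GB PMEM cap
//   TieredCpuBufferMgr buffer_mgr(/*device_id=*/0,
//                                 /*total_size=*/80UL * 1024 * 1024 * 1024,
//                                 /*cuda_mgr=*/nullptr,
//                                 /*min_slab_size=*/256UL * 1024 * 1024,
//                                 /*max_slab_size=*/4UL * 1024 * 1024 * 1024,
//                                 /*default_slab_size=*/2UL * 1024 * 1024 * 1024,
//                                 /*page_size=*/512,
//                                 tier_sizes);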
Arena* TieredCpuBufferMgr::getAllocatorForSlab(int32_t slab_num) const {
  return shared::get_from_map(slab_to_allocator_map_, slab_num);
}

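// Adds a new slab by walking the tiers in priority order (DRAM, then PMEM) and
// carving the slab out of the first arena whose configured limit still has room
// for it. A failed allocation in the chosen arena is not retried on a later
// tier; it is reported as FailedToCreateSlab.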
void TieredCpuBufferMgr::addSlab(const size_t slab_size) {
  CHECK(!allocators_.empty());
  CHECK(allocators_.begin()->first.get() != nullptr);
  slabs_.resize(slabs_.size() + 1);
  auto allocated_slab = false;
  CpuTier last_tier;
  for (auto allocator_type : {CpuTier::DRAM, CpuTier::PMEM}) {
    last_tier = allocator_type;
    auto& [allocator, allocator_limit] = allocators_.at(allocator_type);
    // If there is not enough space left in the current allocator, move on to the
    // next one.
    if (allocator_limit >= allocator->bytesUsed() + slab_size) {
      try {
        slabs_.back() = reinterpret_cast<int8_t*>(allocator->allocate(slab_size));
      } catch (std::bad_alloc&) {
        // If anything goes wrong with the allocation, throw rather than falling
        // through to the next allocator.
        slabs_.resize(slabs_.size() - 1);
        throw FailedToCreateSlab(slab_size);
      }
      slab_to_allocator_map_[slabs_.size() - 1] = allocator.get();
      allocated_slab = true;
      break;
    }
  }
  if (allocated_slab) {
    // We allocated a new slab, so add segments for it.
    slab_segments_.resize(slab_segments_.size() + 1);
    slab_segments_[slab_segments_.size() - 1].push_back(
        BufferSeg(0, slab_size / page_size_));
    LOG(INFO) << "Allocated slab using " << tier_to_string(last_tier) << ".";
  } else {
    // None of the allocators could provide a slab, so revert to the original size
    // and throw.
    slabs_.resize(slabs_.size() - 1);
    throw FailedToCreateSlab(slab_size);
  }
}

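// Releases all slab memory by rebuilding the per-tier arenas from scratch; the
// reset itself is delegated to initializeMem().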
void TieredCpuBufferMgr::freeAllMem() {
  CHECK(!allocators_.empty());
  CHECK(allocators_.begin()->first.get() != nullptr);
  initializeMem();
}

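// Replaces the current arenas with freshly constructed ones sized for a single
// default slab plus arena bookkeeping overhead, and drops any existing
// slab-to-allocator associations.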
113 
115  allocators_[CpuTier::DRAM].first =
116  std::make_unique<DramArena>(default_slab_size_ + kArenaBlockOverhead);
117  allocators_[CpuTier::PMEM].first =
118  std::make_unique<PMemArena>(default_slab_size_ + kArenaBlockOverhead);
119  slab_to_allocator_map_.clear();
120 }
121 
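// Illustrative dump() output for a two-tier configuration (byte counts and
// whitespace are placeholders, not taken from a real run):
//
//   TieredCpuBufferMgr:
//    allocator[0]
//    limit = 17179869184
//    used = 4294967296
//    allocator[1]
//    limit = 68719476736
//    used = 0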
std::string TieredCpuBufferMgr::dump() const {
  size_t allocator_num = 0;
  std::stringstream ss;
  ss << "TieredCpuBufferMgr:\n";
  for (auto& [allocator, allocator_limit] : allocators_) {
    ss << " allocator[" << allocator_num++ << "]\n limit = " << allocator_limit
       << "\n used = " << allocator->bytesUsed() << "\n";
  }
  return ss.str();
}

}  // namespace Buffer_Namespace