GpuCudaBufferMgr.cpp
/*
 * Copyright 2022 HEAVY.AI, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "DataMgr/BufferMgr/GpuCudaBufferMgr/GpuCudaBufferMgr.h"

#include "CudaMgr/CudaMgr.h"
#include "DataMgr/BufferMgr/GpuCudaBufferMgr/GpuCudaBuffer.h"
#include "Logger/Logger.h"

namespace Buffer_Namespace {

GpuCudaBufferMgr::GpuCudaBufferMgr(const int device_id,
                                   const size_t max_buffer_pool_size,
                                   CudaMgr_Namespace::CudaMgr* cuda_mgr,
                                   const size_t min_slab_size,
                                   const size_t max_slab_size,
                                   const size_t default_slab_size,
                                   const size_t page_size,
                                   AbstractBufferMgr* parent_mgr)
    : BufferMgr(device_id,
                max_buffer_pool_size,
                min_slab_size,
                max_slab_size,
                default_slab_size,
                page_size,
                parent_mgr)
    , cuda_mgr_(cuda_mgr) {}

GpuCudaBufferMgr::~GpuCudaBufferMgr() {
  try {
    cuda_mgr_->synchronizeDevices();
    freeAllMem();
#ifdef HAVE_CUDA
  } catch (const CudaMgr_Namespace::CudaErrorException& e) {
    if (e.getStatus() == CUDA_ERROR_DEINITIALIZED) {
      // TODO(adb / asuhan): Verify cuModuleUnload removes the context
      // The CUDA driver has already been torn down, so the device memory is
      // gone with the context and there is nothing left to free here.
      return;
    }
#endif
  } catch (const std::runtime_error& e) {
    LOG(ERROR) << "CUDA Error: " << e.what();
  }
}

void GpuCudaBufferMgr::addSlab(const size_t slab_size) {
  slabs_.resize(slabs_.size() + 1);
  try {
    slabs_.back() =
        cuda_mgr_->allocateDeviceMem(slab_size, device_id_, /* is_slab */ true);
  } catch (std::runtime_error& error) {
    // Allocation failed; roll back the placeholder slot before reporting.
    slabs_.resize(slabs_.size() - 1);
    throw FailedToCreateSlab(slab_size);
  }
  // Record the new slab in the segment list as one segment covering all of
  // its pages.
  slab_segments_.resize(slab_segments_.size() + 1);
  slab_segments_[slab_segments_.size() - 1].push_back(
      BufferSeg(0, slab_size / page_size_));
}

void GpuCudaBufferMgr::freeAllMem() {
  for (auto buf_it = slabs_.begin(); buf_it != slabs_.end(); ++buf_it) {
    cuda_mgr_->freeDeviceMem(*buf_it);
  }
}

void GpuCudaBufferMgr::allocateBuffer(BufferList::iterator seg_it,
                                      const size_t page_size,
                                      const size_t initial_size) {
  new GpuCudaBuffer(this,
                    seg_it,
                    device_id_,
                    cuda_mgr_,
                    page_size,
                    initial_size);  // This looks a bit odd: the result of `new`
                                    // is deliberately not stored, because the
                                    // Buffer constructor writes the new
                                    // Buffer's address into the `buffer` member
                                    // of the segment referenced by seg_it.
}

}  // namespace Buffer_Namespace
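
The unstored `new` in allocateBuffer is less strange than it first appears: as the trailing comment explains, the Buffer constructor records the new object's address in the `buffer` member of the segment referenced by `seg_it`, so the owning buffer manager can later reach the allocation through its segment lists rather than through the return value. Below is a minimal, self-contained sketch of that registration pattern; `ToySeg` and `ToyBuffer` are illustrative stand-ins, not the real `BufferSeg`/`GpuCudaBuffer` interface.

#include <cassert>
#include <list>

// Toy stand-ins for the segment/buffer pair; names are illustrative only.
struct ToyBuffer;

struct ToySeg {
  ToyBuffer* buffer{nullptr};  // the segment keeps a raw pointer to its buffer
};

using ToySegList = std::list<ToySeg>;

struct ToyBuffer {
  // Mirrors the pattern in allocateBuffer(): the constructor writes `this`
  // back into the segment it was handed, so the caller never needs to keep
  // the pointer returned by `new`.
  explicit ToyBuffer(ToySegList::iterator seg_it) { seg_it->buffer = this; }
};

int main() {
  ToySegList segments(1);
  new ToyBuffer(segments.begin());             // result intentionally not stored
  assert(segments.front().buffer != nullptr);  // the segment now points at it
  delete segments.front().buffer;              // cleanup for this sketch only
  return 0;
}

Deleting through `segments.front().buffer` at the end only keeps the sketch leak-free; in the real code the buffer's lifetime is managed by the buffer manager, not by the caller of allocateBuffer.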