23 namespace Buffer_Namespace {
26 const size_t max_buffer_pool_size,
28 const size_t min_slab_size,
29 const size_t max_slab_size,
30 const size_t default_slab_size,
31 const size_t page_size,
32 AbstractBufferMgr* parent_mgr)
40 , cuda_mgr_(cuda_mgr) {}
47 }
catch (
const CudaMgr_Namespace::CudaErrorException& e) {
48 if (e.getStatus() == CUDA_ERROR_DEINITIALIZED) {
53 }
catch (
const std::runtime_error& e) {
54 LOG(
ERROR) <<
"CUDA Error: " << e.what();
63 }
catch (std::runtime_error& error) {
73 for (
auto buf_it =
slabs_.begin(); buf_it !=
slabs_.end(); ++buf_it) {
79 const size_t page_size,
80 const size_t initial_size) {
~GpuCudaBufferMgr() override
void freeAllMem() override
std::vector< BufferList > slab_segments_
GpuCudaBufferMgr(const int device_id, const size_t max_buffer_pool_size, CudaMgr_Namespace::CudaMgr *cuda_mgr, const size_t min_slab_size, const size_t max_slab_size, const size_t default_slab_size, const size_t page_size, AbstractBufferMgr *parent_mgr=0)
Note: copying of this class is forbidden (Forbid Copying idiom, section 4.1).
void addSlab(const size_t slab_size) override
void freeDeviceMem(int8_t *device_ptr)
void allocateBuffer(BufferList::iterator seg_it, const size_t page_size, const size_t initial_size) override
CudaMgr_Namespace::CudaMgr * cuda_mgr_
virtual int8_t * allocateDeviceMem(const size_t num_bytes, const int device_num, const bool is_slab=false)
void synchronizeDevices() const
std::vector< int8_t * > slabs_