OmniSciDB  a5dc49c757
Data_Namespace::DataMgr Class Reference

#include <DataMgr.h>


Classes

struct  SystemMemoryUsage
 

Public Member Functions

 DataMgr (const std::string &dataDir, const SystemParameters &system_parameters, std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr, const bool useGpus, const size_t reservedGpuMem=(1<< 27), const size_t numReaderThreads=0, const File_Namespace::DiskCacheConfig cacheConfig=File_Namespace::DiskCacheConfig())
 
 ~DataMgr ()
 
AbstractBuffer * createChunkBuffer (const ChunkKey &key, const MemoryLevel memoryLevel, const int deviceId=0, const size_t page_size=0)
 
AbstractBuffer * getChunkBuffer (const ChunkKey &key, const MemoryLevel memoryLevel, const int deviceId=0, const size_t numBytes=0)
 
void deleteChunk (const ChunkKey &key, const MemoryLevel mem_level, const int device_id)
 
void deleteChunksWithPrefix (const ChunkKey &keyPrefix)
 
void deleteChunksWithPrefix (const ChunkKey &keyPrefix, const MemoryLevel memLevel)
 
AbstractBuffer * alloc (const MemoryLevel memoryLevel, const int deviceId, const size_t numBytes)
 
void free (AbstractBuffer *buffer)
 
void copy (AbstractBuffer *destBuffer, AbstractBuffer *srcBuffer)
 
bool isBufferOnDevice (const ChunkKey &key, const MemoryLevel memLevel, const int deviceId)
 
std::vector< MemoryInfo > getMemoryInfo (const MemoryLevel memLevel) const
 
std::vector< MemoryInfo > getMemoryInfoUnlocked (const MemoryLevel memLevel) const
 
std::string dumpLevel (const MemoryLevel memLevel)
 
void clearMemory (const MemoryLevel memLevel)
 
const std::map< ChunkKey, File_Namespace::FileBuffer * > & getChunkMap ()
 
void checkpoint (const int db_id, const int tb_id)
 
void checkpoint (const int db_id, const int table_id, const MemoryLevel memory_level)
 
void getChunkMetadataVecForKeyPrefix (ChunkMetadataVector &chunkMetadataVec, const ChunkKey &keyPrefix)
 
bool gpusPresent () const
 
void removeTableRelatedDS (const int db_id, const int tb_id)
 
void removeMutableTableDiskCacheData (const int db_id, const int tb_id) const
 
void setTableEpoch (const int db_id, const int tb_id, const int start_epoch)
 
size_t getTableEpoch (const int db_id, const int tb_id)
 
void resetTableEpochFloor (const int32_t db_id, const int32_t tb_id)
 
CudaMgr_Namespace::CudaMgr * getCudaMgr () const
 
File_Namespace::GlobalFileMgr * getGlobalFileMgr () const
 
std::shared_ptr< ForeignStorageInterface > getForeignStorageInterface () const
 
SystemMemoryUsage getSystemMemoryUsage () const
 
PersistentStorageMgr * getPersistentStorageMgr () const
 
void resetBufferMgrs (const File_Namespace::DiskCacheConfig &cache_config, const size_t num_reader_threads, const SystemParameters &sys_params)
 
size_t getCpuBufferPoolSize () const
 
size_t getGpuBufferPoolSize () const
 
Buffer_Namespace::CpuBufferMgr * getCpuBufferMgr () const
 
Buffer_Namespace::GpuCudaBufferMgr * getGpuBufferMgr (int32_t device_id) const
 

Static Public Member Functions

static size_t getTotalSystemMemory ()
 
static void atExitHandler ()
 

Public Attributes

std::vector< int > levelSizes_
 

Private Member Functions

void populateMgrs (const SystemParameters &system_parameters, const size_t userSpecifiedNumReaderThreads, const File_Namespace::DiskCacheConfig &cache_config)
 
void convertDB (const std::string basePath)
 
void checkpoint ()
 
void createTopLevelMetadata () const
 
void allocateCpuBufferMgr (int32_t device_id, size_t total_cpu_size, size_t min_cpu_slab_size, size_t max_cpu_slab_size, size_t default_cpu_slab_size, size_t page_size, const std::vector< size_t > &cpu_tier_sizes)
 

Private Attributes

std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
 
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
 
std::string dataDir_
 
bool hasGpus_
 
size_t reservedGpuMem_
 
std::mutex buffer_access_mutex_
 

Friends

class GlobalFileMgr
 

Detailed Description

Definition at line 125 of file DataMgr.h.

Constructor & Destructor Documentation

Data_Namespace::DataMgr::DataMgr ( const std::string &  dataDir,
const SystemParameters system_parameters,
std::unique_ptr< CudaMgr_Namespace::CudaMgr >  cudaMgr,
const bool  useGpus,
const size_t  reservedGpuMem = (1 << 27),
const size_t  numReaderThreads = 0,
const File_Namespace::DiskCacheConfig  cacheConfig = File_Namespace::DiskCacheConfig() 
)
explicit

Definition at line 77 of file DataMgr.cpp.

84  : cudaMgr_{std::move(cudaMgr)}
85  , dataDir_{dataDir}
86  , hasGpus_{false}
87  , reservedGpuMem_{reservedGpuMem} {
88  if (useGpus) {
89  if (cudaMgr_) {
90  hasGpus_ = true;
91 
92  // we register the `atExitHandler` if we create `DataMgr` having GPU
93  // to make sure we clear all allocated GPU memory when destructing this `DataMgr`
94  g_data_mgr_ptr = this;
95  std::atexit(atExitHandler);
96  } else {
97  LOG(ERROR) << "CudaMgr instance is invalid, falling back to CPU-only mode.";
98  hasGpus_ = false;
99  }
100  } else {
101  // NOTE: useGpus == false with a valid cudaMgr is a potentially valid configuration.
102  // i.e. QueryEngine can be set to cpu-only for a cuda-enabled build, but still have
103  // rendering enabled. The renderer would require a CudaMgr in this case, in addition
104  // to a GpuCudaBufferMgr for cuda-backed thrust allocations.
105  // We're still setting hasGpus_ to false in that case tho to enforce cpu-only query
106  // execution.
107  hasGpus_ = false;
108  }
109 
110  populateMgrs(system_parameters, numReaderThreads, cache_config);
111  createTopLevelMetadata();
112 }
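
For orientation, here is a minimal construction sketch (not taken from the sources); the data directory path is hypothetical and default SystemParameters are assumed to be sufficient:

  // CPU-only DataMgr: no CudaMgr is supplied and useGpus is false.
  SystemParameters sys_params;
  auto data_mgr = std::make_unique<Data_Namespace::DataMgr>(
      "/path/to/data",                     // dataDir (hypothetical)
      sys_params,
      nullptr,                             // cudaMgr: none
      /*useGpus=*/false,
      /*reservedGpuMem=*/1 << 27,          // default from the signature above
      /*numReaderThreads=*/0,              // 0 selects the default
      File_Namespace::DiskCacheConfig());  // default disk cache settings
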
Data_Namespace::DataMgr::~DataMgr ( )

Definition at line 114 of file DataMgr.cpp.

References Data_Namespace::anonymous_namespace{DataMgr.cpp}::at_exit_called, bufferMgrs_, clearMemory(), Data_Namespace::anonymous_namespace{DataMgr.cpp}::g_data_mgr_ptr, Data_Namespace::GPU_LEVEL, and hasGpus_.

114  {
115  g_data_mgr_ptr = nullptr;
116 
117  // This duplicates atExitHandler so we still shut down in the case of a startup
118  // exception. We can request cleanup of GPU memory twice, so it's safe.
119  if (!at_exit_called && hasGpus_) {
120  clearMemory(MemoryLevel::GPU_LEVEL);
121  }
122 
123  int numLevels = bufferMgrs_.size();
124  for (int level = numLevels - 1; level >= 0; --level) {
125  for (size_t device = 0; device < bufferMgrs_[level].size(); device++) {
126  delete bufferMgrs_[level][device];
127  }
128  }
129 }

Member Function Documentation

AbstractBuffer * Data_Namespace::DataMgr::alloc ( const MemoryLevel  memoryLevel,
const int  deviceId,
const size_t  numBytes 
)

Definition at line 605 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, CHECK_LT, and levelSizes_.

Referenced by CpuMgrArenaAllocator::allocate(), ThrustAllocator::allocate(), ThrustAllocator::allocateScopedBuffer(), CudaAllocator::allocGpuAbstractBuffer(), and InValuesBitmap::InValuesBitmap().

607  {
608  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
609  const auto level = static_cast<int>(memoryLevel);
610  CHECK_LT(deviceId, levelSizes_[level]);
611  return bufferMgrs_[level][deviceId]->alloc(numBytes);
612 }
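
A hedged usage sketch (assuming an existing DataMgr instance named data_mgr, which is not part of this page): buffers obtained from alloc() must be returned through DataMgr::free() rather than deleted directly, since the owning buffer manager tracks the allocation.

  // Allocate 1 MiB from the CPU buffer pool on device 0, then release it.
  AbstractBuffer* buf =
      data_mgr->alloc(Data_Namespace::CPU_LEVEL, /*deviceId=*/0, 1 << 20);
  // ... write through buf->getMemoryPtr() ...
  data_mgr->free(buf);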

void Data_Namespace::DataMgr::allocateCpuBufferMgr ( int32_t  device_id,
size_t  total_cpu_size,
size_t  min_cpu_slab_size,
size_t  max_cpu_slab_size,
size_t  default_cpu_slab_size,
size_t  page_size,
const std::vector< size_t > &  cpu_tier_sizes 
)
private

Definition at line 215 of file DataMgr.cpp.

References bufferMgrs_, and cudaMgr_.

Referenced by populateMgrs().

221  {
222 #ifdef ENABLE_MEMKIND
223  if (g_enable_tiered_cpu_mem) {
224  bufferMgrs_[1].push_back(
225  new Buffer_Namespace::TieredCpuBufferMgr(0,
226  total_cpu_size,
227  cudaMgr_.get(),
228  min_cpu_slab_size,
229  max_cpu_slab_size,
230  default_cpu_slab_size,
231  page_size,
232  cpu_tier_sizes,
233  bufferMgrs_[0][0]));
234  return;
235  }
236 #endif
237 
238  bufferMgrs_[1].push_back(new Buffer_Namespace::CpuBufferMgr(0,
239  total_cpu_size,
240  cudaMgr_.get(),
241  min_cpu_slab_size,
242  max_cpu_slab_size,
243  default_cpu_slab_size,
244  page_size,
245  bufferMgrs_[0][0]));
246 }

void Data_Namespace::DataMgr::atExitHandler ( )
static

Definition at line 67 of file DataMgr.cpp.

References Data_Namespace::anonymous_namespace{DataMgr.cpp}::at_exit_called, clearMemory(), Data_Namespace::anonymous_namespace{DataMgr.cpp}::g_data_mgr_ptr, Data_Namespace::GPU_LEVEL, and hasGpus_.

67  {
68  at_exit_called = true;
69  if (g_data_mgr_ptr && g_data_mgr_ptr->hasGpus_) {
70  // safely destroy all gpu allocations explicitly to avoid unexpected
71  // `CUDA_ERROR_DEINITIALIZED` exception while trying to synchronize
72  // devices to destroy BufferMgr for GPU, i.e., `GpuCudaBufferMgr` and `CudaMgr`
73  g_data_mgr_ptr->clearMemory(MemoryLevel::GPU_LEVEL);
74  }
75 }

void Data_Namespace::DataMgr::checkpoint ( const int  db_id,
const int  tb_id 
)

Definition at line 634 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

Referenced by UpdelRoll::stageUpdate().

634  {
635  // TODO(adb): do we need a buffer mgr lock here?
636  // MAT Yes to reduce Parallel Executor TSAN issues (and correctness for now)
637  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
638  for (auto levelIt = bufferMgrs_.rbegin(); levelIt != bufferMgrs_.rend(); ++levelIt) {
639  // use reverse iterator so we start at GPU level, then CPU then DISK
640  for (auto deviceIt = levelIt->begin(); deviceIt != levelIt->end(); ++deviceIt) {
641  (*deviceIt)->checkpoint(db_id, tb_id);
642  }
643  }
644 }
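
A brief sketch (hypothetical IDs, assuming a data_mgr instance): checkpointing one table flushes its dirty buffers level by level, GPU first, then CPU, then disk, as the reverse iteration above shows.

  const int db_id = 1;   // hypothetical database id
  const int tb_id = 42;  // hypothetical table id
  data_mgr->checkpoint(db_id, tb_id);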

void Data_Namespace::DataMgr::checkpoint ( const int  db_id,
const int  table_id,
const MemoryLevel  memory_level 
)

Definition at line 646 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, CHECK_LT, and levelSizes_.

648  {
649  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
650  CHECK_LT(static_cast<size_t>(memory_level), bufferMgrs_.size());
651  CHECK_LT(static_cast<size_t>(memory_level), levelSizes_.size());
652  for (int device_id = 0; device_id < levelSizes_[memory_level]; device_id++) {
653  bufferMgrs_[memory_level][device_id]->checkpoint(db_id, table_id);
654  }
655 }
void Data_Namespace::DataMgr::checkpoint ( )
private

Definition at line 657 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

Referenced by convertDB().

657  {
658  // TODO(adb): SAA
659  // MAT Yes to reduce Parallel Executor TSAN issues (and correctness for now)
660  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
661  for (auto levelIt = bufferMgrs_.rbegin(); levelIt != bufferMgrs_.rend(); ++levelIt) {
662  // use reverse iterator so we start at GPU level, then CPU then DISK
663  for (auto deviceIt = levelIt->begin(); deviceIt != levelIt->end(); ++deviceIt) {
664  (*deviceIt)->checkpoint();
665  }
666  }
667 }

void Data_Namespace::DataMgr::clearMemory ( const MemoryLevel  memLevel)

Definition at line 515 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, CHECK, cudaMgr_, Data_Namespace::GPU_LEVEL, LOG, and logger::WARNING.

Referenced by atExitHandler(), Executor::clearMemory(), and ~DataMgr().

515  {
516  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
517 
518  // if gpu we need to iterate through all the buffermanagers for each card
519  if (memLevel == MemoryLevel::GPU_LEVEL) {
520  if (cudaMgr_) {
521  int numGpus = cudaMgr_->getDeviceCount();
522  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
523  auto buffer_mgr_for_gpu =
524  dynamic_cast<Buffer_Namespace::BufferMgr*>(bufferMgrs_[memLevel][gpuNum]);
525  CHECK(buffer_mgr_for_gpu);
526  buffer_mgr_for_gpu->clearSlabs();
527  }
528  } else {
529  LOG(WARNING) << "Unable to clear GPU memory: No GPUs detected";
530  }
531  } else {
532  auto buffer_mgr_for_cpu =
533  dynamic_cast<Buffer_Namespace::BufferMgr*>(bufferMgrs_[memLevel][0]);
534  CHECK(buffer_mgr_for_cpu);
535  buffer_mgr_for_cpu->clearSlabs();
536  }
537 }
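
A usage sketch (assuming a data_mgr instance): clearing a level drops all slabs from its buffer pools; for GPU_LEVEL this is applied to every detected device, and a warning is logged when no GPUs are present.

  data_mgr->clearMemory(Data_Namespace::GPU_LEVEL);  // evict all GPU slabs
  data_mgr->clearMemory(Data_Namespace::CPU_LEVEL);  // evict all CPU slabs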

void Data_Namespace::DataMgr::convertDB ( const std::string  basePath)
private

Definition at line 384 of file DataMgr.cpp.

References bufferMgrs_, CHECK, checkpoint(), logger::FATAL, getGlobalFileMgr(), logger::INFO, shared::kDataDirectoryName, and LOG.

384  {
385  // no need for locking, as this is only called in the constructor
386 
387  /* check that the data directory exists and it's empty */
388  std::string mapdDataPath(basePath + "/../" + shared::kDataDirectoryName + "/");
389  boost::filesystem::path path(mapdDataPath);
390  if (boost::filesystem::exists(path)) {
391  if (!boost::filesystem::is_directory(path)) {
392  LOG(FATAL) << "Path to directory \"" + shared::kDataDirectoryName +
393  "\" to convert DB is not a directory.";
394  }
395  } else { // data directory does not exist
396  LOG(FATAL) << "Path to directory \"" + shared::kDataDirectoryName +
397  "\" to convert DB does not exist.";
398  }
399 
400  File_Namespace::GlobalFileMgr* gfm{nullptr};
401  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
402  CHECK(gfm);
403 
404  LOG(INFO) << "Database conversion started.";
405  // this call also copies data into new DB structure
406  File_Namespace::FileMgr* fm_base_db = new File_Namespace::FileMgr(gfm, basePath);
407  delete fm_base_db;
408 
409  /* write content of DB into newly created/converted DB structure & location */
410  checkpoint(); // outputs data files as well as metadata files
411  LOG(INFO) << "Database conversion completed.";
412 }

void Data_Namespace::DataMgr::copy ( AbstractBuffer destBuffer,
AbstractBuffer srcBuffer 
)

Definition at line 620 of file DataMgr.cpp.

References Data_Namespace::AbstractBuffer::getDeviceId(), Data_Namespace::AbstractBuffer::getMemoryPtr(), Data_Namespace::AbstractBuffer::getType(), Data_Namespace::AbstractBuffer::size(), and Data_Namespace::AbstractBuffer::write().

620  {
621  destBuffer->write(srcBuffer->getMemoryPtr(),
622  srcBuffer->size(),
623  0,
624  srcBuffer->getType(),
625  srcBuffer->getDeviceId());
626 }
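
A sketch of staging data between levels (cpu_buf is a hypothetical CPU-resident AbstractBuffer and data_mgr an existing instance): copy() writes the entire source buffer into the destination, so the destination must hold at least srcBuffer->size() bytes.

  AbstractBuffer* gpu_buf =
      data_mgr->alloc(Data_Namespace::GPU_LEVEL, /*deviceId=*/0, cpu_buf->size());
  data_mgr->copy(gpu_buf, cpu_buf);  // writes cpu_buf's contents into gpu_buf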

AbstractBuffer * Data_Namespace::DataMgr::createChunkBuffer ( const ChunkKey key,
const MemoryLevel  memoryLevel,
const int  deviceId = 0,
const size_t  page_size = 0 
)

Definition at line 552 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

Referenced by Chunk_NS::Chunk::createChunkBuffer().

555  {
556  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
557  int level = static_cast<int>(memoryLevel);
558  return bufferMgrs_[level][deviceId]->createBuffer(key, page_size);
559 }

void Data_Namespace::DataMgr::createTopLevelMetadata ( ) const
private

Definition at line 414 of file DataMgr.cpp.

References bufferMgrs_, CHECK, and getGlobalFileMgr().

Referenced by resetBufferMgrs().

415  { // create metadata shared by all tables of all DBs
416  ChunkKey chunkKey(2);
417  chunkKey[0] = 0; // top level db_id
418  chunkKey[1] = 0; // top level tb_id
419 
420  File_Namespace::GlobalFileMgr* gfm{nullptr};
421  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
422  CHECK(gfm);
423 
424  auto fm_top = gfm->getFileMgr(chunkKey);
425  if (auto fm = dynamic_cast<File_Namespace::FileMgr*>(fm_top)) {
426  fm->createOrMigrateTopLevelMetadata();
427  }
428 }

void Data_Namespace::DataMgr::deleteChunk ( const ChunkKey key,
const MemoryLevel  mem_level,
const int  device_id 
)

Definition at line 597 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, and CHECK_LT.

Referenced by AlterTableAlterColumnCommandRecoveryMgr::cleanupClearChunk().

599  {
600  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
601  CHECK_LT(mem_level, bufferMgrs_.size());
602  bufferMgrs_[mem_level][device_id]->deleteBuffer(key);
603 }

void Data_Namespace::DataMgr::deleteChunksWithPrefix ( const ChunkKey keyPrefix)

Definition at line 572 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, and levelSizes_.

Referenced by AlterTableAlterColumnCommandRecoveryMgr::cleanupDropSourceGeoColumns(), foreign_storage::anonymous_namespace{ForeignTableRefresh.cpp}::clear_cpu_and_gpu_cache(), anonymous_namespace{TableOptimizer.cpp}::delete_cpu_chunks(), AlterTableAlterColumnCommandRecoveryMgr::recoverAlterTableAlterColumnFromFile(), AlterTableAlterColumnCommandRecoveryMgr::rollback(), and UpdelRoll::updateFragmenterAndCleanupChunks().

572  {
573  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
574 
575  int numLevels = bufferMgrs_.size();
576  for (int level = numLevels - 1; level >= 0; --level) {
577  for (int device = 0; device < levelSizes_[level]; ++device) {
578  bufferMgrs_[level][device]->deleteBuffersWithPrefix(keyPrefix);
579  }
580  }
581 }
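
A sketch with hypothetical IDs: since a ChunkKey is a std::vector<int>, a {db_id, table_id} prefix selects every chunk belonging to one table.

  ChunkKey table_prefix{db_id, tb_id};
  data_mgr->deleteChunksWithPrefix(table_prefix);                             // all levels
  data_mgr->deleteChunksWithPrefix(table_prefix, Data_Namespace::CPU_LEVEL);  // CPU only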

void Data_Namespace::DataMgr::deleteChunksWithPrefix ( const ChunkKey keyPrefix,
const MemoryLevel  memLevel 
)

Definition at line 584 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, and levelSizes_.

585  {
586  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
587 
588  if (bufferMgrs_.size() <= memLevel) {
589  return;
590  }
591  for (int device = 0; device < levelSizes_[memLevel]; ++device) {
592  bufferMgrs_[memLevel][device]->deleteBuffersWithPrefix(keyPrefix);
593  }
594 }
std::string Data_Namespace::DataMgr::dumpLevel ( const MemoryLevel  memLevel)

Definition at line 499 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, cudaMgr_, and Data_Namespace::GPU_LEVEL.

499  {
500  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
501 
502  // if gpu we need to iterate through all the buffermanagers for each card
503  if (memLevel == MemoryLevel::GPU_LEVEL) {
504  int numGpus = cudaMgr_->getDeviceCount();
505  std::ostringstream tss;
506  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
507  tss << bufferMgrs_[memLevel][gpuNum]->printSlabs();
508  }
509  return tss.str();
510  } else {
511  return bufferMgrs_[memLevel][0]->printSlabs();
512  }
513 }
void Data_Namespace::DataMgr::free ( AbstractBuffer buffer)

Definition at line 614 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, Data_Namespace::AbstractBuffer::getDeviceId(), and Data_Namespace::AbstractBuffer::getType().

Referenced by UpdelRoll::cancelUpdate(), ThrustAllocator::deallocate(), CudaAllocator::free(), CudaAllocator::freeGpuAbstractBuffer(), BaselineHashTable::~BaselineHashTable(), CpuMgrArenaAllocator::~CpuMgrArenaAllocator(), CudaAllocator::~CudaAllocator(), InValuesBitmap::~InValuesBitmap(), PerfectHashTable::~PerfectHashTable(), StringDictionaryTranslationMgr::~StringDictionaryTranslationMgr(), ThrustAllocator::~ThrustAllocator(), and TreeModelPredictionMgr::~TreeModelPredictionMgr().

614  {
615  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
616  int level = static_cast<int>(buffer->getType());
617  bufferMgrs_[level][buffer->getDeviceId()]->free(buffer);
618 }

AbstractBuffer * Data_Namespace::DataMgr::getChunkBuffer ( const ChunkKey key,
const MemoryLevel  memoryLevel,
const int  deviceId = 0,
const size_t  numBytes = 0 
)

Definition at line 561 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, CHECK_LT, and levelSizes_.

Referenced by Chunk_NS::Chunk::getChunkBuffer().

564  {
565  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
566  const auto level = static_cast<size_t>(memoryLevel);
567  CHECK_LT(level, levelSizes_.size()); // make sure we have a legit buffermgr
568  CHECK_LT(deviceId, levelSizes_[level]); // make sure we have a legit buffermgr
569  return bufferMgrs_[level][deviceId]->getBuffer(key, numBytes);
570 }
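
A sketch of reading a chunk through the CPU pool (the key components beyond the db and table ids are hypothetical; a full chunk key conventionally also carries a column id and fragment id):

  ChunkKey key{db_id, tb_id, /*column_id=*/3, /*fragment_id=*/0};
  if (data_mgr->isBufferOnDevice(key, Data_Namespace::CPU_LEVEL, /*deviceId=*/0)) {
    AbstractBuffer* chunk =
        data_mgr->getChunkBuffer(key, Data_Namespace::CPU_LEVEL, /*deviceId=*/0);
    // ... read through chunk->getMemoryPtr() ...
  }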

const std::map<ChunkKey, File_Namespace::FileBuffer*>& Data_Namespace::DataMgr::getChunkMap ( )

void Data_Namespace::DataMgr::getChunkMetadataVecForKeyPrefix ( ChunkMetadataVector chunkMetadataVec,
const ChunkKey keyPrefix 
)

Definition at line 546 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

Referenced by AlterTableAlterColumnCommandRecoveryMgr::cleanupClearRemainingChunks(), TableOptimizer::vacuumFragments(), and anonymous_namespace{DdlCommandExecutor.cpp}::validate_alter_type_metadata().

547  {
548  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
549  bufferMgrs_[0][0]->getChunkMetadataVecForKeyPrefix(chunkMetadataVec, keyPrefix);
550 }

Buffer_Namespace::CpuBufferMgr * Data_Namespace::DataMgr::getCpuBufferMgr ( ) const

Definition at line 750 of file DataMgr.cpp.

References bufferMgrs_, CHECK, and Data_Namespace::CPU_LEVEL.

Referenced by getCpuBufferPoolSize().

750  {
751  CHECK(bufferMgrs_.size() > MemoryLevel::CPU_LEVEL);
752  return dynamic_cast<Buffer_Namespace::CpuBufferMgr*>(
753  bufferMgrs_[MemoryLevel::CPU_LEVEL][0]);
754 }

size_t Data_Namespace::DataMgr::getCpuBufferPoolSize ( ) const

Definition at line 733 of file DataMgr.cpp.

References getCpuBufferMgr(), and Buffer_Namespace::BufferMgr::getMaxSize().

733  {
734  return getCpuBufferMgr()->getMaxSize();
735 }

CudaMgr_Namespace::CudaMgr* Data_Namespace::DataMgr::getCudaMgr ( ) const
inline

Definition at line 177 of file DataMgr.h.

References cudaMgr_.

Referenced by Executor::blockSize(), copy_to_nvidia_gpu(), CudaAllocator::copyFromDevice(), CudaAllocator::copyToDevice(), CudaAllocator::CudaAllocator(), Executor::cudaMgr(), get_available_gpus(), Executor::gridSize(), Executor::isCPUOnly(), Executor::logSystemGPUMemoryStatus(), CudaAllocator::setDeviceMem(), and CudaAllocator::zeroDeviceMem().

177 { return cudaMgr_.get(); }

std::shared_ptr< ForeignStorageInterface > Data_Namespace::DataMgr::getForeignStorageInterface ( ) const

Definition at line 707 of file DataMgr.cpp.

References bufferMgrs_.

707  {
708  return dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])
709  ->getForeignStorageInterface();
710 }
File_Namespace::GlobalFileMgr * Data_Namespace::DataMgr::getGlobalFileMgr ( ) const

Definition at line 699 of file DataMgr.cpp.

References bufferMgrs_, and CHECK.

Referenced by convertDB(), createTopLevelMetadata(), TableArchiver::dumpTable(), anonymous_namespace{DdlCommandExecutor.cpp}::get_agg_storage_stats(), getTableEpoch(), foreign_storage::InternalStorageStatsDataWrapper::initializeObjectsForTable(), resetTableEpochFloor(), TableArchiver::restoreTable(), setTableEpoch(), and TableOptimizer::vacuumDeletedRows().

699  {
700  File_Namespace::GlobalFileMgr* global_file_mgr{nullptr};
701  global_file_mgr =
702  dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
703  CHECK(global_file_mgr);
704  return global_file_mgr;
705 }

Buffer_Namespace::GpuCudaBufferMgr * Data_Namespace::DataMgr::getGpuBufferMgr ( int32_t  device_id) const

Definition at line 756 of file DataMgr.cpp.

References bufferMgrs_, CHECK_GT, and Data_Namespace::GPU_LEVEL.

756  {
757  if (bufferMgrs_.size() > MemoryLevel::GPU_LEVEL) {
758  CHECK_GT(bufferMgrs_[MemoryLevel::GPU_LEVEL].size(), static_cast<size_t>(device_id));
759  return dynamic_cast<Buffer_Namespace::GpuCudaBufferMgr*>(
760  bufferMgrs_[MemoryLevel::GPU_LEVEL][device_id]);
761  } else {
762  return nullptr;
763  }
764 }
size_t Data_Namespace::DataMgr::getGpuBufferPoolSize ( ) const

Definition at line 738 of file DataMgr.cpp.

References bufferMgrs_, and Data_Namespace::GPU_LEVEL.

738  {
739  if (bufferMgrs_.size() <= MemoryLevel::GPU_LEVEL) {
740  return static_cast<size_t>(0);
741  }
742  size_t total_gpu_buffer_pools_size{0};
743  for (auto const gpu_buffer_mgr : bufferMgrs_[MemoryLevel::GPU_LEVEL]) {
744  total_gpu_buffer_pools_size +=
745  dynamic_cast<Buffer_Namespace::GpuCudaBufferMgr*>(gpu_buffer_mgr)->getMaxSize();
746  }
747  return total_gpu_buffer_pools_size;
748 }
std::vector< MemoryInfo > Data_Namespace::DataMgr::getMemoryInfo ( const MemoryLevel  memLevel) const

Definition at line 430 of file DataMgr.cpp.

References buffer_access_mutex_, and getMemoryInfoUnlocked().

Referenced by Executor::createKernels().

430  {
431  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
432  return getMemoryInfoUnlocked(mem_level);
433 }
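
A sketch of summarizing pool occupancy from the returned records, using only the MemoryInfo fields shown in getMemoryInfoUnlocked() below (assumes <iostream> and a data_mgr instance):

  for (const auto& mi : data_mgr->getMemoryInfo(Data_Namespace::CPU_LEVEL)) {
    const size_t used_bytes = mi.numPageAllocated * mi.pageSize;
    const size_t max_bytes = mi.maxNumPages * mi.pageSize;
    std::cout << used_bytes << " / " << max_bytes << " bytes allocated"
              << (mi.isAllocationCapped ? " (capped)" : "") << "\n";
  }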

std::vector< MemoryInfo > Data_Namespace::DataMgr::getMemoryInfoUnlocked ( const MemoryLevel  memLevel) const

Definition at line 435 of file DataMgr.cpp.

References bufferMgrs_, CHECK, Data_Namespace::MemoryData::chunk_key, Data_Namespace::CPU_LEVEL, cudaMgr_, Buffer_Namespace::BufferMgr::getAllocated(), Buffer_Namespace::BufferMgr::getMaxSize(), Buffer_Namespace::BufferMgr::getPageSize(), Buffer_Namespace::BufferMgr::getSlabSegments(), Data_Namespace::GPU_LEVEL, hasGpus_, Data_Namespace::MemoryInfo::isAllocationCapped, Buffer_Namespace::BufferMgr::isAllocationCapped(), Data_Namespace::MemoryInfo::maxNumPages, Data_Namespace::MemoryData::memStatus, Data_Namespace::MemoryInfo::nodeMemoryData, Data_Namespace::MemoryInfo::numPageAllocated, Data_Namespace::MemoryData::numPages, Data_Namespace::MemoryInfo::pageSize, Data_Namespace::MemoryData::slabNum, Data_Namespace::MemoryData::startPage, and Data_Namespace::MemoryData::touch.

Referenced by getMemoryInfo().

436  {
437  std::vector<MemoryInfo> mem_info;
438  if (mem_level == MemoryLevel::CPU_LEVEL) {
439  Buffer_Namespace::CpuBufferMgr* cpu_buffer =
440  dynamic_cast<Buffer_Namespace::CpuBufferMgr*>(
441  bufferMgrs_[MemoryLevel::CPU_LEVEL][0]);
442  CHECK(cpu_buffer);
443  MemoryInfo mi;
444 
445  mi.pageSize = cpu_buffer->getPageSize();
446  mi.maxNumPages = cpu_buffer->getMaxSize() / mi.pageSize;
447  mi.isAllocationCapped = cpu_buffer->isAllocationCapped();
448  mi.numPageAllocated = cpu_buffer->getAllocated() / mi.pageSize;
449 
450  const auto& slab_segments = cpu_buffer->getSlabSegments();
451  for (size_t slab_num = 0; slab_num < slab_segments.size(); ++slab_num) {
452  for (auto const& segment : slab_segments[slab_num]) {
453  MemoryData md;
454  md.slabNum = slab_num;
455  md.startPage = segment.start_page;
456  md.numPages = segment.num_pages;
457  md.touch = segment.last_touched;
458  md.memStatus = segment.mem_status;
459  md.chunk_key.insert(
460  md.chunk_key.end(), segment.chunk_key.begin(), segment.chunk_key.end());
461  mi.nodeMemoryData.push_back(md);
462  }
463  }
464  mem_info.push_back(mi);
465  } else if (hasGpus_) {
466  int numGpus = cudaMgr_->getDeviceCount();
467  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
468  Buffer_Namespace::GpuCudaBufferMgr* gpu_buffer =
469  dynamic_cast<Buffer_Namespace::GpuCudaBufferMgr*>(
470  bufferMgrs_[MemoryLevel::GPU_LEVEL][gpuNum]);
471  CHECK(gpu_buffer);
472  MemoryInfo mi;
473 
474  mi.pageSize = gpu_buffer->getPageSize();
475  mi.maxNumPages = gpu_buffer->getMaxSize() / mi.pageSize;
476  mi.isAllocationCapped = gpu_buffer->isAllocationCapped();
477  mi.numPageAllocated = gpu_buffer->getAllocated() / mi.pageSize;
478 
479  const auto& slab_segments = gpu_buffer->getSlabSegments();
480  for (size_t slab_num = 0; slab_num < slab_segments.size(); ++slab_num) {
481  for (auto const& segment : slab_segments[slab_num]) {
482  MemoryData md;
483  md.slabNum = slab_num;
484  md.startPage = segment.start_page;
485  md.numPages = segment.num_pages;
486  md.touch = segment.last_touched;
487  md.chunk_key.insert(
488  md.chunk_key.end(), segment.chunk_key.begin(), segment.chunk_key.end());
489  md.memStatus = segment.mem_status;
490  mi.nodeMemoryData.push_back(md);
491  }
492  }
493  mem_info.push_back(mi);
494  }
495  }
496  return mem_info;
497 }

PersistentStorageMgr * Data_Namespace::DataMgr::getPersistentStorageMgr ( ) const

Definition at line 729 of file DataMgr.cpp.

References bufferMgrs_, and Data_Namespace::DISK_LEVEL.

Referenced by Catalog_Namespace::anonymous_namespace{Catalog.cpp}::clear_cached_table_data(), removeMutableTableDiskCacheData(), and anonymous_namespace{RelAlgExecutor.cpp}::set_parallelism_hints().

729  {
730  return dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[MemoryLevel::DISK_LEVEL][0]);
731 }

DataMgr::SystemMemoryUsage Data_Namespace::DataMgr::getSystemMemoryUsage ( ) const

Definition at line 131 of file DataMgr.cpp.

References Data_Namespace::DataMgr::SystemMemoryUsage::avail_pages, Data_Namespace::DataMgr::SystemMemoryUsage::frag, Data_Namespace::DataMgr::SystemMemoryUsage::free, Data_Namespace::DataMgr::SystemMemoryUsage::high_blocks, Data_Namespace::ProcBuddyinfoParser::parseBuddyinfo(), Data_Namespace::DataMgr::SystemMemoryUsage::regular, Data_Namespace::DataMgr::SystemMemoryUsage::resident, Data_Namespace::DataMgr::SystemMemoryUsage::shared, Data_Namespace::DataMgr::SystemMemoryUsage::total, and Data_Namespace::DataMgr::SystemMemoryUsage::vtotal.

Referenced by anonymous_namespace{DBHandler.cpp}::log_system_cpu_memory_status(), and Executor::logSystemCPUMemoryStatus().

131  {
132  SystemMemoryUsage usage;
133 #ifdef __linux__
134 
135  // Determine Linux available memory and total memory.
136  // Available memory is different from free memory because
137  // when Linux sees free memory, it tries to use it for
138  // stuff like disk caching. However, the memory is not
139  // reserved and is still available to be allocated by
140  // user processes.
141  // Parsing /proc/meminfo for this info isn't very elegant
142  // but as a virtual file it should be reasonably fast.
143  // See also:
144  // https://github.com/torvalds/linux/commit/34e431b0ae398fc54ea69ff85ec700722c9da773
145  ProcMeminfoParser mi{};
146  usage.free = mi["MemAvailable"];
147  usage.total = mi["MemTotal"];
148 
149  // Determine process memory in use.
150  // See also:
151  // https://stackoverflow.com/questions/669438/how-to-get-memory-usage-at-runtime-using-c
152  // http://man7.org/linux/man-pages/man5/proc.5.html
153  int64_t size = 0;
154  int64_t resident = 0;
155  int64_t shared = 0;
156 
157  std::ifstream fstatm("/proc/self/statm");
158  fstatm >> size >> resident >> shared;
159  fstatm.close();
160 
161  long page_size =
162  sysconf(_SC_PAGE_SIZE); // in case x86-64 is configured to use 2MB pages
163 
164  usage.resident = resident * page_size;
165  usage.vtotal = size * page_size;
166  usage.regular = (resident - shared) * page_size;
167  usage.shared = shared * page_size;
168 
169  ProcBuddyinfoParser bi{};
170  bi.parseBuddyinfo();
171  usage.frag = bi.getFragmentationPercent();
172  usage.avail_pages = bi.getSumAvailPages();
173  usage.high_blocks = bi.getSumHighestBlocks();
174 
175 #else
176 
177  usage.total = 0;
178  usage.free = 0;
179  usage.resident = 0;
180  usage.vtotal = 0;
181  usage.regular = 0;
182  usage.shared = 0;
183  usage.frag = 0.0;
184  usage.avail_pages = 0;
185  usage.high_blocks = 0;
186 
187 #endif
188 
189  return usage;
190 }
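
A sketch of reporting the collected figures (assumes <iostream> and a data_mgr instance); on non-Linux builds every field is zeroed, as the #else branch above shows:

  const auto usage = data_mgr->getSystemMemoryUsage();
  std::cout << "host total: " << usage.total << " B, available: " << usage.free
            << " B, process resident: " << usage.resident << " B\n";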

size_t Data_Namespace::DataMgr::getTableEpoch ( const int  db_id,
const int  tb_id 
)

Definition at line 685 of file DataMgr.cpp.

References bufferMgrs_, CHECK, and getGlobalFileMgr().

685  {
686  File_Namespace::GlobalFileMgr* gfm{nullptr};
687  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
688  CHECK(gfm);
689  return gfm->getTableEpoch(db_id, tb_id);
690 }

size_t Data_Namespace::DataMgr::getTotalSystemMemory ( )
static

Definition at line 192 of file DataMgr.cpp.

Referenced by populateMgrs().

192  {
193 #ifdef __APPLE__
194  int mib[2];
195  size_t physical_memory;
196  size_t length;
197  // Get the Physical memory size
198  mib[0] = CTL_HW;
199  mib[1] = HW_MEMSIZE;
200  length = sizeof(size_t);
201  sysctl(mib, 2, &physical_memory, &length, NULL, 0);
202  return physical_memory;
203 #elif defined(_MSC_VER)
204  MEMORYSTATUSEX status;
205  status.dwLength = sizeof(status);
206  GlobalMemoryStatusEx(&status);
207  return status.ullTotalPhys;
208 #else // Linux
209  long pages = sysconf(_SC_PHYS_PAGES);
210  long page_size = sysconf(_SC_PAGE_SIZE);
211  return pages * page_size;
212 #endif
213 }


bool Data_Namespace::DataMgr::gpusPresent ( ) const
inline

Definition at line 170 of file DataMgr.h.

References hasGpus_.

Referenced by get_available_gpus().

170 { return hasGpus_; }


bool Data_Namespace::DataMgr::isBufferOnDevice ( const ChunkKey key,
const MemoryLevel  memLevel,
const int  deviceId 
)

Definition at line 539 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

Referenced by Chunk_NS::Chunk::isChunkOnDevice().

541  {
542  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
543  return bufferMgrs_[memLevel][deviceId]->isBufferOnDevice(key);
544 }

void Data_Namespace::DataMgr::populateMgrs ( const SystemParameters system_parameters,
const size_t  userSpecifiedNumReaderThreads,
const File_Namespace::DiskCacheConfig cache_config 
)
private

Definition at line 273 of file DataMgr.cpp.

References allocateCpuBufferMgr(), SystemParameters::buffer_page_size, bufferMgrs_, CHECK_GT, SystemParameters::cpu_buffer_mem_bytes, cudaMgr_, dataDir_, SystemParameters::default_cpu_slab_size, SystemParameters::default_gpu_slab_size, Data_Namespace::DRAM, g_pmem_size, g_use_cpu_mem_pool_size_for_max_cpu_slab_size, Data_Namespace::anonymous_namespace{DataMgr.cpp}::get_slab_size(), getTotalSystemMemory(), SystemParameters::gpu_buffer_mem_bytes, hasGpus_, logger::INFO, levelSizes_, LOG, SystemParameters::max_cpu_slab_size, SystemParameters::max_gpu_slab_size, SystemParameters::min_cpu_slab_size, SystemParameters::min_gpu_slab_size, Data_Namespace::numCpuTiers, Data_Namespace::PMEM, reservedGpuMem_, and VLOG.

Referenced by resetBufferMgrs().

275  {
276  // no need for locking, as this is only called in the constructor
277  bufferMgrs_.resize(2);
278  bufferMgrs_[0].push_back(
279  new PersistentStorageMgr(dataDir_, userSpecifiedNumReaderThreads, cache_config));
280 
281  levelSizes_.push_back(1);
282  auto page_size = system_parameters.buffer_page_size;
283  CHECK_GT(page_size, size_t(0));
284  auto cpu_buffer_size = system_parameters.cpu_buffer_mem_bytes;
285  if (cpu_buffer_size == 0) { // if size is not specified
286  const auto total_system_memory = getTotalSystemMemory();
287  VLOG(1) << "Detected " << (float)total_system_memory / (1024 * 1024)
288  << "M of total system memory.";
289  cpu_buffer_size = total_system_memory *
290  0.8; // should get free memory instead of this ugly heuristic
291  }
292  auto min_cpu_slab_size =
293  get_slab_size(system_parameters.min_cpu_slab_size, cpu_buffer_size, page_size);
294  auto max_cpu_slab_size =
295  g_use_cpu_mem_pool_size_for_max_cpu_slab_size
296  ? cpu_buffer_size
297  : get_slab_size(
298  system_parameters.max_cpu_slab_size, cpu_buffer_size, page_size);
299  auto default_cpu_slab_size =
300  get_slab_size(system_parameters.default_cpu_slab_size, cpu_buffer_size, page_size);
301  LOG(INFO) << "Min CPU Slab Size is " << float(min_cpu_slab_size) / (1024 * 1024)
302  << "MB";
303  LOG(INFO) << "Max CPU Slab Size is " << float(max_cpu_slab_size) / (1024 * 1024)
304  << "MB";
305  LOG(INFO) << "Default CPU Slab Size is " << float(default_cpu_slab_size) / (1024 * 1024)
306  << "MB";
307  LOG(INFO) << "Max memory pool size for CPU is "
308  << float(cpu_buffer_size) / (1024 * 1024) << "MB";
309 
310  size_t total_cpu_size = 0;
311 
312 #ifdef ENABLE_MEMKIND
313  CpuTierSizeVector cpu_tier_sizes(numCpuTiers, 0);
314  cpu_tier_sizes[CpuTier::DRAM] = cpu_buffer_size;
315  if (g_enable_tiered_cpu_mem) {
316  cpu_tier_sizes[CpuTier::PMEM] = g_pmem_size;
317  LOG(INFO) << "Max memory pool size for PMEM is " << (float)g_pmem_size / (1024 * 1024)
318  << "MB";
319  }
320  for (auto cpu_tier_size : cpu_tier_sizes) {
321  total_cpu_size += cpu_tier_size;
322  }
323 #else
324  CpuTierSizeVector cpu_tier_sizes{};
325  total_cpu_size = cpu_buffer_size;
326 #endif
327 
328  if (hasGpus_ || cudaMgr_) {
329  LOG(INFO) << "Reserved GPU memory is " << (float)reservedGpuMem_ / (1024 * 1024)
330  << "MB includes render buffer allocation";
331  bufferMgrs_.resize(3);
332  allocateCpuBufferMgr(0,
333  total_cpu_size,
334  min_cpu_slab_size,
335  max_cpu_slab_size,
336  default_cpu_slab_size,
337  page_size,
338  cpu_tier_sizes);
339 
340  levelSizes_.push_back(1);
341  auto num_gpus = cudaMgr_->getDeviceCount();
342  for (int gpu_num = 0; gpu_num < num_gpus; ++gpu_num) {
343  auto gpu_max_mem_size =
344  system_parameters.gpu_buffer_mem_bytes != 0
345  ? system_parameters.gpu_buffer_mem_bytes
346  : (cudaMgr_->getDeviceProperties(gpu_num)->globalMem) - (reservedGpuMem_);
347  auto min_gpu_slab_size =
348  get_slab_size(system_parameters.min_gpu_slab_size, gpu_max_mem_size, page_size);
349  auto max_gpu_slab_size =
350  get_slab_size(system_parameters.max_gpu_slab_size, gpu_max_mem_size, page_size);
351  auto default_gpu_slab_size = get_slab_size(
352  system_parameters.default_gpu_slab_size, gpu_max_mem_size, page_size);
353  LOG(INFO) << "Min GPU Slab size for GPU " << gpu_num << " is "
354  << float(min_gpu_slab_size) / (1024 * 1024) << "MB";
355  LOG(INFO) << "Max GPU Slab size for GPU " << gpu_num << " is "
356  << float(max_gpu_slab_size) / (1024 * 1024) << "MB";
357  LOG(INFO) << "Default GPU Slab size for GPU " << gpu_num << " is "
358  << float(default_gpu_slab_size) / (1024 * 1024) << "MB";
359  LOG(INFO) << "Max memory pool size for GPU " << gpu_num << " is "
360  << float(gpu_max_mem_size) / (1024 * 1024) << "MB";
361  bufferMgrs_[2].push_back(
362  new Buffer_Namespace::GpuCudaBufferMgr(gpu_num,
363  gpu_max_mem_size,
364  cudaMgr_.get(),
365  min_gpu_slab_size,
366  max_gpu_slab_size,
367  default_gpu_slab_size,
368  page_size,
369  bufferMgrs_[1][0]));
370  }
371  levelSizes_.push_back(num_gpus);
372  } else {
373  allocateCpuBufferMgr(0,
374  total_cpu_size,
375  min_cpu_slab_size,
376  max_cpu_slab_size,
377  default_cpu_slab_size,
378  page_size,
379  cpu_tier_sizes);
380  levelSizes_.push_back(1);
381  }
382 }

void Data_Namespace::DataMgr::removeMutableTableDiskCacheData ( const int  db_id,
const int  tb_id 
) const

Definition at line 674 of file DataMgr.cpp.

References getPersistentStorageMgr(), and PersistentStorageMgr::removeMutableTableCacheData().

674  {
675  getPersistentStorageMgr()->removeMutableTableCacheData(db_id, tb_id);
676 }

void Data_Namespace::DataMgr::removeTableRelatedDS ( const int  db_id,
const int  tb_id 
)

Definition at line 669 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

669  {
670  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
671  bufferMgrs_[0][0]->removeTableRelatedDS(db_id, tb_id);
672 }
void Data_Namespace::DataMgr::resetBufferMgrs ( const File_Namespace::DiskCacheConfig cache_config,
const size_t  num_reader_threads,
const SystemParameters sys_params 
)

Definition at line 249 of file DataMgr.cpp.

References bufferMgrs_, createTopLevelMetadata(), and populateMgrs().

251  {
252  int numLevels = bufferMgrs_.size();
253  for (int level = numLevels - 1; level >= 0; --level) {
254  for (size_t device = 0; device < bufferMgrs_[level].size(); device++) {
255  delete bufferMgrs_[level][device];
256  }
257  }
258  bufferMgrs_.clear();
259  populateMgrs(sys_params, num_reader_threads, cache_config);
260  createTopLevelMetadata();
261 }

void Data_Namespace::DataMgr::resetTableEpochFloor ( const int32_t  db_id,
const int32_t  tb_id 
)

Definition at line 692 of file DataMgr.cpp.

References bufferMgrs_, CHECK, and getGlobalFileMgr().

692  {
693  File_Namespace::GlobalFileMgr* gfm{nullptr};
694  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
695  CHECK(gfm);
696  gfm->resetTableEpochFloor(db_id, tb_id);
697 }

void Data_Namespace::DataMgr::setTableEpoch ( const int  db_id,
const int  tb_id,
const int  start_epoch 
)

Definition at line 678 of file DataMgr.cpp.

References bufferMgrs_, CHECK, and getGlobalFileMgr().

678  {
679  File_Namespace::GlobalFileMgr* gfm{nullptr};
680  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
681  CHECK(gfm);
682  gfm->setTableEpoch(db_id, tb_id, start_epoch);
683 }
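
A sketch pairing the epoch accessors (hypothetical IDs, assuming a data_mgr instance; real epoch rollback is coordinated by the catalog and import layers, so this is illustrative only):

  const size_t epoch = data_mgr->getTableEpoch(db_id, tb_id);
  if (epoch > 0) {
    data_mgr->setTableEpoch(db_id, tb_id, static_cast<int>(epoch) - 1);
  }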

Friends And Related Function Documentation

friend class GlobalFileMgr
friend

Definition at line 126 of file DataMgr.h.

Member Data Documentation

std::unique_ptr<CudaMgr_Namespace::CudaMgr> Data_Namespace::DataMgr::cudaMgr_
private

Definition at line 234 of file DataMgr.h.

Referenced by allocateCpuBufferMgr(), clearMemory(), dumpLevel(), getCudaMgr(), getMemoryInfoUnlocked(), and populateMgrs().
std::string Data_Namespace::DataMgr::dataDir_
private

Definition at line 235 of file DataMgr.h.

Referenced by populateMgrs().

bool Data_Namespace::DataMgr::hasGpus_
private

Definition at line 236 of file DataMgr.h.

Referenced by atExitHandler(), getMemoryInfoUnlocked(), gpusPresent(), populateMgrs(), and ~DataMgr().

std::vector<int> Data_Namespace::DataMgr::levelSizes_

Definition at line 182 of file DataMgr.h.

Referenced by alloc(), checkpoint(), deleteChunksWithPrefix(), getChunkBuffer(), and populateMgrs().

size_t Data_Namespace::DataMgr::reservedGpuMem_
private

Definition at line 237 of file DataMgr.h.

Referenced by populateMgrs().


The documentation for this class was generated from the following files:

DataMgr.h
DataMgr.cpp