OmniSciDB  72c90bc290
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
Data_Namespace::DataMgr Class Reference

#include <DataMgr.h>

+ Collaboration diagram for Data_Namespace::DataMgr:

Classes

struct  SystemMemoryUsage
 

Public Member Functions

 DataMgr (const std::string &dataDir, const SystemParameters &system_parameters, std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr, const bool useGpus, const size_t reservedGpuMem=(1<< 27), const size_t numReaderThreads=0, const File_Namespace::DiskCacheConfig cacheConfig=File_Namespace::DiskCacheConfig())
 
 ~DataMgr ()
 
AbstractBuffercreateChunkBuffer (const ChunkKey &key, const MemoryLevel memoryLevel, const int deviceId=0, const size_t page_size=0)
 
AbstractBuffergetChunkBuffer (const ChunkKey &key, const MemoryLevel memoryLevel, const int deviceId=0, const size_t numBytes=0)
 
void deleteChunk (const ChunkKey &key, const MemoryLevel mem_level, const int device_id)
 
void deleteChunksWithPrefix (const ChunkKey &keyPrefix)
 
void deleteChunksWithPrefix (const ChunkKey &keyPrefix, const MemoryLevel memLevel)
 
AbstractBufferalloc (const MemoryLevel memoryLevel, const int deviceId, const size_t numBytes)
 
void free (AbstractBuffer *buffer)
 
void copy (AbstractBuffer *destBuffer, AbstractBuffer *srcBuffer)
 
bool isBufferOnDevice (const ChunkKey &key, const MemoryLevel memLevel, const int deviceId)
 
std::vector< MemoryInfogetMemoryInfo (const MemoryLevel memLevel) const
 
std::vector< MemoryInfogetMemoryInfoUnlocked (const MemoryLevel memLevel) const
 
std::string dumpLevel (const MemoryLevel memLevel)
 
void clearMemory (const MemoryLevel memLevel)
 
const std::map< ChunkKey,
File_Namespace::FileBuffer * > & 
getChunkMap ()
 
void checkpoint (const int db_id, const int tb_id)
 
void checkpoint (const int db_id, const int table_id, const MemoryLevel memory_level)
 
void getChunkMetadataVecForKeyPrefix (ChunkMetadataVector &chunkMetadataVec, const ChunkKey &keyPrefix)
 
bool gpusPresent () const
 
void removeTableRelatedDS (const int db_id, const int tb_id)
 
void removeMutableTableDiskCacheData (const int db_id, const int tb_id) const
 
void setTableEpoch (const int db_id, const int tb_id, const int start_epoch)
 
size_t getTableEpoch (const int db_id, const int tb_id)
 
void resetTableEpochFloor (const int32_t db_id, const int32_t tb_id)
 
CudaMgr_Namespace::CudaMgrgetCudaMgr () const
 
File_Namespace::GlobalFileMgrgetGlobalFileMgr () const
 
std::shared_ptr
< ForeignStorageInterface
getForeignStorageInterface () const
 
SystemMemoryUsage getSystemMemoryUsage () const
 
PersistentStorageMgrgetPersistentStorageMgr () const
 
void resetBufferMgrs (const File_Namespace::DiskCacheConfig &cache_config, const size_t num_reader_threads, const SystemParameters &sys_params)
 
size_t getCpuBufferPoolSize () const
 
size_t getGpuBufferPoolSize () const
 
Buffer_Namespace::CpuBufferMgrgetCpuBufferMgr () const
 
Buffer_Namespace::GpuCudaBufferMgrgetGpuBufferMgr (int32_t device_id) const
 

Static Public Member Functions

static size_t getTotalSystemMemory ()
 
static void atExitHandler ()
 

Public Attributes

std::vector< int > levelSizes_
 

Private Member Functions

void populateMgrs (const SystemParameters &system_parameters, const size_t userSpecifiedNumReaderThreads, const File_Namespace::DiskCacheConfig &cache_config)
 
void convertDB (const std::string basePath)
 
void checkpoint ()
 
void createTopLevelMetadata () const
 
void allocateCpuBufferMgr (int32_t device_id, size_t total_cpu_size, size_t minCpuSlabSize, size_t maxCpuSlabSize, size_t page_size, const std::vector< size_t > &cpu_tier_sizes)
 

Private Attributes

std::vector< std::vector
< AbstractBufferMgr * > > 
bufferMgrs_
 
std::unique_ptr
< CudaMgr_Namespace::CudaMgr
cudaMgr_
 
std::string dataDir_
 
bool hasGpus_
 
size_t reservedGpuMem_
 
std::mutex buffer_access_mutex_
 

Friends

class GlobalFileMgr
 

Detailed Description

Definition at line 183 of file DataMgr.h.

Constructor & Destructor Documentation

Data_Namespace::DataMgr::DataMgr ( const std::string &  dataDir,
const SystemParameters system_parameters,
std::unique_ptr< CudaMgr_Namespace::CudaMgr cudaMgr,
const bool  useGpus,
const size_t  reservedGpuMem = (1 << 27),
const size_t  numReaderThreads = 0,
const File_Namespace::DiskCacheConfig  cacheConfig = File_Namespace::DiskCacheConfig() 
)
explicit

Definition at line 69 of file DataMgr.cpp.

76  : cudaMgr_{std::move(cudaMgr)}
77  , dataDir_{dataDir}
78  , hasGpus_{false}
79  , reservedGpuMem_{reservedGpuMem} {
80  if (useGpus) {
81  if (cudaMgr_) {
82  hasGpus_ = true;
83 
84  // we register the `atExitHandler` if we create `DataMgr` having GPU
85  // to make sure we clear all allocated GPU memory when destructing this `DataMgr`
86  g_data_mgr_ptr = this;
87  std::atexit(atExitHandler);
88  } else {
89  LOG(ERROR) << "CudaMgr instance is invalid, falling back to CPU-only mode.";
90  hasGpus_ = false;
91  }
92  } else {
93  // NOTE: useGpus == false with a valid cudaMgr is a potentially valid configuration.
94  // i.e. QueryEngine can be set to cpu-only for a cuda-enabled build, but still have
95  // rendering enabled. The renderer would require a CudaMgr in this case, in addition
96  // to a GpuCudaBufferMgr for cuda-backed thrust allocations.
97  // We're still setting hasGpus_ to false in that case tho to enforce cpu-only query
98  // execution.
99  hasGpus_ = false;
100  }
101 
102  populateMgrs(system_parameters, numReaderThreads, cache_config);
103  createTopLevelMetadata();
104 }
#define LOG(tag)
Definition: Logger.h:285
void createTopLevelMetadata() const
Definition: DataMgr.cpp:364
static void atExitHandler()
Definition: DataMgr.cpp:59
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
Definition: DataMgr.h:289
void populateMgrs(const SystemParameters &system_parameters, const size_t userSpecifiedNumReaderThreads, const File_Namespace::DiskCacheConfig &cache_config)
Definition: DataMgr.cpp:246
std::string dataDir_
Definition: DataMgr.h:290
Data_Namespace::DataMgr::~DataMgr ( )

Definition at line 106 of file DataMgr.cpp.

References Data_Namespace::anonymous_namespace{DataMgr.cpp}::at_exit_called, bufferMgrs_, clearMemory(), Data_Namespace::anonymous_namespace{DataMgr.cpp}::g_data_mgr_ptr, Data_Namespace::GPU_LEVEL, and hasGpus_.

106  {
107  g_data_mgr_ptr = nullptr;
108 
109  // This duplicates atExitHandler so we still shut down in the case of a startup
110  // exception. We can request cleanup of GPU memory twice, so it's safe.
111  if (!at_exit_called && hasGpus_) {
112  clearMemory(MemoryLevel::GPU_LEVEL);
113  }
114 
115  int numLevels = bufferMgrs_.size();
116  for (int level = numLevels - 1; level >= 0; --level) {
117  for (size_t device = 0; device < bufferMgrs_[level].size(); device++) {
118  delete bufferMgrs_[level][device];
119  }
120  }
121 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
void clearMemory(const MemoryLevel memLevel)
Definition: DataMgr.cpp:465

+ Here is the call graph for this function:

Member Function Documentation

AbstractBuffer * Data_Namespace::DataMgr::alloc ( const MemoryLevel  memoryLevel,
const int  deviceId,
const size_t  numBytes 
)

Definition at line 555 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, CHECK_LT, and levelSizes_.

Referenced by ThrustAllocator::allocate(), ThrustAllocator::allocateScopedBuffer(), CudaAllocator::allocGpuAbstractBuffer(), and InValuesBitmap::InValuesBitmap().

557  {
558  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
559  const auto level = static_cast<int>(memoryLevel);
560  CHECK_LT(deviceId, levelSizes_[level]);
561  return bufferMgrs_[level][deviceId]->alloc(numBytes);
562 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:293
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
std::vector< int > levelSizes_
Definition: DataMgr.h:240
#define CHECK_LT(x, y)
Definition: Logger.h:303

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::allocateCpuBufferMgr ( int32_t  device_id,
size_t  total_cpu_size,
size_t  minCpuSlabSize,
size_t  maxCpuSlabSize,
size_t  page_size,
const std::vector< size_t > &  cpu_tier_sizes 
)
private

Definition at line 202 of file DataMgr.cpp.

References bufferMgrs_, and cudaMgr_.

Referenced by populateMgrs().

207  {
208 #ifdef ENABLE_MEMKIND
209  if (g_enable_tiered_cpu_mem) {
210  bufferMgrs_[1].push_back(new Buffer_Namespace::TieredCpuBufferMgr(0,
211  total_cpu_size,
212  cudaMgr_.get(),
213  minCpuSlabSize,
214  maxCpuSlabSize,
215  page_size,
216  cpu_tier_sizes,
217  bufferMgrs_[0][0]));
218  return;
219  }
220 #endif
221 
222  bufferMgrs_[1].push_back(new Buffer_Namespace::CpuBufferMgr(0,
223  total_cpu_size,
224  cudaMgr_.get(),
225  minCpuSlabSize,
226  maxCpuSlabSize,
227  page_size,
228  bufferMgrs_[0][0]));
229 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
Definition: DataMgr.h:289

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::atExitHandler ( )
static

Definition at line 59 of file DataMgr.cpp.

References Data_Namespace::anonymous_namespace{DataMgr.cpp}::at_exit_called, clearMemory(), Data_Namespace::anonymous_namespace{DataMgr.cpp}::g_data_mgr_ptr, Data_Namespace::GPU_LEVEL, and hasGpus_.

59  {
60  at_exit_called = true;
61  if (g_data_mgr_ptr && g_data_mgr_ptr->hasGpus_) {
62  // safely destroy all gpu allocations explicitly to avoid unexpected
63  // `CUDA_ERROR_DEINITIALIZED` exception while trying to synchronize
64  // devices to destroy BufferMgr for GPU, i.e., 'GpuCudaBufferMgr` and `CudaMgr`
65  g_data_mgr_ptr->clearMemory(MemoryLevel::GPU_LEVEL);
66  }
67 }
void clearMemory(const MemoryLevel memLevel)
Definition: DataMgr.cpp:465

+ Here is the call graph for this function:

void Data_Namespace::DataMgr::checkpoint ( const int  db_id,
const int  tb_id 
)

Definition at line 584 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

Referenced by UpdelRoll::stageUpdate().

584  {
585  // TODO(adb): do we need a buffer mgr lock here?
586  // MAT Yes to reduce Parallel Executor TSAN issues (and correctness for now)
587  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
588  for (auto levelIt = bufferMgrs_.rbegin(); levelIt != bufferMgrs_.rend(); ++levelIt) {
589  // use reverse iterator so we start at GPU level, then CPU then DISK
590  for (auto deviceIt = levelIt->begin(); deviceIt != levelIt->end(); ++deviceIt) {
591  (*deviceIt)->checkpoint(db_id, tb_id);
592  }
593  }
594 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:293
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::checkpoint ( const int  db_id,
const int  table_id,
const MemoryLevel  memory_level 
)

Definition at line 596 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, CHECK_LT, and levelSizes_.

598  {
599  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
600  CHECK_LT(static_cast<size_t>(memory_level), bufferMgrs_.size());
601  CHECK_LT(static_cast<size_t>(memory_level), levelSizes_.size());
602  for (int device_id = 0; device_id < levelSizes_[memory_level]; device_id++) {
603  bufferMgrs_[memory_level][device_id]->checkpoint(db_id, table_id);
604  }
605 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:293
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
std::vector< int > levelSizes_
Definition: DataMgr.h:240
#define CHECK_LT(x, y)
Definition: Logger.h:303
void Data_Namespace::DataMgr::checkpoint ( )
private

Definition at line 607 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

Referenced by convertDB().

607  {
608  // TODO(adb): SAA
609  // MAT Yes to reduce Parallel Executor TSAN issues (and correctness for now)
610  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
611  for (auto levelIt = bufferMgrs_.rbegin(); levelIt != bufferMgrs_.rend(); ++levelIt) {
612  // use reverse iterator so we start at GPU level, then CPU then DISK
613  for (auto deviceIt = levelIt->begin(); deviceIt != levelIt->end(); ++deviceIt) {
614  (*deviceIt)->checkpoint();
615  }
616  }
617 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:293
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::clearMemory ( const MemoryLevel  memLevel)

Definition at line 465 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, CHECK, cudaMgr_, Data_Namespace::GPU_LEVEL, LOG, and logger::WARNING.

Referenced by atExitHandler(), Executor::clearMemory(), and ~DataMgr().

465  {
466  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
467 
468  // if gpu we need to iterate through all the buffermanagers for each card
469  if (memLevel == MemoryLevel::GPU_LEVEL) {
470  if (cudaMgr_) {
471  int numGpus = cudaMgr_->getDeviceCount();
472  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
473  auto buffer_mgr_for_gpu =
474  dynamic_cast<Buffer_Namespace::BufferMgr*>(bufferMgrs_[memLevel][gpuNum]);
475  CHECK(buffer_mgr_for_gpu);
476  buffer_mgr_for_gpu->clearSlabs();
477  }
478  } else {
479  LOG(WARNING) << "Unable to clear GPU memory: No GPUs detected";
480  }
481  } else {
482  auto buffer_mgr_for_cpu =
483  dynamic_cast<Buffer_Namespace::BufferMgr*>(bufferMgrs_[memLevel][0]);
484  CHECK(buffer_mgr_for_cpu);
485  buffer_mgr_for_cpu->clearSlabs();
486  }
487 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:293
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
#define LOG(tag)
Definition: Logger.h:285
Note(s): Forbid Copying Idiom 4.1.
Definition: BufferMgr.h:96
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
Definition: DataMgr.h:289
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::convertDB ( const std::string  basePath)
private

Definition at line 334 of file DataMgr.cpp.

References bufferMgrs_, CHECK, checkpoint(), logger::FATAL, getGlobalFileMgr(), logger::INFO, shared::kDataDirectoryName, and LOG.

334  {
335  // no need for locking, as this is only called in the constructor
336 
337  /* check that the data directory exists and it's empty */
338  std::string mapdDataPath(basePath + "/../" + shared::kDataDirectoryName + "/");
339  boost::filesystem::path path(mapdDataPath);
340  if (boost::filesystem::exists(path)) {
341  if (!boost::filesystem::is_directory(path)) {
342  LOG(FATAL) << "Path to directory \"" + shared::kDataDirectoryName +
343  "\" to convert DB is not a directory.";
344  }
345  } else { // data directory does not exist
346  LOG(FATAL) << "Path to directory \"" + shared::kDataDirectoryName +
347  "\" to convert DB does not exist.";
348  }
349 
350  File_Namespace::GlobalFileMgr* gfm{nullptr};
351  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
352  CHECK(gfm);
353 
354  LOG(INFO) << "Database conversion started.";
355  // this call also copies data into new DB structure
356  File_Namespace::FileMgr* fm_base_db = new File_Namespace::FileMgr(gfm, basePath);
357  delete fm_base_db;
358 
359  /* write content of DB into newly created/converted DB structure & location */
360  checkpoint(); // outputs data files as well as metadata files
361  LOG(INFO) << "Database conversion completed.";
362 }
const std::string kDataDirectoryName
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
#define LOG(tag)
Definition: Logger.h:285
File_Namespace::GlobalFileMgr * getGlobalFileMgr() const
Definition: DataMgr.cpp:649
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the call graph for this function:

void Data_Namespace::DataMgr::copy ( AbstractBuffer destBuffer,
AbstractBuffer srcBuffer 
)

Definition at line 570 of file DataMgr.cpp.

References Data_Namespace::AbstractBuffer::getDeviceId(), Data_Namespace::AbstractBuffer::getMemoryPtr(), Data_Namespace::AbstractBuffer::getType(), Data_Namespace::AbstractBuffer::size(), and Data_Namespace::AbstractBuffer::write().

570  {
571  destBuffer->write(srcBuffer->getMemoryPtr(),
572  srcBuffer->size(),
573  0,
574  srcBuffer->getType(),
575  srcBuffer->getDeviceId());
576 }
virtual int8_t * getMemoryPtr()=0
virtual MemoryLevel getType() const =0
virtual void write(int8_t *src, const size_t num_bytes, const size_t offset=0, const MemoryLevel src_buffer_type=CPU_LEVEL, const int src_device_id=-1)=0

+ Here is the call graph for this function:

AbstractBuffer * Data_Namespace::DataMgr::createChunkBuffer ( const ChunkKey key,
const MemoryLevel  memoryLevel,
const int  deviceId = 0,
const size_t  page_size = 0 
)

Definition at line 502 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

Referenced by Chunk_NS::Chunk::createChunkBuffer().

505  {
506  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
507  int level = static_cast<int>(memoryLevel);
508  return bufferMgrs_[level][deviceId]->createBuffer(key, page_size);
509 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:293
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::createTopLevelMetadata ( ) const
private

Definition at line 364 of file DataMgr.cpp.

References bufferMgrs_, CHECK, and getGlobalFileMgr().

Referenced by resetBufferMgrs().

365  { // create metadata shared by all tables of all DBs
366  ChunkKey chunkKey(2);
367  chunkKey[0] = 0; // top level db_id
368  chunkKey[1] = 0; // top level tb_id
369 
370  File_Namespace::GlobalFileMgr* gfm{nullptr};
371  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
372  CHECK(gfm);
373 
374  auto fm_top = gfm->getFileMgr(chunkKey);
375  if (auto fm = dynamic_cast<File_Namespace::FileMgr*>(fm_top)) {
376  fm->createOrMigrateTopLevelMetadata();
377  }
378 }
std::vector< int > ChunkKey
Definition: types.h:36
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
File_Namespace::GlobalFileMgr * getGlobalFileMgr() const
Definition: DataMgr.cpp:649
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::deleteChunk ( const ChunkKey key,
const MemoryLevel  mem_level,
const int  device_id 
)

Definition at line 547 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, and CHECK_LT.

Referenced by AlterTableAlterColumnCommandRecoveryMgr::cleanupClearChunk().

549  {
550  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
551  CHECK_LT(memLevel, bufferMgrs_.size());
552  bufferMgrs_[memLevel][device_id]->deleteBuffer(key);
553 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:293
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
#define CHECK_LT(x, y)
Definition: Logger.h:303

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::deleteChunksWithPrefix ( const ChunkKey keyPrefix)

Definition at line 522 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, and levelSizes_.

Referenced by AlterTableAlterColumnCommandRecoveryMgr::cleanupDropSourceGeoColumns(), foreign_storage::anonymous_namespace{ForeignTableRefresh.cpp}::clear_cpu_and_gpu_cache(), anonymous_namespace{TableOptimizer.cpp}::delete_cpu_chunks(), AlterTableAlterColumnCommandRecoveryMgr::recoverAlterTableAlterColumnFromFile(), AlterTableAlterColumnCommandRecoveryMgr::rollback(), and UpdelRoll::updateFragmenterAndCleanupChunks().

522  {
523  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
524 
525  int numLevels = bufferMgrs_.size();
526  for (int level = numLevels - 1; level >= 0; --level) {
527  for (int device = 0; device < levelSizes_[level]; ++device) {
528  bufferMgrs_[level][device]->deleteBuffersWithPrefix(keyPrefix);
529  }
530  }
531 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:293
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
std::vector< int > levelSizes_
Definition: DataMgr.h:240

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::deleteChunksWithPrefix ( const ChunkKey keyPrefix,
const MemoryLevel  memLevel 
)

Definition at line 534 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, and levelSizes_.

535  {
536  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
537 
538  if (bufferMgrs_.size() <= memLevel) {
539  return;
540  }
541  for (int device = 0; device < levelSizes_[memLevel]; ++device) {
542  bufferMgrs_[memLevel][device]->deleteBuffersWithPrefix(keyPrefix);
543  }
544 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:293
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
std::vector< int > levelSizes_
Definition: DataMgr.h:240
std::string Data_Namespace::DataMgr::dumpLevel ( const MemoryLevel  memLevel)

Definition at line 449 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, cudaMgr_, and Data_Namespace::GPU_LEVEL.

449  {
450  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
451 
452  // if gpu we need to iterate through all the buffermanagers for each card
453  if (memLevel == MemoryLevel::GPU_LEVEL) {
454  int numGpus = cudaMgr_->getDeviceCount();
455  std::ostringstream tss;
456  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
457  tss << bufferMgrs_[memLevel][gpuNum]->printSlabs();
458  }
459  return tss.str();
460  } else {
461  return bufferMgrs_[memLevel][0]->printSlabs();
462  }
463 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:293
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
Definition: DataMgr.h:289
void Data_Namespace::DataMgr::free ( AbstractBuffer buffer)

Definition at line 564 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, Data_Namespace::AbstractBuffer::getDeviceId(), and Data_Namespace::AbstractBuffer::getType().

Referenced by UpdelRoll::cancelUpdate(), ThrustAllocator::deallocate(), CudaAllocator::free(), CudaAllocator::freeGpuAbstractBuffer(), BaselineHashTable::~BaselineHashTable(), CudaAllocator::~CudaAllocator(), InValuesBitmap::~InValuesBitmap(), PerfectHashTable::~PerfectHashTable(), StringDictionaryTranslationMgr::~StringDictionaryTranslationMgr(), ThrustAllocator::~ThrustAllocator(), and TreeModelPredictionMgr::~TreeModelPredictionMgr().

564  {
565  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
566  int level = static_cast<int>(buffer->getType());
567  bufferMgrs_[level][buffer->getDeviceId()]->free(buffer);
568 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:293
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
virtual MemoryLevel getType() const =0

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

AbstractBuffer * Data_Namespace::DataMgr::getChunkBuffer ( const ChunkKey key,
const MemoryLevel  memoryLevel,
const int  deviceId = 0,
const size_t  numBytes = 0 
)

Definition at line 511 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, CHECK_LT, and levelSizes_.

Referenced by Chunk_NS::Chunk::getChunkBuffer().

514  {
515  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
516  const auto level = static_cast<size_t>(memoryLevel);
517  CHECK_LT(level, levelSizes_.size()); // make sure we have a legit buffermgr
518  CHECK_LT(deviceId, levelSizes_[level]); // make sure we have a legit buffermgr
519  return bufferMgrs_[level][deviceId]->getBuffer(key, numBytes);
520 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:293
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
std::vector< int > levelSizes_
Definition: DataMgr.h:240
#define CHECK_LT(x, y)
Definition: Logger.h:303

+ Here is the caller graph for this function:

const std::map<ChunkKey, File_Namespace::FileBuffer*>& Data_Namespace::DataMgr::getChunkMap ( )
void Data_Namespace::DataMgr::getChunkMetadataVecForKeyPrefix ( ChunkMetadataVector chunkMetadataVec,
const ChunkKey keyPrefix 
)

Definition at line 496 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

Referenced by AlterTableAlterColumnCommandRecoveryMgr::cleanupClearRemainingChunks(), TableOptimizer::vacuumFragments(), and anonymous_namespace{DdlCommandExecutor.cpp}::validate_alter_type_metadata().

497  {
498  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
499  bufferMgrs_[0][0]->getChunkMetadataVecForKeyPrefix(chunkMetadataVec, keyPrefix);
500 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:293
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288

+ Here is the caller graph for this function:

Buffer_Namespace::CpuBufferMgr * Data_Namespace::DataMgr::getCpuBufferMgr ( ) const

Definition at line 698 of file DataMgr.cpp.

References bufferMgrs_, CHECK, and Data_Namespace::CPU_LEVEL.

Referenced by getCpuBufferPoolSize().

698  {
699  CHECK(bufferMgrs_.size() > MemoryLevel::CPU_LEVEL);
700  return dynamic_cast<Buffer_Namespace::CpuBufferMgr*>(
701  bufferMgrs_[MemoryLevel::CPU_LEVEL][0]);
702 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the caller graph for this function:

size_t Data_Namespace::DataMgr::getCpuBufferPoolSize ( ) const

Definition at line 681 of file DataMgr.cpp.

References getCpuBufferMgr(), and Buffer_Namespace::BufferMgr::getMaxSize().

681  {
682  return getCpuBufferMgr()->getMaxSize();
683 }
size_t getMaxSize() override
Definition: BufferMgr.cpp:498
Buffer_Namespace::CpuBufferMgr * getCpuBufferMgr() const
Definition: DataMgr.cpp:698

+ Here is the call graph for this function:

CudaMgr_Namespace::CudaMgr* Data_Namespace::DataMgr::getCudaMgr ( ) const
inline

Definition at line 235 of file DataMgr.h.

References cudaMgr_.

Referenced by Executor::blockSize(), copy_to_nvidia_gpu(), CudaAllocator::copyFromDevice(), CudaAllocator::copyToDevice(), CudaAllocator::CudaAllocator(), Executor::cudaMgr(), get_available_gpus(), Executor::gridSize(), Executor::isCPUOnly(), Executor::logSystemGPUMemoryStatus(), CudaAllocator::setDeviceMem(), and CudaAllocator::zeroDeviceMem().

235 { return cudaMgr_.get(); }
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
Definition: DataMgr.h:289

+ Here is the caller graph for this function:

std::shared_ptr< ForeignStorageInterface > Data_Namespace::DataMgr::getForeignStorageInterface ( ) const

Definition at line 657 of file DataMgr.cpp.

References bufferMgrs_.

657  {
658  return dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])
659  ->getForeignStorageInterface();
660 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
std::shared_ptr< ForeignStorageInterface > getForeignStorageInterface() const
Definition: DataMgr.cpp:657
File_Namespace::GlobalFileMgr * Data_Namespace::DataMgr::getGlobalFileMgr ( ) const

Definition at line 649 of file DataMgr.cpp.

References bufferMgrs_, and CHECK.

Referenced by convertDB(), createTopLevelMetadata(), TableArchiver::dumpTable(), anonymous_namespace{DdlCommandExecutor.cpp}::get_agg_storage_stats(), getTableEpoch(), foreign_storage::InternalStorageStatsDataWrapper::initializeObjectsForTable(), resetTableEpochFloor(), TableArchiver::restoreTable(), setTableEpoch(), and TableOptimizer::vacuumDeletedRows().

649  {
650  File_Namespace::GlobalFileMgr* global_file_mgr{nullptr};
651  global_file_mgr =
652  dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
653  CHECK(global_file_mgr);
654  return global_file_mgr;
655 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
File_Namespace::GlobalFileMgr * getGlobalFileMgr() const
Definition: DataMgr.cpp:649
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the caller graph for this function:

Buffer_Namespace::GpuCudaBufferMgr * Data_Namespace::DataMgr::getGpuBufferMgr ( int32_t  device_id) const

Definition at line 704 of file DataMgr.cpp.

References bufferMgrs_, CHECK_GT, and Data_Namespace::GPU_LEVEL.

704  {
705  if (bufferMgrs_.size() > MemoryLevel::GPU_LEVEL) {
706  CHECK_GT(bufferMgrs_[MemoryLevel::GPU_LEVEL].size(), static_cast<size_t>(device_id));
707  return dynamic_cast<Buffer_Namespace::GpuCudaBufferMgr*>(
708  bufferMgrs_[MemoryLevel::GPU_LEVEL][device_id]);
709  } else {
710  return nullptr;
711  }
712 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
#define CHECK_GT(x, y)
Definition: Logger.h:305
size_t Data_Namespace::DataMgr::getGpuBufferPoolSize ( ) const

Definition at line 686 of file DataMgr.cpp.

References bufferMgrs_, and Data_Namespace::GPU_LEVEL.

686  {
687  if (bufferMgrs_.size() <= MemoryLevel::GPU_LEVEL) {
688  return static_cast<size_t>(0);
689  }
690  size_t total_gpu_buffer_pools_size{0};
691  for (auto const gpu_buffer_mgr : bufferMgrs_[MemoryLevel::GPU_LEVEL]) {
692  total_gpu_buffer_pools_size +=
693  dynamic_cast<Buffer_Namespace::GpuCudaBufferMgr*>(gpu_buffer_mgr)->getMaxSize();
694  }
695  return total_gpu_buffer_pools_size;
696 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
std::vector< MemoryInfo > Data_Namespace::DataMgr::getMemoryInfo ( const MemoryLevel  memLevel) const

Definition at line 380 of file DataMgr.cpp.

References buffer_access_mutex_, and getMemoryInfoUnlocked().

Referenced by Executor::createKernels().

380  {
381  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
382  return getMemoryInfoUnlocked(mem_level);
383 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:293
std::vector< MemoryInfo > getMemoryInfoUnlocked(const MemoryLevel memLevel) const
Definition: DataMgr.cpp:385

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::vector< MemoryInfo > Data_Namespace::DataMgr::getMemoryInfoUnlocked ( const MemoryLevel  memLevel) const

Definition at line 385 of file DataMgr.cpp.

References bufferMgrs_, CHECK, Data_Namespace::MemoryData::chunk_key, Data_Namespace::CPU_LEVEL, cudaMgr_, Buffer_Namespace::BufferMgr::getAllocated(), Buffer_Namespace::BufferMgr::getMaxSize(), Buffer_Namespace::BufferMgr::getPageSize(), Buffer_Namespace::BufferMgr::getSlabSegments(), Data_Namespace::GPU_LEVEL, hasGpus_, Data_Namespace::MemoryInfo::isAllocationCapped, Buffer_Namespace::BufferMgr::isAllocationCapped(), Data_Namespace::MemoryInfo::maxNumPages, Data_Namespace::MemoryData::memStatus, Data_Namespace::MemoryInfo::nodeMemoryData, Data_Namespace::MemoryInfo::numPageAllocated, Data_Namespace::MemoryData::numPages, Data_Namespace::MemoryInfo::pageSize, Data_Namespace::MemoryData::slabNum, Data_Namespace::MemoryData::startPage, and Data_Namespace::MemoryData::touch.

Referenced by getMemoryInfo().

386  {
387  std::vector<MemoryInfo> mem_info;
388  if (mem_level == MemoryLevel::CPU_LEVEL) {
389  Buffer_Namespace::CpuBufferMgr* cpu_buffer =
390  dynamic_cast<Buffer_Namespace::CpuBufferMgr*>(
391  bufferMgrs_[MemoryLevel::CPU_LEVEL][0]);
392  CHECK(cpu_buffer);
393  MemoryInfo mi;
394 
395  mi.pageSize = cpu_buffer->getPageSize();
396  mi.maxNumPages = cpu_buffer->getMaxSize() / mi.pageSize;
397  mi.isAllocationCapped = cpu_buffer->isAllocationCapped();
398  mi.numPageAllocated = cpu_buffer->getAllocated() / mi.pageSize;
399 
400  const auto& slab_segments = cpu_buffer->getSlabSegments();
401  for (size_t slab_num = 0; slab_num < slab_segments.size(); ++slab_num) {
402  for (auto const& segment : slab_segments[slab_num]) {
403  MemoryData md;
404  md.slabNum = slab_num;
405  md.startPage = segment.start_page;
406  md.numPages = segment.num_pages;
407  md.touch = segment.last_touched;
408  md.memStatus = segment.mem_status;
409  md.chunk_key.insert(
410  md.chunk_key.end(), segment.chunk_key.begin(), segment.chunk_key.end());
411  mi.nodeMemoryData.push_back(md);
412  }
413  }
414  mem_info.push_back(mi);
415  } else if (hasGpus_) {
416  int numGpus = cudaMgr_->getDeviceCount();
417  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
418  Buffer_Namespace::GpuCudaBufferMgr* gpu_buffer =
419  dynamic_cast<Buffer_Namespace::GpuCudaBufferMgr*>(
420  bufferMgrs_[MemoryLevel::GPU_LEVEL][gpuNum]);
421  CHECK(gpu_buffer);
422  MemoryInfo mi;
423 
424  mi.pageSize = gpu_buffer->getPageSize();
425  mi.maxNumPages = gpu_buffer->getMaxSize() / mi.pageSize;
426  mi.isAllocationCapped = gpu_buffer->isAllocationCapped();
427  mi.numPageAllocated = gpu_buffer->getAllocated() / mi.pageSize;
428 
429  const auto& slab_segments = gpu_buffer->getSlabSegments();
430  for (size_t slab_num = 0; slab_num < slab_segments.size(); ++slab_num) {
431  for (auto const& segment : slab_segments[slab_num]) {
432  MemoryData md;
433  md.slabNum = slab_num;
434  md.startPage = segment.start_page;
435  md.numPages = segment.num_pages;
436  md.touch = segment.last_touched;
437  md.chunk_key.insert(
438  md.chunk_key.end(), segment.chunk_key.begin(), segment.chunk_key.end());
439  md.memStatus = segment.mem_status;
440  mi.nodeMemoryData.push_back(md);
441  }
442  }
443  mem_info.push_back(mi);
444  }
445  }
446  return mem_info;
447 }
size_t getAllocated() override
Definition: BufferMgr.cpp:503
std::vector< MemoryData > nodeMemoryData
Definition: DataMgr.h:75
Buffer_Namespace::MemStatus memStatus
Definition: DataMgr.h:67
size_t getMaxSize() override
Definition: BufferMgr.cpp:498
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
bool isAllocationCapped() override
Definition: BufferMgr.cpp:508
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
Definition: DataMgr.h:289
const std::vector< BufferList > & getSlabSegments()
Definition: BufferMgr.cpp:914
#define CHECK(condition)
Definition: Logger.h:291
std::vector< int32_t > chunk_key
Definition: DataMgr.h:66

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

PersistentStorageMgr * Data_Namespace::DataMgr::getPersistentStorageMgr ( ) const

Definition at line 677 of file DataMgr.cpp.

References bufferMgrs_, and Data_Namespace::DISK_LEVEL.

Referenced by Catalog_Namespace::anonymous_namespace{Catalog.cpp}::clear_cached_table_data(), removeMutableTableDiskCacheData(), and anonymous_namespace{RelAlgExecutor.cpp}::set_parallelism_hints().

677  {
678  return dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[MemoryLevel::DISK_LEVEL][0]);
679 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288

+ Here is the caller graph for this function:

DataMgr::SystemMemoryUsage Data_Namespace::DataMgr::getSystemMemoryUsage ( ) const

Definition at line 123 of file DataMgr.cpp.

References Data_Namespace::DataMgr::SystemMemoryUsage::frag, Data_Namespace::DataMgr::SystemMemoryUsage::free, Data_Namespace::ProcBuddyinfoParser::getFragmentationPercent(), Data_Namespace::DataMgr::SystemMemoryUsage::regular, Data_Namespace::DataMgr::SystemMemoryUsage::resident, Data_Namespace::DataMgr::SystemMemoryUsage::shared, Data_Namespace::DataMgr::SystemMemoryUsage::total, and Data_Namespace::DataMgr::SystemMemoryUsage::vtotal.

Referenced by anonymous_namespace{DBHandler.cpp}::log_system_cpu_memory_status(), and Executor::logSystemCPUMemoryStatus().

123  {
124  SystemMemoryUsage usage;
125 #ifdef __linux__
126 
127  // Determine Linux available memory and total memory.
128  // Available memory is different from free memory because
129  // when Linux sees free memory, it tries to use it for
130  // stuff like disk caching. However, the memory is not
131  // reserved and is still available to be allocated by
132  // user processes.
133  // Parsing /proc/meminfo for this info isn't very elegant
134  // but as a virtual file it should be reasonably fast.
135  // See also:
136  // https://github.com/torvalds/linux/commit/34e431b0ae398fc54ea69ff85ec700722c9da773
137  ProcMeminfoParser mi;
138  usage.free = mi["MemAvailable"];
139  usage.total = mi["MemTotal"];
140 
141  // Determine process memory in use.
142  // See also:
143  // https://stackoverflow.com/questions/669438/how-to-get-memory-usage-at-runtime-using-c
144  // http://man7.org/linux/man-pages/man5/proc.5.html
145  int64_t size = 0;
146  int64_t resident = 0;
147  int64_t shared = 0;
148 
149  std::ifstream fstatm("/proc/self/statm");
150  fstatm >> size >> resident >> shared;
151  fstatm.close();
152 
153  long page_size =
154  sysconf(_SC_PAGE_SIZE); // in case x86-64 is configured to use 2MB pages
155 
156  usage.resident = resident * page_size;
157  usage.vtotal = size * page_size;
158  usage.regular = (resident - shared) * page_size;
159  usage.shared = shared * page_size;
160 
161  ProcBuddyinfoParser bi;
162  usage.frag = bi.getFragmentationPercent();
163 
164 #else
165 
166  usage.total = 0;
167  usage.free = 0;
168  usage.resident = 0;
169  usage.vtotal = 0;
170  usage.regular = 0;
171  usage.shared = 0;
172  usage.frag = 0;
173 
174 #endif
175 
176  return usage;
177 }
Parse /proc/meminfo into key/value pairs.
Definition: DataMgr.h:79
Parse /proc/buddyinfo into a Fragmentation health score.
Definition: DataMgr.h:112

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

size_t Data_Namespace::DataMgr::getTableEpoch ( const int  db_id,
const int  tb_id 
)

Definition at line 635 of file DataMgr.cpp.

References bufferMgrs_, CHECK, and getGlobalFileMgr().

635  {
636  File_Namespace::GlobalFileMgr* gfm{nullptr};
637  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
638  CHECK(gfm);
639  return gfm->getTableEpoch(db_id, tb_id);
640 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
File_Namespace::GlobalFileMgr * getGlobalFileMgr() const
Definition: DataMgr.cpp:649
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the call graph for this function:

size_t Data_Namespace::DataMgr::getTotalSystemMemory ( )
static

Definition at line 179 of file DataMgr.cpp.

Referenced by populateMgrs().

179  {
180 #ifdef __APPLE__
181  int mib[2];
182  size_t physical_memory;
183  size_t length;
184  // Get the Physical memory size
185  mib[0] = CTL_HW;
186  mib[1] = HW_MEMSIZE;
187  length = sizeof(size_t);
188  sysctl(mib, 2, &physical_memory, &length, NULL, 0);
189  return physical_memory;
190 #elif defined(_MSC_VER)
191  MEMORYSTATUSEX status;
192  status.dwLength = sizeof(status);
193  GlobalMemoryStatusEx(&status);
194  return status.ullTotalPhys;
195 #else // Linux
196  long pages = sysconf(_SC_PHYS_PAGES);
197  long page_size = sysconf(_SC_PAGE_SIZE);
198  return pages * page_size;
199 #endif
200 }

+ Here is the caller graph for this function:

bool Data_Namespace::DataMgr::gpusPresent ( ) const
inline

Definition at line 228 of file DataMgr.h.

References hasGpus_.

Referenced by get_available_gpus().

228 { return hasGpus_; }

+ Here is the caller graph for this function:

bool Data_Namespace::DataMgr::isBufferOnDevice ( const ChunkKey key,
const MemoryLevel  memLevel,
const int  deviceId 
)

Definition at line 489 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

Referenced by Chunk_NS::Chunk::isChunkOnDevice().

491  {
492  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
493  return bufferMgrs_[memLevel][deviceId]->isBufferOnDevice(key);
494 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:293
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::populateMgrs ( const SystemParameters system_parameters,
const size_t  userSpecifiedNumReaderThreads,
const File_Namespace::DiskCacheConfig cache_config 
)
private

Definition at line 246 of file DataMgr.cpp.

References allocateCpuBufferMgr(), SystemParameters::buffer_page_size, bufferMgrs_, CHECK_GT, SystemParameters::cpu_buffer_mem_bytes, cudaMgr_, dataDir_, Data_Namespace::DRAM, g_pmem_size, getTotalSystemMemory(), SystemParameters::gpu_buffer_mem_bytes, hasGpus_, logger::INFO, levelSizes_, LOG, SystemParameters::max_cpu_slab_size, SystemParameters::max_gpu_slab_size, SystemParameters::min_cpu_slab_size, SystemParameters::min_gpu_slab_size, Data_Namespace::numCpuTiers, Data_Namespace::PMEM, reservedGpuMem_, and VLOG.

Referenced by resetBufferMgrs().

248  {
249  // no need for locking, as this is only called in the constructor
250  bufferMgrs_.resize(2);
251  bufferMgrs_[0].push_back(
252  new PersistentStorageMgr(dataDir_, userSpecifiedNumReaderThreads, cache_config));
253 
254  levelSizes_.push_back(1);
255  auto page_size = system_parameters.buffer_page_size;
256  CHECK_GT(page_size, size_t(0));
257  size_t cpuBufferSize = system_parameters.cpu_buffer_mem_bytes;
258  if (cpuBufferSize == 0) { // if size is not specified
259  const auto total_system_memory = getTotalSystemMemory();
260  VLOG(1) << "Detected " << (float)total_system_memory / (1024 * 1024)
261  << "M of total system memory.";
262  cpuBufferSize = total_system_memory *
263  0.8; // should get free memory instead of this ugly heuristic
264  }
265  size_t minCpuSlabSize = std::min(system_parameters.min_cpu_slab_size, cpuBufferSize);
266  minCpuSlabSize = (minCpuSlabSize / page_size) * page_size;
267  size_t maxCpuSlabSize = std::min(system_parameters.max_cpu_slab_size, cpuBufferSize);
268  maxCpuSlabSize = (maxCpuSlabSize / page_size) * page_size;
269  LOG(INFO) << "Min CPU Slab Size is " << (float)minCpuSlabSize / (1024 * 1024) << "MB";
270  LOG(INFO) << "Max CPU Slab Size is " << (float)maxCpuSlabSize / (1024 * 1024) << "MB";
271  LOG(INFO) << "Max memory pool size for CPU is " << (float)cpuBufferSize / (1024 * 1024)
272  << "MB";
273 
274  size_t total_cpu_size = 0;
275 
276 #ifdef ENABLE_MEMKIND
277  CpuTierSizeVector cpu_tier_sizes(numCpuTiers, 0);
278  cpu_tier_sizes[CpuTier::DRAM] = cpuBufferSize;
279  if (g_enable_tiered_cpu_mem) {
280  cpu_tier_sizes[CpuTier::PMEM] = g_pmem_size;
281  LOG(INFO) << "Max memory pool size for PMEM is " << (float)g_pmem_size / (1024 * 1024)
282  << "MB";
283  }
284  for (auto cpu_tier_size : cpu_tier_sizes) {
285  total_cpu_size += cpu_tier_size;
286  }
287 #else
288  CpuTierSizeVector cpu_tier_sizes{};
289  total_cpu_size = cpuBufferSize;
290 #endif
291 
292  if (hasGpus_ || cudaMgr_) {
293  LOG(INFO) << "Reserved GPU memory is " << (float)reservedGpuMem_ / (1024 * 1024)
294  << "MB includes render buffer allocation";
295  bufferMgrs_.resize(3);
296  allocateCpuBufferMgr(
297  0, total_cpu_size, minCpuSlabSize, maxCpuSlabSize, page_size, cpu_tier_sizes);
298 
299  levelSizes_.push_back(1);
300  int numGpus = cudaMgr_->getDeviceCount();
301  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
302  size_t gpuMaxMemSize =
303  system_parameters.gpu_buffer_mem_bytes != 0
304  ? system_parameters.gpu_buffer_mem_bytes
305  : (cudaMgr_->getDeviceProperties(gpuNum)->globalMem) - (reservedGpuMem_);
306  size_t minGpuSlabSize =
307  std::min(system_parameters.min_gpu_slab_size, gpuMaxMemSize);
308  minGpuSlabSize = (minGpuSlabSize / page_size) * page_size;
309  size_t maxGpuSlabSize =
310  std::min(system_parameters.max_gpu_slab_size, gpuMaxMemSize);
311  maxGpuSlabSize = (maxGpuSlabSize / page_size) * page_size;
312  LOG(INFO) << "Min GPU Slab size for GPU " << gpuNum << " is "
313  << (float)minGpuSlabSize / (1024 * 1024) << "MB";
314  LOG(INFO) << "Max GPU Slab size for GPU " << gpuNum << " is "
315  << (float)maxGpuSlabSize / (1024 * 1024) << "MB";
316  LOG(INFO) << "Max memory pool size for GPU " << gpuNum << " is "
317  << (float)gpuMaxMemSize / (1024 * 1024) << "MB";
318  bufferMgrs_[2].push_back(new Buffer_Namespace::GpuCudaBufferMgr(gpuNum,
319  gpuMaxMemSize,
320  cudaMgr_.get(),
321  minGpuSlabSize,
322  maxGpuSlabSize,
323  page_size,
324  bufferMgrs_[1][0]));
325  }
326  levelSizes_.push_back(numGpus);
327  } else {
328  allocateCpuBufferMgr(
329  0, total_cpu_size, minCpuSlabSize, maxCpuSlabSize, page_size, cpu_tier_sizes);
330  levelSizes_.push_back(1);
331  }
332 }
size_t g_pmem_size
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
std::vector< int > levelSizes_
Definition: DataMgr.h:240
#define LOG(tag)
Definition: Logger.h:285
#define CHECK_GT(x, y)
Definition: Logger.h:305
constexpr size_t numCpuTiers
static size_t getTotalSystemMemory()
Definition: DataMgr.cpp:179
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
Definition: DataMgr.h:289
void allocateCpuBufferMgr(int32_t device_id, size_t total_cpu_size, size_t minCpuSlabSize, size_t maxCpuSlabSize, size_t page_size, const std::vector< size_t > &cpu_tier_sizes)
Definition: DataMgr.cpp:202
std::vector< size_t > CpuTierSizeVector
#define VLOG(n)
Definition: Logger.h:388
std::string dataDir_
Definition: DataMgr.h:290

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::removeMutableTableDiskCacheData ( const int  db_id,
const int  tb_id 
) const

Definition at line 624 of file DataMgr.cpp.

References getPersistentStorageMgr(), and PersistentStorageMgr::removeMutableTableCacheData().

624  {
625  getPersistentStorageMgr()->removeMutableTableCacheData(db_id, tb_id);
626 }
PersistentStorageMgr * getPersistentStorageMgr() const
Definition: DataMgr.cpp:677
void removeMutableTableCacheData(const int db_id, const int table_id) const

+ Here is the call graph for this function:

void Data_Namespace::DataMgr::removeTableRelatedDS ( const int  db_id,
const int  tb_id 
)

Definition at line 619 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

619  {
620  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
621  bufferMgrs_[0][0]->removeTableRelatedDS(db_id, tb_id);
622 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:293
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
void Data_Namespace::DataMgr::resetBufferMgrs ( const File_Namespace::DiskCacheConfig cache_config,
const size_t  num_reader_threads,
const SystemParameters sys_params 
)

Definition at line 232 of file DataMgr.cpp.

References bufferMgrs_, createTopLevelMetadata(), and populateMgrs().

234  {
235  int numLevels = bufferMgrs_.size();
236  for (int level = numLevels - 1; level >= 0; --level) {
237  for (size_t device = 0; device < bufferMgrs_[level].size(); device++) {
238  delete bufferMgrs_[level][device];
239  }
240  }
241  bufferMgrs_.clear();
242  populateMgrs(sys_params, num_reader_threads, cache_config);
243  createTopLevelMetadata();
244 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
void createTopLevelMetadata() const
Definition: DataMgr.cpp:364
void populateMgrs(const SystemParameters &system_parameters, const size_t userSpecifiedNumReaderThreads, const File_Namespace::DiskCacheConfig &cache_config)
Definition: DataMgr.cpp:246

+ Here is the call graph for this function:

void Data_Namespace::DataMgr::resetTableEpochFloor ( const int32_t  db_id,
const int32_t  tb_id 
)

Definition at line 642 of file DataMgr.cpp.

References bufferMgrs_, CHECK, and getGlobalFileMgr().

642  {
643  File_Namespace::GlobalFileMgr* gfm{nullptr};
644  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
645  CHECK(gfm);
646  gfm->resetTableEpochFloor(db_id, tb_id);
647 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
File_Namespace::GlobalFileMgr * getGlobalFileMgr() const
Definition: DataMgr.cpp:649
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the call graph for this function:

void Data_Namespace::DataMgr::setTableEpoch ( const int  db_id,
const int  tb_id,
const int  start_epoch 
)

Definition at line 628 of file DataMgr.cpp.

References bufferMgrs_, CHECK, and getGlobalFileMgr().

628  {
629  File_Namespace::GlobalFileMgr* gfm{nullptr};
630  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
631  CHECK(gfm);
632  gfm->setTableEpoch(db_id, tb_id, start_epoch);
633 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:288
File_Namespace::GlobalFileMgr * getGlobalFileMgr() const
Definition: DataMgr.cpp:649
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the call graph for this function:

Friends And Related Function Documentation

friend class GlobalFileMgr
friend

Definition at line 184 of file DataMgr.h.

Member Data Documentation

std::unique_ptr<CudaMgr_Namespace::CudaMgr> Data_Namespace::DataMgr::cudaMgr_
private
std::string Data_Namespace::DataMgr::dataDir_
private

Definition at line 290 of file DataMgr.h.

Referenced by populateMgrs().

bool Data_Namespace::DataMgr::hasGpus_
private

Definition at line 291 of file DataMgr.h.

Referenced by atExitHandler(), getMemoryInfoUnlocked(), gpusPresent(), populateMgrs(), and ~DataMgr().

std::vector<int> Data_Namespace::DataMgr::levelSizes_

Definition at line 240 of file DataMgr.h.

Referenced by alloc(), checkpoint(), deleteChunksWithPrefix(), getChunkBuffer(), and populateMgrs().

size_t Data_Namespace::DataMgr::reservedGpuMem_
private

Definition at line 292 of file DataMgr.h.

Referenced by populateMgrs().


The documentation for this class was generated from the following files: