OmniSciDB  c1a53651b2
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
Data_Namespace::DataMgr Class Reference

#include <DataMgr.h>

+ Collaboration diagram for Data_Namespace::DataMgr:

Classes

struct  SystemMemoryUsage
 

Public Member Functions

 DataMgr (const std::string &dataDir, const SystemParameters &system_parameters, std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr, const bool useGpus, const size_t reservedGpuMem=(1<< 27), const size_t numReaderThreads=0, const File_Namespace::DiskCacheConfig cacheConfig=File_Namespace::DiskCacheConfig())
 
 ~DataMgr ()
 
AbstractBuffer * createChunkBuffer (const ChunkKey &key, const MemoryLevel memoryLevel, const int deviceId=0, const size_t page_size=0)
 
AbstractBuffer * getChunkBuffer (const ChunkKey &key, const MemoryLevel memoryLevel, const int deviceId=0, const size_t numBytes=0)
 
void deleteChunksWithPrefix (const ChunkKey &keyPrefix)
 
void deleteChunksWithPrefix (const ChunkKey &keyPrefix, const MemoryLevel memLevel)
 
AbstractBuffer * alloc (const MemoryLevel memoryLevel, const int deviceId, const size_t numBytes)
 
void free (AbstractBuffer *buffer)
 
void copy (AbstractBuffer *destBuffer, AbstractBuffer *srcBuffer)
 
bool isBufferOnDevice (const ChunkKey &key, const MemoryLevel memLevel, const int deviceId)
 
std::vector< MemoryInfo > getMemoryInfo (const MemoryLevel memLevel) const
 
std::vector< MemoryInfo > getMemoryInfoUnlocked (const MemoryLevel memLevel) const
 
std::string dumpLevel (const MemoryLevel memLevel)
 
void clearMemory (const MemoryLevel memLevel)
 
const std::map< ChunkKey,
File_Namespace::FileBuffer * > & 
getChunkMap ()
 
void checkpoint (const int db_id, const int tb_id)
 
void checkpoint (const int db_id, const int table_id, const MemoryLevel memory_level)
 
void getChunkMetadataVecForKeyPrefix (ChunkMetadataVector &chunkMetadataVec, const ChunkKey &keyPrefix)
 
bool gpusPresent () const
 
void removeTableRelatedDS (const int db_id, const int tb_id)
 
void setTableEpoch (const int db_id, const int tb_id, const int start_epoch)
 
size_t getTableEpoch (const int db_id, const int tb_id)
 
void resetTableEpochFloor (const int32_t db_id, const int32_t tb_id)
 
CudaMgr_Namespace::CudaMgr * getCudaMgr () const
 
File_Namespace::GlobalFileMgr * getGlobalFileMgr () const
 
std::shared_ptr
< ForeignStorageInterface > 
getForeignStorageInterface () const
 
SystemMemoryUsage getSystemMemoryUsage () const
 
PersistentStorageMgr * getPersistentStorageMgr () const
 
void resetPersistentStorage (const File_Namespace::DiskCacheConfig &cache_config, const size_t num_reader_threads, const SystemParameters &sys_params)
 
Buffer_Namespace::CpuBufferMgr * getCpuBufferMgr () const
 
Buffer_Namespace::GpuCudaBufferMgr * getGpuBufferMgr (int32_t device_id) const
 

Static Public Member Functions

static size_t getTotalSystemMemory ()
 

Public Attributes

std::vector< int > levelSizes_
 

Private Member Functions

void populateMgrs (const SystemParameters &system_parameters, const size_t userSpecifiedNumReaderThreads, const File_Namespace::DiskCacheConfig &cache_config)
 
void convertDB (const std::string basePath)
 
void checkpoint ()
 
void createTopLevelMetadata () const
 
void allocateCpuBufferMgr (int32_t device_id, size_t total_cpu_size, size_t minCpuSlabSize, size_t maxCpuSlabSize, size_t page_size, const std::vector< size_t > &cpu_tier_sizes)
 

Private Attributes

std::vector< std::vector
< AbstractBufferMgr * > > 
bufferMgrs_
 
std::unique_ptr
< CudaMgr_Namespace::CudaMgr > 
cudaMgr_
 
std::string dataDir_
 
bool hasGpus_
 
size_t reservedGpuMem_
 
std::mutex buffer_access_mutex_
 

Friends

class GlobalFileMgr
 

Detailed Description

Definition at line 174 of file DataMgr.h.

Constructor & Destructor Documentation

Data_Namespace::DataMgr::DataMgr ( const std::string &  dataDir,
const SystemParameters &  system_parameters,
std::unique_ptr< CudaMgr_Namespace::CudaMgr >  cudaMgr,
const bool  useGpus,
const size_t  reservedGpuMem = (1 << 27),
const size_t  numReaderThreads = 0,
const File_Namespace::DiskCacheConfig  cacheConfig = File_Namespace::DiskCacheConfig() 
)
explicit

Definition at line 51 of file DataMgr.cpp.

58  : cudaMgr_{std::move(cudaMgr)}
59  , dataDir_{dataDir}
60  , hasGpus_{false}
61  , reservedGpuMem_{reservedGpuMem} {
62  if (useGpus) {
63  if (cudaMgr_) {
64  hasGpus_ = true;
65  } else {
66  LOG(ERROR) << "CudaMgr instance is invalid, falling back to CPU-only mode.";
67  hasGpus_ = false;
68  }
69  } else {
70  // NOTE: useGpus == false with a valid cudaMgr is a potentially valid configuration.
71  // i.e. QueryEngine can be set to cpu-only for a cuda-enabled build, but still have
72  // rendering enabled. The renderer would require a CudaMgr in this case, in addition
73  // to a GpuCudaBufferMgr for cuda-backed thrust allocations.
74  // We're still setting hasGpus_ to false in that case tho to enforce cpu-only query
75  // execution.
76  hasGpus_ = false;
77  }
78 
79  populateMgrs(system_parameters, numReaderThreads, cache_config);
80  createTopLevelMetadata();
81 }
#define LOG(tag)
Definition: Logger.h:285
void createTopLevelMetadata() const
Definition: DataMgr.cpp:333
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
Definition: DataMgr.h:273
void populateMgrs(const SystemParameters &system_parameters, const size_t userSpecifiedNumReaderThreads, const File_Namespace::DiskCacheConfig &cache_config)
Definition: DataMgr.cpp:216
std::string dataDir_
Definition: DataMgr.h:274
Data_Namespace::DataMgr::~DataMgr ( )

Definition at line 83 of file DataMgr.cpp.

References bufferMgrs_.

83  {
84  int numLevels = bufferMgrs_.size();
85  for (int level = numLevels - 1; level >= 0; --level) {
86  for (size_t device = 0; device < bufferMgrs_[level].size(); device++) {
87  delete bufferMgrs_[level][device];
88  }
89  }
90 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272

Member Function Documentation

AbstractBuffer * Data_Namespace::DataMgr::alloc ( const MemoryLevel  memoryLevel,
const int  deviceId,
const size_t  numBytes 
)

Definition at line 516 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, CHECK_LT, and levelSizes_.

Referenced by ThrustAllocator::allocate(), ThrustAllocator::allocateScopedBuffer(), CudaAllocator::allocGpuAbstractBuffer(), and InValuesBitmap::InValuesBitmap().

518  {
519  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
520  const auto level = static_cast<int>(memoryLevel);
521  CHECK_LT(deviceId, levelSizes_[level]);
522  return bufferMgrs_[level][deviceId]->alloc(numBytes);
523 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:277
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
std::vector< int > levelSizes_
Definition: DataMgr.h:229
#define CHECK_LT(x, y)
Definition: Logger.h:303

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::allocateCpuBufferMgr ( int32_t  device_id,
size_t  total_cpu_size,
size_t  minCpuSlabSize,
size_t  maxCpuSlabSize,
size_t  page_size,
const std::vector< size_t > &  cpu_tier_sizes 
)
private

Definition at line 172 of file DataMgr.cpp.

References bufferMgrs_, and cudaMgr_.

Referenced by populateMgrs().

177  {
178 #ifdef ENABLE_MEMKIND
179  if (g_enable_tiered_cpu_mem) {
180  bufferMgrs_[1].push_back(new Buffer_Namespace::TieredCpuBufferMgr(0,
181  total_cpu_size,
182  cudaMgr_.get(),
183  minCpuSlabSize,
184  maxCpuSlabSize,
185  page_size,
186  cpu_tier_sizes,
187  bufferMgrs_[0][0]));
188  return;
189  }
190 #endif
191 
192  bufferMgrs_[1].push_back(new Buffer_Namespace::CpuBufferMgr(0,
193  total_cpu_size,
194  cudaMgr_.get(),
195  minCpuSlabSize,
196  maxCpuSlabSize,
197  page_size,
198  bufferMgrs_[0][0]));
199 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
Definition: DataMgr.h:273

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::checkpoint ( const int  db_id,
const int  tb_id 
)

Definition at line 545 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

Referenced by UpdelRoll::stageUpdate().

545  {
546  // TODO(adb): do we need a buffer mgr lock here?
547  // MAT Yes to reduce Parallel Executor TSAN issues (and correctness for now)
548  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
549  for (auto levelIt = bufferMgrs_.rbegin(); levelIt != bufferMgrs_.rend(); ++levelIt) {
550  // use reverse iterator so we start at GPU level, then CPU then DISK
551  for (auto deviceIt = levelIt->begin(); deviceIt != levelIt->end(); ++deviceIt) {
552  (*deviceIt)->checkpoint(db_id, tb_id);
553  }
554  }
555 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:277
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::checkpoint ( const int  db_id,
const int  table_id,
const MemoryLevel  memory_level 
)

Definition at line 557 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, CHECK_LT, and levelSizes_.

559  {
560  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
561  CHECK_LT(static_cast<size_t>(memory_level), bufferMgrs_.size());
562  CHECK_LT(static_cast<size_t>(memory_level), levelSizes_.size());
563  for (int device_id = 0; device_id < levelSizes_[memory_level]; device_id++) {
564  bufferMgrs_[memory_level][device_id]->checkpoint(db_id, table_id);
565  }
566 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:277
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
std::vector< int > levelSizes_
Definition: DataMgr.h:229
#define CHECK_LT(x, y)
Definition: Logger.h:303
void Data_Namespace::DataMgr::checkpoint ( )
private

Definition at line 568 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

Referenced by convertDB().

568  {
569  // TODO(adb): SAA
570  // MAT Yes to reduce Parallel Executor TSAN issues (and correctness for now)
571  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
572  for (auto levelIt = bufferMgrs_.rbegin(); levelIt != bufferMgrs_.rend(); ++levelIt) {
573  // use reverse iterator so we start at GPU level, then CPU then DISK
574  for (auto deviceIt = levelIt->begin(); deviceIt != levelIt->end(); ++deviceIt) {
575  (*deviceIt)->checkpoint();
576  }
577  }
578 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:277
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::clearMemory ( const MemoryLevel  memLevel)

Definition at line 434 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, CHECK, cudaMgr_, Data_Namespace::GPU_LEVEL, logger::INFO, LOG, and logger::WARNING.

Referenced by Executor::clearMemory().

434  {
435  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
436 
437  // if gpu we need to iterate through all the buffermanagers for each card
438  if (memLevel == MemoryLevel::GPU_LEVEL) {
439  if (cudaMgr_) {
440  int numGpus = cudaMgr_->getDeviceCount();
441  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
442  LOG(INFO) << "clear slabs on gpu " << gpuNum;
443  auto buffer_mgr_for_gpu =
444  dynamic_cast<Buffer_Namespace::BufferMgr*>(bufferMgrs_[memLevel][gpuNum]);
445  CHECK(buffer_mgr_for_gpu);
446  buffer_mgr_for_gpu->clearSlabs();
447  }
448  } else {
449  LOG(WARNING) << "Unable to clear GPU memory: No GPUs detected";
450  }
451  } else {
452  auto buffer_mgr_for_cpu =
453  dynamic_cast<Buffer_Namespace::BufferMgr*>(bufferMgrs_[memLevel][0]);
454  CHECK(buffer_mgr_for_cpu);
455  buffer_mgr_for_cpu->clearSlabs();
456  }
457 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:277
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
#define LOG(tag)
Definition: Logger.h:285
Note(s): Forbid Copying Idiom 4.1.
Definition: BufferMgr.h:96
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
Definition: DataMgr.h:273
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::convertDB ( const std::string  basePath)
private

Definition at line 303 of file DataMgr.cpp.

References bufferMgrs_, CHECK, checkpoint(), logger::FATAL, getGlobalFileMgr(), logger::INFO, shared::kDataDirectoryName, and LOG.

303  {
304  // no need for locking, as this is only called in the constructor
305 
306  /* check that the data directory exists and it's empty */
307  std::string mapdDataPath(basePath + "/../" + shared::kDataDirectoryName + "/");
308  boost::filesystem::path path(mapdDataPath);
309  if (boost::filesystem::exists(path)) {
310  if (!boost::filesystem::is_directory(path)) {
311  LOG(FATAL) << "Path to directory \"" + shared::kDataDirectoryName +
312  "\" to convert DB is not a directory.";
313  }
314  } else { // data directory does not exist
315  LOG(FATAL) << "Path to directory \"" + shared::kDataDirectoryName +
316  "\" to convert DB does not exist.";
317  }
318 
319  File_Namespace::GlobalFileMgr* gfm{nullptr};
320  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
321  CHECK(gfm);
322 
323  LOG(INFO) << "Database conversion started.";
324  // this call also copies data into new DB structure
325  File_Namespace::FileMgr* fm_base_db = new File_Namespace::FileMgr(gfm, basePath);
326  delete fm_base_db;
327 
328  /* write content of DB into newly created/converted DB structure & location */
329  checkpoint(); // outputs data files as well as metadata files
330  LOG(INFO) << "Database conversion completed.";
331 }
const std::string kDataDirectoryName
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
#define LOG(tag)
Definition: Logger.h:285
File_Namespace::GlobalFileMgr * getGlobalFileMgr() const
Definition: DataMgr.cpp:606
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the call graph for this function:

void Data_Namespace::DataMgr::copy ( AbstractBuffer *  destBuffer,
AbstractBuffer *  srcBuffer 
)

Definition at line 531 of file DataMgr.cpp.

References Data_Namespace::AbstractBuffer::getDeviceId(), Data_Namespace::AbstractBuffer::getMemoryPtr(), Data_Namespace::AbstractBuffer::getType(), Data_Namespace::AbstractBuffer::size(), and Data_Namespace::AbstractBuffer::write().

531  {
532  destBuffer->write(srcBuffer->getMemoryPtr(),
533  srcBuffer->size(),
534  0,
535  srcBuffer->getType(),
536  srcBuffer->getDeviceId());
537 }
virtual int8_t * getMemoryPtr()=0
virtual MemoryLevel getType() const =0
virtual void write(int8_t *src, const size_t num_bytes, const size_t offset=0, const MemoryLevel src_buffer_type=CPU_LEVEL, const int src_device_id=-1)=0

+ Here is the call graph for this function:

AbstractBuffer * Data_Namespace::DataMgr::createChunkBuffer ( const ChunkKey &  key,
const MemoryLevel  memoryLevel,
const int  deviceId = 0,
const size_t  page_size = 0 
)

Definition at line 472 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

Referenced by Chunk_NS::Chunk::createChunkBuffer().

475  {
476  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
477  int level = static_cast<int>(memoryLevel);
478  return bufferMgrs_[level][deviceId]->createBuffer(key, page_size);
479 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:277
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::createTopLevelMetadata ( ) const
private

Definition at line 333 of file DataMgr.cpp.

References bufferMgrs_, CHECK, and getGlobalFileMgr().

Referenced by resetPersistentStorage().

334  { // create metadata shared by all tables of all DBs
335  ChunkKey chunkKey(2);
336  chunkKey[0] = 0; // top level db_id
337  chunkKey[1] = 0; // top level tb_id
338 
339  File_Namespace::GlobalFileMgr* gfm{nullptr};
340  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
341  CHECK(gfm);
342 
343  auto fm_top = gfm->getFileMgr(chunkKey);
344  if (dynamic_cast<File_Namespace::FileMgr*>(fm_top)) {
345  static_cast<File_Namespace::FileMgr*>(fm_top)->createTopLevelMetadata();
346  }
347 }
std::vector< int > ChunkKey
Definition: types.h:36
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
void createTopLevelMetadata() const
Definition: DataMgr.cpp:333
File_Namespace::GlobalFileMgr * getGlobalFileMgr() const
Definition: DataMgr.cpp:606
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::deleteChunksWithPrefix ( const ChunkKey &  keyPrefix)

Definition at line 492 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, and levelSizes_.

Referenced by foreign_storage::anonymous_namespace{ForeignTableRefresh.cpp}::clear_cpu_and_gpu_cache(), and UpdelRoll::updateFragmenterAndCleanupChunks().

492  {
493  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
494 
495  int numLevels = bufferMgrs_.size();
496  for (int level = numLevels - 1; level >= 0; --level) {
497  for (int device = 0; device < levelSizes_[level]; ++device) {
498  bufferMgrs_[level][device]->deleteBuffersWithPrefix(keyPrefix);
499  }
500  }
501 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:277
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
std::vector< int > levelSizes_
Definition: DataMgr.h:229

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::deleteChunksWithPrefix ( const ChunkKey &  keyPrefix,
const MemoryLevel  memLevel 
)

Definition at line 504 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, and levelSizes_.

505  {
506  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
507 
508  if (bufferMgrs_.size() <= memLevel) {
509  return;
510  }
511  for (int device = 0; device < levelSizes_[memLevel]; ++device) {
512  bufferMgrs_[memLevel][device]->deleteBuffersWithPrefix(keyPrefix);
513  }
514 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:277
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
std::vector< int > levelSizes_
Definition: DataMgr.h:229
std::string Data_Namespace::DataMgr::dumpLevel ( const MemoryLevel  memLevel)

Definition at line 418 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, cudaMgr_, and Data_Namespace::GPU_LEVEL.

418  {
419  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
420 
421  // if gpu we need to iterate through all the buffermanagers for each card
422  if (memLevel == MemoryLevel::GPU_LEVEL) {
423  int numGpus = cudaMgr_->getDeviceCount();
424  std::ostringstream tss;
425  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
426  tss << bufferMgrs_[memLevel][gpuNum]->printSlabs();
427  }
428  return tss.str();
429  } else {
430  return bufferMgrs_[memLevel][0]->printSlabs();
431  }
432 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:277
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
Definition: DataMgr.h:273
void Data_Namespace::DataMgr::free ( AbstractBuffer *  buffer)

Definition at line 525 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, Data_Namespace::AbstractBuffer::getDeviceId(), and Data_Namespace::AbstractBuffer::getType().

Referenced by UpdelRoll::cancelUpdate(), ThrustAllocator::deallocate(), CudaAllocator::free(), CudaAllocator::freeGpuAbstractBuffer(), CudaAllocator::~CudaAllocator(), InValuesBitmap::~InValuesBitmap(), PerfectHashTable::~PerfectHashTable(), StringDictionaryTranslationMgr::~StringDictionaryTranslationMgr(), and ThrustAllocator::~ThrustAllocator().

525  {
526  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
527  int level = static_cast<int>(buffer->getType());
528  bufferMgrs_[level][buffer->getDeviceId()]->free(buffer);
529 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:277
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
virtual MemoryLevel getType() const =0

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

AbstractBuffer * Data_Namespace::DataMgr::getChunkBuffer ( const ChunkKey &  key,
const MemoryLevel  memoryLevel,
const int  deviceId = 0,
const size_t  numBytes = 0 
)

Definition at line 481 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, CHECK_LT, and levelSizes_.

Referenced by Chunk_NS::Chunk::getChunkBuffer().

484  {
485  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
486  const auto level = static_cast<size_t>(memoryLevel);
487  CHECK_LT(level, levelSizes_.size()); // make sure we have a legit buffermgr
488  CHECK_LT(deviceId, levelSizes_[level]); // make sure we have a legit buffermgr
489  return bufferMgrs_[level][deviceId]->getBuffer(key, numBytes);
490 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:277
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
std::vector< int > levelSizes_
Definition: DataMgr.h:229
#define CHECK_LT(x, y)
Definition: Logger.h:303

+ Here is the caller graph for this function:

const std::map<ChunkKey, File_Namespace::FileBuffer*>& Data_Namespace::DataMgr::getChunkMap ( )
void Data_Namespace::DataMgr::getChunkMetadataVecForKeyPrefix ( ChunkMetadataVector &  chunkMetadataVec,
const ChunkKey &  keyPrefix 
)

Definition at line 466 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

Referenced by TableOptimizer::vacuumFragments().

467  {
468  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
469  bufferMgrs_[0][0]->getChunkMetadataVecForKeyPrefix(chunkMetadataVec, keyPrefix);
470 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:277
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272

+ Here is the caller graph for this function:

Buffer_Namespace::CpuBufferMgr * Data_Namespace::DataMgr::getCpuBufferMgr ( ) const

Definition at line 638 of file DataMgr.cpp.

References bufferMgrs_, and Data_Namespace::CPU_LEVEL.

638  {
639  return dynamic_cast<Buffer_Namespace::CpuBufferMgr*>(
640  bufferMgrs_[MemoryLevel::CPU_LEVEL][0]);
641 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
CudaMgr_Namespace::CudaMgr* Data_Namespace::DataMgr::getCudaMgr ( ) const
inline

Definition at line 224 of file DataMgr.h.

References cudaMgr_.

Referenced by Executor::blockSize(), copy_to_nvidia_gpu(), CudaAllocator::copyFromDevice(), CudaAllocator::copyToDevice(), CudaAllocator::CudaAllocator(), Executor::cudaMgr(), get_available_gpus(), Executor::gridSize(), Executor::isCPUOnly(), CudaAllocator::setDeviceMem(), and CudaAllocator::zeroDeviceMem().

224 { return cudaMgr_.get(); }
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
Definition: DataMgr.h:273

+ Here is the caller graph for this function:

std::shared_ptr< ForeignStorageInterface > Data_Namespace::DataMgr::getForeignStorageInterface ( ) const

Definition at line 614 of file DataMgr.cpp.

References bufferMgrs_.

614  {
615  return dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])
616  ->getForeignStorageInterface();
617 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
std::shared_ptr< ForeignStorageInterface > getForeignStorageInterface() const
Definition: DataMgr.cpp:614
File_Namespace::GlobalFileMgr * Data_Namespace::DataMgr::getGlobalFileMgr ( ) const

Definition at line 606 of file DataMgr.cpp.

References bufferMgrs_, and CHECK.

Referenced by convertDB(), createTopLevelMetadata(), TableArchiver::dumpTable(), anonymous_namespace{DdlCommandExecutor.cpp}::get_agg_storage_stats(), getTableEpoch(), foreign_storage::InternalStorageStatsDataWrapper::initializeObjectsForTable(), resetTableEpochFloor(), TableArchiver::restoreTable(), setTableEpoch(), and TableOptimizer::vacuumDeletedRows().

606  {
607  File_Namespace::GlobalFileMgr* global_file_mgr{nullptr};
608  global_file_mgr =
609  dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
610  CHECK(global_file_mgr);
611  return global_file_mgr;
612 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
File_Namespace::GlobalFileMgr * getGlobalFileMgr() const
Definition: DataMgr.cpp:606
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the caller graph for this function:

Buffer_Namespace::GpuCudaBufferMgr * Data_Namespace::DataMgr::getGpuBufferMgr ( int32_t  device_id) const

Definition at line 643 of file DataMgr.cpp.

References bufferMgrs_, CHECK_GT, and Data_Namespace::GPU_LEVEL.

643  {
644  if (bufferMgrs_.size() > MemoryLevel::GPU_LEVEL) {
645  CHECK_GT(bufferMgrs_[MemoryLevel::GPU_LEVEL].size(), static_cast<size_t>(device_id));
646  return dynamic_cast<Buffer_Namespace::GpuCudaBufferMgr*>(
647  bufferMgrs_[MemoryLevel::GPU_LEVEL][device_id]);
648  } else {
649  return nullptr;
650  }
651 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
#define CHECK_GT(x, y)
Definition: Logger.h:305
std::vector< MemoryInfo > Data_Namespace::DataMgr::getMemoryInfo ( const MemoryLevel  memLevel) const

Definition at line 349 of file DataMgr.cpp.

References buffer_access_mutex_, and getMemoryInfoUnlocked().

Referenced by Executor::createKernels().

349  {
350  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
351  return getMemoryInfoUnlocked(mem_level);
352 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:277
std::vector< MemoryInfo > getMemoryInfoUnlocked(const MemoryLevel memLevel) const
Definition: DataMgr.cpp:354

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

std::vector< MemoryInfo > Data_Namespace::DataMgr::getMemoryInfoUnlocked ( const MemoryLevel  memLevel) const

Definition at line 354 of file DataMgr.cpp.

References bufferMgrs_, CHECK, Data_Namespace::MemoryData::chunk_key, Data_Namespace::CPU_LEVEL, cudaMgr_, Buffer_Namespace::BufferMgr::getAllocated(), Buffer_Namespace::BufferMgr::getMaxSize(), Buffer_Namespace::BufferMgr::getPageSize(), Buffer_Namespace::BufferMgr::getSlabSegments(), Data_Namespace::GPU_LEVEL, hasGpus_, Data_Namespace::MemoryInfo::isAllocationCapped, Buffer_Namespace::BufferMgr::isAllocationCapped(), Data_Namespace::MemoryInfo::maxNumPages, Data_Namespace::MemoryData::memStatus, Data_Namespace::MemoryInfo::nodeMemoryData, Data_Namespace::MemoryInfo::numPageAllocated, Data_Namespace::MemoryData::numPages, Data_Namespace::MemoryInfo::pageSize, Data_Namespace::MemoryData::slabNum, Data_Namespace::MemoryData::startPage, and Data_Namespace::MemoryData::touch.

Referenced by getMemoryInfo().

355  {
356  std::vector<MemoryInfo> mem_info;
357  if (mem_level == MemoryLevel::CPU_LEVEL) {
358  Buffer_Namespace::CpuBufferMgr* cpu_buffer =
359  dynamic_cast<Buffer_Namespace::CpuBufferMgr*>(
360  bufferMgrs_[MemoryLevel::CPU_LEVEL][0]);
361  CHECK(cpu_buffer);
362  MemoryInfo mi;
363 
364  mi.pageSize = cpu_buffer->getPageSize();
365  mi.maxNumPages = cpu_buffer->getMaxSize() / mi.pageSize;
366  mi.isAllocationCapped = cpu_buffer->isAllocationCapped();
367  mi.numPageAllocated = cpu_buffer->getAllocated() / mi.pageSize;
368 
369  const auto& slab_segments = cpu_buffer->getSlabSegments();
370  for (size_t slab_num = 0; slab_num < slab_segments.size(); ++slab_num) {
371  for (auto segment : slab_segments[slab_num]) {
372  MemoryData md;
373  md.slabNum = slab_num;
374  md.startPage = segment.start_page;
375  md.numPages = segment.num_pages;
376  md.touch = segment.last_touched;
377  md.memStatus = segment.mem_status;
378  md.chunk_key.insert(
379  md.chunk_key.end(), segment.chunk_key.begin(), segment.chunk_key.end());
380  mi.nodeMemoryData.push_back(md);
381  }
382  }
383  mem_info.push_back(mi);
384  } else if (hasGpus_) {
385  int numGpus = cudaMgr_->getDeviceCount();
386  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
387  Buffer_Namespace::GpuCudaBufferMgr* gpu_buffer =
388  dynamic_cast<Buffer_Namespace::GpuCudaBufferMgr*>(
389  bufferMgrs_[MemoryLevel::GPU_LEVEL][gpuNum]);
390  CHECK(gpu_buffer);
391  MemoryInfo mi;
392 
393  mi.pageSize = gpu_buffer->getPageSize();
394  mi.maxNumPages = gpu_buffer->getMaxSize() / mi.pageSize;
395  mi.isAllocationCapped = gpu_buffer->isAllocationCapped();
396  mi.numPageAllocated = gpu_buffer->getAllocated() / mi.pageSize;
397 
398  const auto& slab_segments = gpu_buffer->getSlabSegments();
399  for (size_t slab_num = 0; slab_num < slab_segments.size(); ++slab_num) {
400  for (auto segment : slab_segments[slab_num]) {
401  MemoryData md;
402  md.slabNum = slab_num;
403  md.startPage = segment.start_page;
404  md.numPages = segment.num_pages;
405  md.touch = segment.last_touched;
406  md.chunk_key.insert(
407  md.chunk_key.end(), segment.chunk_key.begin(), segment.chunk_key.end());
408  md.memStatus = segment.mem_status;
409  mi.nodeMemoryData.push_back(md);
410  }
411  }
412  mem_info.push_back(mi);
413  }
414  }
415  return mem_info;
416 }
size_t getAllocated() override
Definition: BufferMgr.cpp:499
std::vector< MemoryData > nodeMemoryData
Definition: DataMgr.h:76
Buffer_Namespace::MemStatus memStatus
Definition: DataMgr.h:68
size_t getMaxSize() override
Definition: BufferMgr.cpp:494
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
bool isAllocationCapped() override
Definition: BufferMgr.cpp:504
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
Definition: DataMgr.h:273
const std::vector< BufferList > & getSlabSegments()
Definition: BufferMgr.cpp:916
#define CHECK(condition)
Definition: Logger.h:291
std::vector< int32_t > chunk_key
Definition: DataMgr.h:67

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

PersistentStorageMgr * Data_Namespace::DataMgr::getPersistentStorageMgr ( ) const

Definition at line 634 of file DataMgr.cpp.

References bufferMgrs_, and Data_Namespace::DISK_LEVEL.

Referenced by Catalog_Namespace::anonymous_namespace{Catalog.cpp}::clear_cached_table_data(), and anonymous_namespace{RelAlgExecutor.cpp}::set_parallelism_hints().

634  {
635  return dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[MemoryLevel::DISK_LEVEL][0]);
636 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272

+ Here is the caller graph for this function:

DataMgr::SystemMemoryUsage Data_Namespace::DataMgr::getSystemMemoryUsage ( ) const

Definition at line 92 of file DataMgr.cpp.

References Data_Namespace::DataMgr::SystemMemoryUsage::frag, Data_Namespace::DataMgr::SystemMemoryUsage::free, Data_Namespace::ProcBuddyinfoParser::getFragmentationPercent(), Data_Namespace::DataMgr::SystemMemoryUsage::regular, Data_Namespace::DataMgr::SystemMemoryUsage::resident, Data_Namespace::DataMgr::SystemMemoryUsage::shared, Data_Namespace::DataMgr::SystemMemoryUsage::total, and Data_Namespace::DataMgr::SystemMemoryUsage::vtotal.

92  {
93  SystemMemoryUsage usage;
94 
95 #ifdef __linux__
96 
97  // Determine Linux available memory and total memory.
98  // Available memory is different from free memory because
99  // when Linux sees free memory, it tries to use it for
100  // stuff like disk caching. However, the memory is not
101  // reserved and is still available to be allocated by
102  // user processes.
103  // Parsing /proc/meminfo for this info isn't very elegant
104  // but as a virtual file it should be reasonably fast.
105  // See also:
106  // https://github.com/torvalds/linux/commit/34e431b0ae398fc54ea69ff85ec700722c9da773
107  ProcMeminfoParser mi;
108  usage.free = mi["MemAvailable"];
109  usage.total = mi["MemTotal"];
110 
111  // Determine process memory in use.
112  // See also:
113  // https://stackoverflow.com/questions/669438/how-to-get-memory-usage-at-runtime-using-c
114  // http://man7.org/linux/man-pages/man5/proc.5.html
115  int64_t size = 0;
116  int64_t resident = 0;
117  int64_t shared = 0;
118 
119  std::ifstream fstatm("/proc/self/statm");
120  fstatm >> size >> resident >> shared;
121  fstatm.close();
122 
123  long page_size =
124  sysconf(_SC_PAGE_SIZE); // in case x86-64 is configured to use 2MB pages
125 
126  usage.resident = resident * page_size;
127  usage.vtotal = size * page_size;
128  usage.regular = (resident - shared) * page_size;
129  usage.shared = shared * page_size;
130 
132  usage.frag = bi.getFragmentationPercent();
133 
134 #else
135 
136  usage.total = 0;
137  usage.free = 0;
138  usage.resident = 0;
139  usage.vtotal = 0;
140  usage.regular = 0;
141  usage.shared = 0;
142  usage.frag = 0;
143 
144 #endif
145 
146  return usage;
147 }
Parse /proc/meminfo into key/value pairs.
Definition: DataMgr.h:80
Parse /proc/buddyinfo into a Fragmentation health score.
Definition: DataMgr.h:113

+ Here is the call graph for this function:

size_t Data_Namespace::DataMgr::getTableEpoch ( const int  db_id,
const int  tb_id 
)

Definition at line 592 of file DataMgr.cpp.

References bufferMgrs_, CHECK, and getGlobalFileMgr().

592  {
     // Fetch the table's current epoch from the global file manager owned by
     // the disk-level (level 0) storage manager.
     // Check the dynamic_cast result *before* dereferencing it: the original
     // code called ->getGlobalFileMgr() on the unchecked cast, which is a
     // null dereference if bufferMgrs_[0][0] is not a PersistentStorageMgr.
593  auto* psm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0]);
     CHECK(psm);
594  File_Namespace::GlobalFileMgr* gfm = psm->getGlobalFileMgr();
595  CHECK(gfm);
596  return gfm->getTableEpoch(db_id, tb_id);
597 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
File_Namespace::GlobalFileMgr * getGlobalFileMgr() const
Definition: DataMgr.cpp:606
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the call graph for this function:

size_t Data_Namespace::DataMgr::getTotalSystemMemory ( )
static

Definition at line 149 of file DataMgr.cpp.

Referenced by populateMgrs().

149  {
150 #ifdef __APPLE__
151  int mib[2];
152  size_t physical_memory;
153  size_t length;
154  // Get the Physical memory size
155  mib[0] = CTL_HW;
156  mib[1] = HW_MEMSIZE;
157  length = sizeof(size_t);
     // NOTE(review): sysctl's return value is ignored; on failure
     // physical_memory would be returned uninitialized — consider checking it.
158  sysctl(mib, 2, &physical_memory, &length, NULL, 0);
159  return physical_memory;
160 #elif defined(_MSC_VER)
161  MEMORYSTATUSEX status;
162  status.dwLength = sizeof(status);
163  GlobalMemoryStatusEx(&status);
164  return status.ullTotalPhys;
165 #else // Linux
     // Total bytes = number of physical pages * bytes per page.
166  long pages = sysconf(_SC_PHYS_PAGES);
167  long page_size = sysconf(_SC_PAGE_SIZE);
168  return pages * page_size;
169 #endif
170 }

+ Here is the caller graph for this function:

bool Data_Namespace::DataMgr::gpusPresent ( ) const
inline

Definition at line 218 of file DataMgr.h.

References hasGpus_.

Referenced by get_available_gpus().

// Cheap accessor: reports whether GPU buffer managers are available (hasGpus_).
218 { return hasGpus_; }

+ Here is the caller graph for this function:

bool Data_Namespace::DataMgr::isBufferOnDevice ( const ChunkKey key,
const MemoryLevel  memLevel,
const int  deviceId 
)

Definition at line 459 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

Referenced by Chunk_NS::Chunk::isChunkOnDevice().

461  {
     // Serialize access to bufferMgrs_ against concurrent buffer operations.
462  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
     // Delegate to the buffer manager for (memLevel, deviceId): does it
     // currently hold the chunk identified by key?
463  return bufferMgrs_[memLevel][deviceId]->isBufferOnDevice(key);
464 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:277
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::populateMgrs ( const SystemParameters system_parameters,
const size_t  userSpecifiedNumReaderThreads,
const File_Namespace::DiskCacheConfig cache_config 
)
private

Definition at line 216 of file DataMgr.cpp.

References allocateCpuBufferMgr(), bufferMgrs_, SystemParameters::cpu_buffer_mem_bytes, cudaMgr_, dataDir_, Data_Namespace::DRAM, g_pmem_size, getTotalSystemMemory(), SystemParameters::gpu_buffer_mem_bytes, hasGpus_, logger::INFO, levelSizes_, LOG, SystemParameters::max_cpu_slab_size, SystemParameters::max_gpu_slab_size, SystemParameters::min_cpu_slab_size, SystemParameters::min_gpu_slab_size, Data_Namespace::numCpuTiers, Data_Namespace::PMEM, reservedGpuMem_, and VLOG.

Referenced by resetPersistentStorage().

218  {
219  // no need for locking, as this is only called in the constructor
220  bufferMgrs_.resize(2);
221  bufferMgrs_[0].push_back(
222  new PersistentStorageMgr(dataDir_, userSpecifiedNumReaderThreads, cache_config));
223 
224  levelSizes_.push_back(1);
225  size_t page_size{512};
226  size_t cpuBufferSize = system_parameters.cpu_buffer_mem_bytes;
227  if (cpuBufferSize == 0) { // if size is not specified
228  const auto total_system_memory = getTotalSystemMemory();
229  VLOG(1) << "Detected " << (float)total_system_memory / (1024 * 1024)
230  << "M of total system memory.";
231  cpuBufferSize = total_system_memory *
232  0.8; // should get free memory instead of this ugly heuristic
233  }
234  size_t minCpuSlabSize = std::min(system_parameters.min_cpu_slab_size, cpuBufferSize);
235  minCpuSlabSize = (minCpuSlabSize / page_size) * page_size;
236  size_t maxCpuSlabSize = std::min(system_parameters.max_cpu_slab_size, cpuBufferSize);
237  maxCpuSlabSize = (maxCpuSlabSize / page_size) * page_size;
238  LOG(INFO) << "Min CPU Slab Size is " << (float)minCpuSlabSize / (1024 * 1024) << "MB";
239  LOG(INFO) << "Max CPU Slab Size is " << (float)maxCpuSlabSize / (1024 * 1024) << "MB";
240  LOG(INFO) << "Max memory pool size for CPU is " << (float)cpuBufferSize / (1024 * 1024)
241  << "MB";
242 
243  size_t total_cpu_size = 0;
244 
245 #ifdef ENABLE_MEMKIND
246  CpuTierSizeVector cpu_tier_sizes(numCpuTiers, 0);
247  cpu_tier_sizes[CpuTier::DRAM] = cpuBufferSize;
248  if (g_enable_tiered_cpu_mem) {
249  cpu_tier_sizes[CpuTier::PMEM] = g_pmem_size;
250  LOG(INFO) << "Max memory pool size for PMEM is " << (float)g_pmem_size / (1024 * 1024)
251  << "MB";
252  }
253  for (auto cpu_tier_size : cpu_tier_sizes) {
254  total_cpu_size += cpu_tier_size;
255  }
256 #else
257  CpuTierSizeVector cpu_tier_sizes{};
258  total_cpu_size = cpuBufferSize;
259 #endif
260 
261  if (hasGpus_ || cudaMgr_) {
262  LOG(INFO) << "Reserved GPU memory is " << (float)reservedGpuMem_ / (1024 * 1024)
263  << "MB includes render buffer allocation";
264  bufferMgrs_.resize(3);
266  0, total_cpu_size, minCpuSlabSize, maxCpuSlabSize, page_size, cpu_tier_sizes);
267 
268  levelSizes_.push_back(1);
269  int numGpus = cudaMgr_->getDeviceCount();
270  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
271  size_t gpuMaxMemSize =
272  system_parameters.gpu_buffer_mem_bytes != 0
273  ? system_parameters.gpu_buffer_mem_bytes
274  : (cudaMgr_->getDeviceProperties(gpuNum)->globalMem) - (reservedGpuMem_);
275  size_t minGpuSlabSize =
276  std::min(system_parameters.min_gpu_slab_size, gpuMaxMemSize);
277  minGpuSlabSize = (minGpuSlabSize / page_size) * page_size;
278  size_t maxGpuSlabSize =
279  std::min(system_parameters.max_gpu_slab_size, gpuMaxMemSize);
280  maxGpuSlabSize = (maxGpuSlabSize / page_size) * page_size;
281  LOG(INFO) << "Min GPU Slab size for GPU " << gpuNum << " is "
282  << (float)minGpuSlabSize / (1024 * 1024) << "MB";
283  LOG(INFO) << "Max GPU Slab size for GPU " << gpuNum << " is "
284  << (float)maxGpuSlabSize / (1024 * 1024) << "MB";
285  LOG(INFO) << "Max memory pool size for GPU " << gpuNum << " is "
286  << (float)gpuMaxMemSize / (1024 * 1024) << "MB";
287  bufferMgrs_[2].push_back(new Buffer_Namespace::GpuCudaBufferMgr(gpuNum,
288  gpuMaxMemSize,
289  cudaMgr_.get(),
290  minGpuSlabSize,
291  maxGpuSlabSize,
292  page_size,
293  bufferMgrs_[1][0]));
294  }
295  levelSizes_.push_back(numGpus);
296  } else {
298  0, total_cpu_size, minCpuSlabSize, maxCpuSlabSize, page_size, cpu_tier_sizes);
299  levelSizes_.push_back(1);
300  }
301 }
size_t g_pmem_size
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
std::vector< int > levelSizes_
Definition: DataMgr.h:229
#define LOG(tag)
Definition: Logger.h:285
constexpr size_t numCpuTiers
static size_t getTotalSystemMemory()
Definition: DataMgr.cpp:149
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
Definition: DataMgr.h:273
void allocateCpuBufferMgr(int32_t device_id, size_t total_cpu_size, size_t minCpuSlabSize, size_t maxCpuSlabSize, size_t page_size, const std::vector< size_t > &cpu_tier_sizes)
Definition: DataMgr.cpp:172
std::vector< size_t > CpuTierSizeVector
#define VLOG(n)
Definition: Logger.h:387
std::string dataDir_
Definition: DataMgr.h:274

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::removeTableRelatedDS ( const int  db_id,
const int  tb_id 
)

Definition at line 580 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

580  {
     // Serialize against concurrent buffer operations.
581  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
     // Level 0 / device 0 is the persistent (disk) storage manager; drop the
     // table's on-disk data structures there.
582  bufferMgrs_[0][0]->removeTableRelatedDS(db_id, tb_id);
583 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:277
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
void Data_Namespace::DataMgr::resetPersistentStorage ( const File_Namespace::DiskCacheConfig cache_config,
const size_t  num_reader_threads,
const SystemParameters sys_params 
)

Definition at line 202 of file DataMgr.cpp.

References bufferMgrs_, createTopLevelMetadata(), and populateMgrs().

204  {
     // Tear down every existing buffer manager from the highest level (GPU)
     // down to disk, then rebuild the hierarchy with the new configuration.
205  int numLevels = bufferMgrs_.size();
206  for (int level = numLevels - 1; level >= 0; --level) {
207  for (size_t device = 0; device < bufferMgrs_[level].size(); device++) {
208  delete bufferMgrs_[level][device];
209  }
210  }
211  bufferMgrs_.clear();
212  populateMgrs(sys_params, num_reader_threads, cache_config);
213  createTopLevelMetadata();  // restored: call line was dropped from this listing
214 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
void createTopLevelMetadata() const
Definition: DataMgr.cpp:333
void populateMgrs(const SystemParameters &system_parameters, const size_t userSpecifiedNumReaderThreads, const File_Namespace::DiskCacheConfig &cache_config)
Definition: DataMgr.cpp:216

+ Here is the call graph for this function:

void Data_Namespace::DataMgr::resetTableEpochFloor ( const int32_t  db_id,
const int32_t  tb_id 
)

Definition at line 599 of file DataMgr.cpp.

References bufferMgrs_, CHECK, and getGlobalFileMgr().

599  {
     // Reset the table's epoch floor via the global file manager owned by the
     // disk-level (level 0) storage manager.
     // Check the dynamic_cast result *before* dereferencing it: the original
     // code called ->getGlobalFileMgr() on the unchecked cast, which is a
     // null dereference if bufferMgrs_[0][0] is not a PersistentStorageMgr.
600  auto* psm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0]);
     CHECK(psm);
601  File_Namespace::GlobalFileMgr* gfm = psm->getGlobalFileMgr();
602  CHECK(gfm);
603  gfm->resetTableEpochFloor(db_id, tb_id);
604 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
File_Namespace::GlobalFileMgr * getGlobalFileMgr() const
Definition: DataMgr.cpp:606
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the call graph for this function:

void Data_Namespace::DataMgr::setTableEpoch ( const int  db_id,
const int  tb_id,
const int  start_epoch 
)

Definition at line 585 of file DataMgr.cpp.

References bufferMgrs_, CHECK, and getGlobalFileMgr().

585  {
     // Set the table's epoch via the global file manager owned by the
     // disk-level (level 0) storage manager.
     // Check the dynamic_cast result *before* dereferencing it: the original
     // code called ->getGlobalFileMgr() on the unchecked cast, which is a
     // null dereference if bufferMgrs_[0][0] is not a PersistentStorageMgr.
586  auto* psm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0]);
     CHECK(psm);
587  File_Namespace::GlobalFileMgr* gfm = psm->getGlobalFileMgr();
588  CHECK(gfm);
589  gfm->setTableEpoch(db_id, tb_id, start_epoch);
590 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:272
File_Namespace::GlobalFileMgr * getGlobalFileMgr() const
Definition: DataMgr.cpp:606
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the call graph for this function:

Friends And Related Function Documentation

friend class GlobalFileMgr
friend

Definition at line 175 of file DataMgr.h.

Member Data Documentation

std::mutex Data_Namespace::DataMgr::buffer_access_mutex_
mutableprivate
std::unique_ptr<CudaMgr_Namespace::CudaMgr> Data_Namespace::DataMgr::cudaMgr_
private
std::string Data_Namespace::DataMgr::dataDir_
private

Definition at line 274 of file DataMgr.h.

Referenced by populateMgrs().

bool Data_Namespace::DataMgr::hasGpus_
private

Definition at line 275 of file DataMgr.h.

Referenced by getMemoryInfoUnlocked(), gpusPresent(), and populateMgrs().

std::vector<int> Data_Namespace::DataMgr::levelSizes_

Definition at line 229 of file DataMgr.h.

Referenced by alloc(), checkpoint(), deleteChunksWithPrefix(), getChunkBuffer(), and populateMgrs().

size_t Data_Namespace::DataMgr::reservedGpuMem_
private

Definition at line 276 of file DataMgr.h.

Referenced by populateMgrs().


The documentation for this class was generated from the following files: