OmniSciDB  21ac014ffc
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
Data_Namespace::DataMgr Class Reference

#include <DataMgr.h>

+ Collaboration diagram for Data_Namespace::DataMgr:

Classes

struct  SystemMemoryUsage
 

Public Member Functions

 DataMgr (const std::string &dataDir, const SystemParameters &system_parameters, std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr, const bool useGpus, const size_t reservedGpuMem=(1<< 27), const size_t numReaderThreads=0, const File_Namespace::DiskCacheConfig cacheConfig=File_Namespace::DiskCacheConfig())
 
 ~DataMgr ()
 
AbstractBuffercreateChunkBuffer (const ChunkKey &key, const MemoryLevel memoryLevel, const int deviceId=0, const size_t page_size=0)
 
AbstractBuffergetChunkBuffer (const ChunkKey &key, const MemoryLevel memoryLevel, const int deviceId=0, const size_t numBytes=0)
 
void deleteChunksWithPrefix (const ChunkKey &keyPrefix)
 
void deleteChunksWithPrefix (const ChunkKey &keyPrefix, const MemoryLevel memLevel)
 
AbstractBufferalloc (const MemoryLevel memoryLevel, const int deviceId, const size_t numBytes)
 
void free (AbstractBuffer *buffer)
 
void copy (AbstractBuffer *destBuffer, AbstractBuffer *srcBuffer)
 
bool isBufferOnDevice (const ChunkKey &key, const MemoryLevel memLevel, const int deviceId)
 
std::vector< MemoryInfogetMemoryInfo (const MemoryLevel memLevel)
 
std::string dumpLevel (const MemoryLevel memLevel)
 
void clearMemory (const MemoryLevel memLevel)
 
const std::map< ChunkKey,
File_Namespace::FileBuffer * > & 
getChunkMap ()
 
void checkpoint (const int db_id, const int tb_id)
 
void checkpoint (const int db_id, const int table_id, const MemoryLevel memory_level)
 
void getChunkMetadataVecForKeyPrefix (ChunkMetadataVector &chunkMetadataVec, const ChunkKey &keyPrefix)
 
bool gpusPresent () const
 
void removeTableRelatedDS (const int db_id, const int tb_id)
 
void setTableEpoch (const int db_id, const int tb_id, const int start_epoch)
 
size_t getTableEpoch (const int db_id, const int tb_id)
 
CudaMgr_Namespace::CudaMgrgetCudaMgr () const
 
File_Namespace::GlobalFileMgrgetGlobalFileMgr () const
 
std::shared_ptr
< ForeignStorageInterface
getForeignStorageInterface () const
 
SystemMemoryUsage getSystemMemoryUsage () const
 
PersistentStorageMgrgetPersistentStorageMgr () const
 
void resetPersistentStorage (const File_Namespace::DiskCacheConfig &cache_config, const size_t num_reader_threads, const SystemParameters &sys_params)
 

Static Public Member Functions

static size_t getTotalSystemMemory ()
 

Public Attributes

std::vector< int > levelSizes_
 

Private Member Functions

void populateMgrs (const SystemParameters &system_parameters, const size_t userSpecifiedNumReaderThreads, const File_Namespace::DiskCacheConfig &cache_config)
 
void convertDB (const std::string basePath)
 
void checkpoint ()
 
void createTopLevelMetadata () const
 

Private Attributes

std::vector< std::vector
< AbstractBufferMgr * > > 
bufferMgrs_
 
std::unique_ptr
< CudaMgr_Namespace::CudaMgr
cudaMgr_
 
std::string dataDir_
 
bool hasGpus_
 
size_t reservedGpuMem_
 
std::mutex buffer_access_mutex_
 

Friends

class GlobalFileMgr
 

Detailed Description

Definition at line 160 of file DataMgr.h.

Constructor & Destructor Documentation

Data_Namespace::DataMgr::DataMgr ( const std::string &  dataDir,
const SystemParameters system_parameters,
std::unique_ptr< CudaMgr_Namespace::CudaMgr cudaMgr,
const bool  useGpus,
const size_t  reservedGpuMem = (1 << 27),
const size_t  numReaderThreads = 0,
const File_Namespace::DiskCacheConfig  cacheConfig = File_Namespace::DiskCacheConfig() 
)
explicit

Definition at line 43 of file DataMgr.cpp.

50  : cudaMgr_{std::move(cudaMgr)}
51  , dataDir_{dataDir}
52  , hasGpus_{false}
53  , reservedGpuMem_{reservedGpuMem} {
54  if (useGpus) {
55  if (cudaMgr_) {
56  hasGpus_ = true;
57  } else {
58  LOG(ERROR) << "CudaMgr instance is invalid, falling back to CPU-only mode.";
59  hasGpus_ = false;
60  }
61  } else {
62  // NOTE: useGpus == false with a valid cudaMgr is a potentially valid configuration.
63  // i.e. QueryEngine can be set to cpu-only for a cuda-enabled build, but still have
64  // rendering enabled. The renderer would require a CudaMgr in this case, in addition
65  // to a GpuCudaBufferMgr for cuda-backed thrust allocations.
 66  // We're still setting hasGpus_ to false in that case though, to enforce cpu-only query
67  // execution.
68  hasGpus_ = false;
69  }
70 
71  populateMgrs(system_parameters, numReaderThreads, cache_config);
73 }
#define LOG(tag)
Definition: Logger.h:200
void createTopLevelMetadata() const
Definition: DataMgr.cpp:287
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
Definition: DataMgr.h:242
void populateMgrs(const SystemParameters &system_parameters, const size_t userSpecifiedNumReaderThreads, const File_Namespace::DiskCacheConfig &cache_config)
Definition: DataMgr.cpp:179
std::string dataDir_
Definition: DataMgr.h:243
/// Destructor: releases every buffer manager, highest memory level first
/// (reverse level order, mirroring checkpoint()), so cached levels are torn
/// down before the persistent storage they sit on.
Data_Namespace::DataMgr::~DataMgr() {
  for (auto level_it = bufferMgrs_.rbegin(); level_it != bufferMgrs_.rend();
       ++level_it) {
    for (auto* mgr : *level_it) {
      delete mgr;
    }
  }
}

Member Function Documentation

/// Allocates a buffer of numBytes on the given memory level and device.
/// Serialized on buffer_access_mutex_; aborts (CHECK) on a bad device id.
AbstractBuffer* Data_Namespace::DataMgr::alloc(const MemoryLevel memoryLevel,
                                               const int deviceId,
                                               const size_t numBytes) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  const int level_index = static_cast<int>(memoryLevel);
  CHECK_LT(deviceId, levelSizes_[level_index]);  // device must exist at this level
  return bufferMgrs_[level_index][deviceId]->alloc(numBytes);
}

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::checkpoint ( const int  db_id,
const int  tb_id 
)

Definition at line 489 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

Referenced by UpdelRoll::stageUpdate().

489  {
490  // TODO(adb): do we need a buffer mgr lock here?
491  // MAT Yes to reduce Parallel Executor TSAN issues (and correctness for now)
492  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
493  for (auto levelIt = bufferMgrs_.rbegin(); levelIt != bufferMgrs_.rend(); ++levelIt) {
494  // use reverse iterator so we start at GPU level, then CPU then DISK
495  for (auto deviceIt = levelIt->begin(); deviceIt != levelIt->end(); ++deviceIt) {
496  (*deviceIt)->checkpoint(db_id, tb_id);
497  }
498  }
499 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:246
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:241

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::checkpoint ( const int  db_id,
const int  table_id,
const MemoryLevel  memory_level 
)

Definition at line 501 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, CHECK_LT, and levelSizes_.

503  {
504  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
505  CHECK_LT(static_cast<size_t>(memory_level), bufferMgrs_.size());
506  CHECK_LT(static_cast<size_t>(memory_level), levelSizes_.size());
507  for (int device_id = 0; device_id < levelSizes_[memory_level]; device_id++) {
508  bufferMgrs_[memory_level][device_id]->checkpoint(db_id, table_id);
509  }
510 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:246
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:241
std::vector< int > levelSizes_
Definition: DataMgr.h:213
#define CHECK_LT(x, y)
Definition: Logger.h:216
void Data_Namespace::DataMgr::checkpoint ( )
private

Definition at line 512 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

Referenced by convertDB().

512  {
513  // TODO(adb): SAA
514  // MAT Yes to reduce Parallel Executor TSAN issues (and correctness for now)
515  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
516  for (auto levelIt = bufferMgrs_.rbegin(); levelIt != bufferMgrs_.rend(); ++levelIt) {
517  // use reverse iterator so we start at GPU level, then CPU then DISK
518  for (auto deviceIt = levelIt->begin(); deviceIt != levelIt->end(); ++deviceIt) {
519  (*deviceIt)->checkpoint();
520  }
521  }
522 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:246
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:241

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::clearMemory ( const MemoryLevel  memLevel)

Definition at line 384 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, cudaMgr_, Data_Namespace::GPU_LEVEL, logger::INFO, LOG, and logger::WARNING.

Referenced by Executor::clearMemory().

384  {
385  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
386 
387  // if gpu we need to iterate through all the buffermanagers for each card
388  if (memLevel == MemoryLevel::GPU_LEVEL) {
389  if (cudaMgr_) {
390  int numGpus = cudaMgr_->getDeviceCount();
391  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
392  LOG(INFO) << "clear slabs on gpu " << gpuNum;
393  bufferMgrs_[memLevel][gpuNum]->clearSlabs();
394  }
395  } else {
396  LOG(WARNING) << "Unable to clear GPU memory: No GPUs detected";
397  }
398  } else {
399  bufferMgrs_[memLevel][0]->clearSlabs();
400  }
401 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:246
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:241
#define LOG(tag)
Definition: Logger.h:200
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
Definition: DataMgr.h:242

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::convertDB ( const std::string  basePath)
private

Definition at line 256 of file DataMgr.cpp.

References bufferMgrs_, CHECK, checkpoint(), logger::FATAL, getGlobalFileMgr(), logger::INFO, and LOG.

256  {
257  // no need for locking, as this is only called in the constructor
258 
259  /* check that "mapd_data" directory exists and it's empty */
260  std::string mapdDataPath(basePath + "/../mapd_data/");
261  boost::filesystem::path path(mapdDataPath);
262  if (boost::filesystem::exists(path)) {
263  if (!boost::filesystem::is_directory(path)) {
264  LOG(FATAL) << "Path to directory mapd_data to convert DB is not a directory.";
265  }
266  } else { // data directory does not exist
267  LOG(FATAL) << "Path to directory mapd_data to convert DB does not exist.";
268  }
269 
270  File_Namespace::GlobalFileMgr* gfm{nullptr};
271  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
272  CHECK(gfm);
273 
274  size_t defaultPageSize = gfm->getDefaultPageSize();
275  LOG(INFO) << "Database conversion started.";
277  gfm,
278  defaultPageSize,
279  basePath); // this call also copies data into new DB structure
280  delete fm_base_db;
281 
282  /* write content of DB into newly created/converted DB structure & location */
283  checkpoint(); // outputs data files as well as metadata files
284  LOG(INFO) << "Database conversion completed.";
285 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:241
#define LOG(tag)
Definition: Logger.h:200
File_Namespace::GlobalFileMgr * getGlobalFileMgr() const
Definition: DataMgr.cpp:543
#define CHECK(condition)
Definition: Logger.h:206

+ Here is the call graph for this function:

void Data_Namespace::DataMgr::copy ( AbstractBuffer destBuffer,
AbstractBuffer srcBuffer 
)

Definition at line 475 of file DataMgr.cpp.

References Data_Namespace::AbstractBuffer::getDeviceId(), Data_Namespace::AbstractBuffer::getMemoryPtr(), Data_Namespace::AbstractBuffer::getType(), Data_Namespace::AbstractBuffer::size(), and Data_Namespace::AbstractBuffer::write().

475  {
476  destBuffer->write(srcBuffer->getMemoryPtr(),
477  srcBuffer->size(),
478  0,
479  srcBuffer->getType(),
480  srcBuffer->getDeviceId());
481 }
virtual int8_t * getMemoryPtr()=0
virtual MemoryLevel getType() const =0
virtual void write(int8_t *src, const size_t num_bytes, const size_t offset=0, const MemoryLevel src_buffer_type=CPU_LEVEL, const int src_device_id=-1)=0

+ Here is the call graph for this function:

/// Creates a new chunk buffer for `key` on the given level/device.
/// Defaults (deviceId = 0, page_size = 0) are declared in DataMgr.h.
AbstractBuffer* Data_Namespace::DataMgr::createChunkBuffer(
    const ChunkKey& key,
    const MemoryLevel memoryLevel,
    const int deviceId,
    const size_t page_size) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  const auto level = static_cast<int>(memoryLevel);
  return bufferMgrs_[level][deviceId]->createBuffer(key, page_size);
}

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::createTopLevelMetadata ( ) const
private

Definition at line 287 of file DataMgr.cpp.

References bufferMgrs_, CHECK, and getGlobalFileMgr().

Referenced by resetPersistentStorage().

288  { // create metadata shared by all tables of all DBs
289  ChunkKey chunkKey(2);
290  chunkKey[0] = 0; // top level db_id
291  chunkKey[1] = 0; // top level tb_id
292 
293  File_Namespace::GlobalFileMgr* gfm{nullptr};
294  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
295  CHECK(gfm);
296 
297  auto fm_top = gfm->getFileMgr(chunkKey);
298  if (dynamic_cast<File_Namespace::FileMgr*>(fm_top)) {
299  static_cast<File_Namespace::FileMgr*>(fm_top)->createTopLevelMetadata();
300  }
301 }
std::vector< int > ChunkKey
Definition: types.h:37
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:241
void createTopLevelMetadata() const
Definition: DataMgr.cpp:287
File_Namespace::GlobalFileMgr * getGlobalFileMgr() const
Definition: DataMgr.cpp:543
#define CHECK(condition)
Definition: Logger.h:206

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::deleteChunksWithPrefix ( const ChunkKey keyPrefix)

Definition at line 436 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, and levelSizes_.

Referenced by UpdelRoll::updateFragmenterAndCleanupChunks(), StorageIOFacility::yieldDeleteCallback(), and StorageIOFacility::yieldUpdateCallback().

436  {
437  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
438 
439  int numLevels = bufferMgrs_.size();
440  for (int level = numLevels - 1; level >= 0; --level) {
441  for (int device = 0; device < levelSizes_[level]; ++device) {
442  bufferMgrs_[level][device]->deleteBuffersWithPrefix(keyPrefix);
443  }
444  }
445 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:246
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:241
std::vector< int > levelSizes_
Definition: DataMgr.h:213

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::deleteChunksWithPrefix ( const ChunkKey keyPrefix,
const MemoryLevel  memLevel 
)

Definition at line 448 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, and levelSizes_.

449  {
450  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
451 
452  if (bufferMgrs_.size() <= memLevel) {
453  return;
454  }
455  for (int device = 0; device < levelSizes_[memLevel]; ++device) {
456  bufferMgrs_[memLevel][device]->deleteBuffersWithPrefix(keyPrefix);
457  }
458 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:246
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:241
std::vector< int > levelSizes_
Definition: DataMgr.h:213
std::string Data_Namespace::DataMgr::dumpLevel ( const MemoryLevel  memLevel)

Definition at line 368 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, cudaMgr_, and Data_Namespace::GPU_LEVEL.

368  {
369  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
370 
371  // if gpu we need to iterate through all the buffermanagers for each card
372  if (memLevel == MemoryLevel::GPU_LEVEL) {
373  int numGpus = cudaMgr_->getDeviceCount();
374  std::ostringstream tss;
375  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
376  tss << bufferMgrs_[memLevel][gpuNum]->printSlabs();
377  }
378  return tss.str();
379  } else {
380  return bufferMgrs_[memLevel][0]->printSlabs();
381  }
382 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:246
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:241
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
Definition: DataMgr.h:242
void Data_Namespace::DataMgr::free ( AbstractBuffer buffer)

Definition at line 469 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, Data_Namespace::AbstractBuffer::getDeviceId(), and Data_Namespace::AbstractBuffer::getType().

Referenced by UpdelRoll::cancelUpdate(), ThrustAllocator::deallocate(), CudaAllocator::free(), CudaAllocator::freeGpuAbstractBuffer(), CudaAllocator::~CudaAllocator(), InValuesBitmap::~InValuesBitmap(), PerfectHashTable::~PerfectHashTable(), and ThrustAllocator::~ThrustAllocator().

469  {
470  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
471  int level = static_cast<int>(buffer->getType());
472  bufferMgrs_[level][buffer->getDeviceId()]->free(buffer);
473 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:246
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:241
virtual MemoryLevel getType() const =0

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

/// Fetches the buffer for `key` at the given level/device, reading at least
/// numBytes. Defaults (deviceId = 0, numBytes = 0) are declared in DataMgr.h.
AbstractBuffer* Data_Namespace::DataMgr::getChunkBuffer(
    const ChunkKey& key,
    const MemoryLevel memoryLevel,
    const int deviceId,
    const size_t numBytes) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  const auto level = static_cast<size_t>(memoryLevel);
  CHECK_LT(level, levelSizes_.size());     // make sure we have a legit buffermgr
  CHECK_LT(deviceId, levelSizes_[level]);  // make sure we have a legit buffermgr
  return bufferMgrs_[level][deviceId]->getBuffer(key, numBytes);
}

+ Here is the caller graph for this function:

const std::map<ChunkKey, File_Namespace::FileBuffer*>& Data_Namespace::DataMgr::getChunkMap ( )
void Data_Namespace::DataMgr::getChunkMetadataVecForKeyPrefix ( ChunkMetadataVector chunkMetadataVec,
const ChunkKey keyPrefix 
)

Definition at line 410 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

Referenced by TableOptimizer::vacuumFragments().

411  {
412  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
413  bufferMgrs_[0][0]->getChunkMetadataVecForKeyPrefix(chunkMetadataVec, keyPrefix);
414 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:246
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:241

+ Here is the caller graph for this function:

/// Non-owning observer pointer to the CUDA manager; null in CPU-only mode
/// (ownership stays with the cudaMgr_ unique_ptr).
CudaMgr_Namespace::CudaMgr* Data_Namespace::DataMgr::getCudaMgr() const {
  return cudaMgr_.get();
}

+ Here is the caller graph for this function:

std::shared_ptr< ForeignStorageInterface > Data_Namespace::DataMgr::getForeignStorageInterface ( ) const

Definition at line 551 of file DataMgr.cpp.

References bufferMgrs_.

551  {
552  return dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])
554 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:241
std::shared_ptr< ForeignStorageInterface > getForeignStorageInterface() const
Definition: DataMgr.cpp:551
/// Returns the GlobalFileMgr owned by the persistent storage manager at
/// level 0, device 0. Aborts (CHECK) if it is missing.
File_Namespace::GlobalFileMgr* Data_Namespace::DataMgr::getGlobalFileMgr() const {
  auto* global_file_mgr =
      dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
  CHECK(global_file_mgr);
  return global_file_mgr;
}

+ Here is the caller graph for this function:

std::vector< MemoryInfo > Data_Namespace::DataMgr::getMemoryInfo ( const MemoryLevel  memLevel)

Definition at line 303 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, CHECK, Data_Namespace::MemoryData::chunk_key, Data_Namespace::CPU_LEVEL, cudaMgr_, Buffer_Namespace::BufferMgr::getAllocated(), Buffer_Namespace::BufferMgr::getMaxSize(), Buffer_Namespace::BufferMgr::getPageSize(), Buffer_Namespace::BufferMgr::getSlabSegments(), Data_Namespace::GPU_LEVEL, hasGpus_, Data_Namespace::MemoryInfo::isAllocationCapped, Buffer_Namespace::BufferMgr::isAllocationCapped(), Data_Namespace::MemoryInfo::maxNumPages, Data_Namespace::MemoryData::memStatus, Data_Namespace::MemoryInfo::nodeMemoryData, Data_Namespace::MemoryInfo::numPageAllocated, Data_Namespace::MemoryData::numPages, Data_Namespace::MemoryInfo::pageSize, Data_Namespace::MemoryData::slabNum, Data_Namespace::MemoryData::startPage, and Data_Namespace::MemoryData::touch.

303  {
304  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
305 
306  std::vector<MemoryInfo> mem_info;
307  if (memLevel == MemoryLevel::CPU_LEVEL) {
308  Buffer_Namespace::CpuBufferMgr* cpu_buffer =
309  dynamic_cast<Buffer_Namespace::CpuBufferMgr*>(
311  CHECK(cpu_buffer);
312  MemoryInfo mi;
313 
314  mi.pageSize = cpu_buffer->getPageSize();
315  mi.maxNumPages = cpu_buffer->getMaxSize() / mi.pageSize;
316  mi.isAllocationCapped = cpu_buffer->isAllocationCapped();
317  mi.numPageAllocated = cpu_buffer->getAllocated() / mi.pageSize;
318 
319  const auto& slab_segments = cpu_buffer->getSlabSegments();
320  for (size_t slab_num = 0; slab_num < slab_segments.size(); ++slab_num) {
321  for (auto segment : slab_segments[slab_num]) {
322  MemoryData md;
323  md.slabNum = slab_num;
324  md.startPage = segment.start_page;
325  md.numPages = segment.num_pages;
326  md.touch = segment.last_touched;
327  md.memStatus = segment.mem_status;
328  md.chunk_key.insert(
329  md.chunk_key.end(), segment.chunk_key.begin(), segment.chunk_key.end());
330  mi.nodeMemoryData.push_back(md);
331  }
332  }
333  mem_info.push_back(mi);
334  } else if (hasGpus_) {
335  int numGpus = cudaMgr_->getDeviceCount();
336  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
338  dynamic_cast<Buffer_Namespace::GpuCudaBufferMgr*>(
340  CHECK(gpu_buffer);
341  MemoryInfo mi;
342 
343  mi.pageSize = gpu_buffer->getPageSize();
344  mi.maxNumPages = gpu_buffer->getMaxSize() / mi.pageSize;
345  mi.isAllocationCapped = gpu_buffer->isAllocationCapped();
346  mi.numPageAllocated = gpu_buffer->getAllocated() / mi.pageSize;
347 
348  const auto& slab_segments = gpu_buffer->getSlabSegments();
349  for (size_t slab_num = 0; slab_num < slab_segments.size(); ++slab_num) {
350  for (auto segment : slab_segments[slab_num]) {
351  MemoryData md;
352  md.slabNum = slab_num;
353  md.startPage = segment.start_page;
354  md.numPages = segment.num_pages;
355  md.touch = segment.last_touched;
356  md.chunk_key.insert(
357  md.chunk_key.end(), segment.chunk_key.begin(), segment.chunk_key.end());
358  md.memStatus = segment.mem_status;
359  mi.nodeMemoryData.push_back(md);
360  }
361  }
362  mem_info.push_back(mi);
363  }
364  }
365  return mem_info;
366 }
size_t getAllocated() override
Definition: BufferMgr.cpp:494
std::mutex buffer_access_mutex_
Definition: DataMgr.h:246
std::vector< MemoryData > nodeMemoryData
Definition: DataMgr.h:65
Buffer_Namespace::MemStatus memStatus
Definition: DataMgr.h:57
size_t getMaxSize() override
Definition: BufferMgr.cpp:489
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:241
bool isAllocationCapped() override
Definition: BufferMgr.cpp:499
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
Definition: DataMgr.h:242
const std::vector< BufferList > & getSlabSegments()
Definition: BufferMgr.cpp:874
#define CHECK(condition)
Definition: Logger.h:206
std::vector< int32_t > chunk_key
Definition: DataMgr.h:56

+ Here is the call graph for this function:

/// Returns the persistent storage manager (level 0, device 0), or null if
/// the manager at that slot is not a PersistentStorageMgr.
PersistentStorageMgr* Data_Namespace::DataMgr::getPersistentStorageMgr() const {
  return dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0]);
}

+ Here is the caller graph for this function:

DataMgr::SystemMemoryUsage Data_Namespace::DataMgr::getSystemMemoryUsage ( ) const

Definition at line 84 of file DataMgr.cpp.

References Data_Namespace::DataMgr::SystemMemoryUsage::frag, Data_Namespace::DataMgr::SystemMemoryUsage::free, Data_Namespace::ProcBuddyinfoParser::getFragmentationPercent(), Data_Namespace::DataMgr::SystemMemoryUsage::regular, Data_Namespace::DataMgr::SystemMemoryUsage::resident, Data_Namespace::DataMgr::SystemMemoryUsage::shared, Data_Namespace::DataMgr::SystemMemoryUsage::total, and Data_Namespace::DataMgr::SystemMemoryUsage::vtotal.

84  {
85  SystemMemoryUsage usage;
86 
87 #ifdef __linux__
88 
89  // Determine Linux available memory and total memory.
90  // Available memory is different from free memory because
91  // when Linux sees free memory, it tries to use it for
92  // stuff like disk caching. However, the memory is not
93  // reserved and is still available to be allocated by
94  // user processes.
95  // Parsing /proc/meminfo for this info isn't very elegant
96  // but as a virtual file it should be reasonably fast.
97  // See also:
98  // https://github.com/torvalds/linux/commit/34e431b0ae398fc54ea69ff85ec700722c9da773
100  usage.free = mi["MemAvailable"];
101  usage.total = mi["MemTotal"];
102 
103  // Determine process memory in use.
104  // See also:
105  // https://stackoverflow.com/questions/669438/how-to-get-memory-usage-at-runtime-using-c
106  // http://man7.org/linux/man-pages/man5/proc.5.html
107  int64_t size = 0;
108  int64_t resident = 0;
109  int64_t shared = 0;
110 
111  std::ifstream fstatm("/proc/self/statm");
112  fstatm >> size >> resident >> shared;
113  fstatm.close();
114 
115  long page_size =
116  sysconf(_SC_PAGE_SIZE); // in case x86-64 is configured to use 2MB pages
117 
118  usage.resident = resident * page_size;
119  usage.vtotal = size * page_size;
120  usage.regular = (resident - shared) * page_size;
121  usage.shared = shared * page_size;
122 
124  usage.frag = bi.getFragmentationPercent();
125 
126 #else
127 
128  usage.total = 0;
129  usage.free = 0;
130  usage.resident = 0;
131  usage.vtotal = 0;
132  usage.regular = 0;
133  usage.shared = 0;
134  usage.frag = 0;
135 
136 #endif
137 
138  return usage;
139 }
Parse /proc/meminfo into key/value pairs.
Definition: DataMgr.h:69
Parse /proc/buddyinfo into a Fragmentation health score.
Definition: DataMgr.h:102

+ Here is the call graph for this function:

size_t Data_Namespace::DataMgr::getTableEpoch ( const int  db_id,
const int  tb_id 
)

Definition at line 536 of file DataMgr.cpp.

References bufferMgrs_, CHECK, and getGlobalFileMgr().

536  {
537  File_Namespace::GlobalFileMgr* gfm{nullptr};
538  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
539  CHECK(gfm);
540  return gfm->getTableEpoch(db_id, tb_id);
541 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:241
File_Namespace::GlobalFileMgr * getGlobalFileMgr() const
Definition: DataMgr.cpp:543
#define CHECK(condition)
Definition: Logger.h:206

+ Here is the call graph for this function:

size_t Data_Namespace::DataMgr::getTotalSystemMemory ( )
static

Definition at line 141 of file DataMgr.cpp.

Referenced by populateMgrs().

141  {
142 #ifdef __APPLE__
143  int mib[2];
144  size_t physical_memory;
145  size_t length;
146  // Get the Physical memory size
147  mib[0] = CTL_HW;
148  mib[1] = HW_MEMSIZE;
149  length = sizeof(size_t);
150  sysctl(mib, 2, &physical_memory, &length, NULL, 0);
151  return physical_memory;
152 #elif defined(_MSC_VER)
153  MEMORYSTATUSEX status;
154  status.dwLength = sizeof(status);
155  GlobalMemoryStatusEx(&status);
156  return status.ullTotalPhys;
157 #else // Linux
158  long pages = sysconf(_SC_PHYS_PAGES);
159  long page_size = sysconf(_SC_PAGE_SIZE);
160  return pages * page_size;
161 #endif
162 }

+ Here is the caller graph for this function:

bool Data_Namespace::DataMgr::gpusPresent ( ) const
inline

Definition at line 203 of file DataMgr.h.

References hasGpus_.

Referenced by get_available_gpus().

203 { return hasGpus_; }

+ Here is the caller graph for this function:

bool Data_Namespace::DataMgr::isBufferOnDevice ( const ChunkKey key,
const MemoryLevel  memLevel,
const int  deviceId 
)

Definition at line 403 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

Referenced by Chunk_NS::Chunk::isChunkOnDevice().

405  {
406  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
407  return bufferMgrs_[memLevel][deviceId]->isBufferOnDevice(key);
408 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:246
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:241

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::populateMgrs ( const SystemParameters system_parameters,
const size_t  userSpecifiedNumReaderThreads,
const File_Namespace::DiskCacheConfig cache_config 
)
private

Definition at line 179 of file DataMgr.cpp.

References bufferMgrs_, SystemParameters::cpu_buffer_mem_bytes, PersistentStorageMgr::createPersistentStorageMgr(), cudaMgr_, dataDir_, getTotalSystemMemory(), SystemParameters::gpu_buffer_mem_bytes, hasGpus_, logger::INFO, levelSizes_, LOG, SystemParameters::max_cpu_slab_size, SystemParameters::max_gpu_slab_size, SystemParameters::min_cpu_slab_size, SystemParameters::min_gpu_slab_size, reservedGpuMem_, and VLOG.

Referenced by resetPersistentStorage().

181  {
182  // no need for locking, as this is only called in the constructor
 183  bufferMgrs_.resize(2);
 184  bufferMgrs_[0].push_back(PersistentStorageMgr::createPersistentStorageMgr(
 185  dataDir_, userSpecifiedNumReaderThreads, cache_config));
186 
187  levelSizes_.push_back(1);
188  size_t page_size{512};
189  size_t cpuBufferSize = system_parameters.cpu_buffer_mem_bytes;
190  if (cpuBufferSize == 0) { // if size is not specified
191  const auto total_system_memory = getTotalSystemMemory();
192  VLOG(1) << "Detected " << (float)total_system_memory / (1024 * 1024)
193  << "M of total system memory.";
194  cpuBufferSize = total_system_memory *
195  0.8; // should get free memory instead of this ugly heuristic
196  }
197  size_t minCpuSlabSize = std::min(system_parameters.min_cpu_slab_size, cpuBufferSize);
198  minCpuSlabSize = (minCpuSlabSize / page_size) * page_size;
199  size_t maxCpuSlabSize = std::min(system_parameters.max_cpu_slab_size, cpuBufferSize);
200  maxCpuSlabSize = (maxCpuSlabSize / page_size) * page_size;
201  LOG(INFO) << "Min CPU Slab Size is " << (float)minCpuSlabSize / (1024 * 1024) << "MB";
202  LOG(INFO) << "Max CPU Slab Size is " << (float)maxCpuSlabSize / (1024 * 1024) << "MB";
203  LOG(INFO) << "Max memory pool size for CPU is " << (float)cpuBufferSize / (1024 * 1024)
204  << "MB";
205  if (hasGpus_ || cudaMgr_) {
206  LOG(INFO) << "Reserved GPU memory is " << (float)reservedGpuMem_ / (1024 * 1024)
207  << "MB includes render buffer allocation";
208  bufferMgrs_.resize(3);
209  bufferMgrs_[1].push_back(new Buffer_Namespace::CpuBufferMgr(0,
210  cpuBufferSize,
211  cudaMgr_.get(),
212  minCpuSlabSize,
213  maxCpuSlabSize,
214  page_size,
215  bufferMgrs_[0][0]));
216  levelSizes_.push_back(1);
217  int numGpus = cudaMgr_->getDeviceCount();
218  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
219  size_t gpuMaxMemSize =
220  system_parameters.gpu_buffer_mem_bytes != 0
221  ? system_parameters.gpu_buffer_mem_bytes
222  : (cudaMgr_->getDeviceProperties(gpuNum)->globalMem) - (reservedGpuMem_);
223  size_t minGpuSlabSize =
224  std::min(system_parameters.min_gpu_slab_size, gpuMaxMemSize);
225  minGpuSlabSize = (minGpuSlabSize / page_size) * page_size;
226  size_t maxGpuSlabSize =
227  std::min(system_parameters.max_gpu_slab_size, gpuMaxMemSize);
228  maxGpuSlabSize = (maxGpuSlabSize / page_size) * page_size;
229  LOG(INFO) << "Min GPU Slab size for GPU " << gpuNum << " is "
230  << (float)minGpuSlabSize / (1024 * 1024) << "MB";
231  LOG(INFO) << "Max GPU Slab size for GPU " << gpuNum << " is "
232  << (float)maxGpuSlabSize / (1024 * 1024) << "MB";
233  LOG(INFO) << "Max memory pool size for GPU " << gpuNum << " is "
234  << (float)gpuMaxMemSize / (1024 * 1024) << "MB";
235  bufferMgrs_[2].push_back(new Buffer_Namespace::GpuCudaBufferMgr(gpuNum,
236  gpuMaxMemSize,
237  cudaMgr_.get(),
238  minGpuSlabSize,
239  maxGpuSlabSize,
240  page_size,
241  bufferMgrs_[1][0]));
242  }
243  levelSizes_.push_back(numGpus);
244  } else {
245  bufferMgrs_[1].push_back(new Buffer_Namespace::CpuBufferMgr(0,
246  cpuBufferSize,
247  cudaMgr_.get(),
248  minCpuSlabSize,
249  maxCpuSlabSize,
250  page_size,
251  bufferMgrs_[0][0]));
252  levelSizes_.push_back(1);
253  }
254 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:241
std::vector< int > levelSizes_
Definition: DataMgr.h:213
#define LOG(tag)
Definition: Logger.h:200
static size_t getTotalSystemMemory()
Definition: DataMgr.cpp:141
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
Definition: DataMgr.h:242
static PersistentStorageMgr * createPersistentStorageMgr(const std::string &data_dir, const size_t num_reader_threads, const File_Namespace::DiskCacheConfig &disk_cache_config)
#define VLOG(n)
Definition: Logger.h:300
std::string dataDir_
Definition: DataMgr.h:243

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void Data_Namespace::DataMgr::removeTableRelatedDS ( const int  db_id,
const int  tb_id 
)

Definition at line 524 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

524  {
525  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
526  bufferMgrs_[0][0]->removeTableRelatedDS(db_id, tb_id);
527 }
std::mutex buffer_access_mutex_
Definition: DataMgr.h:246
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:241
void Data_Namespace::DataMgr::resetPersistentStorage ( const File_Namespace::DiskCacheConfig cache_config,
const size_t  num_reader_threads,
const SystemParameters sys_params 
)

Definition at line 165 of file DataMgr.cpp.

References bufferMgrs_, createTopLevelMetadata(), and populateMgrs().

167  {
168  int numLevels = bufferMgrs_.size();
169  for (int level = numLevels - 1; level >= 0; --level) {
170  for (size_t device = 0; device < bufferMgrs_[level].size(); device++) {
171  delete bufferMgrs_[level][device];
172  }
173  }
174  bufferMgrs_.clear();
 175  populateMgrs(sys_params, num_reader_threads, cache_config);
 176  createTopLevelMetadata();
 177 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:241
void createTopLevelMetadata() const
Definition: DataMgr.cpp:287
void populateMgrs(const SystemParameters &system_parameters, const size_t userSpecifiedNumReaderThreads, const File_Namespace::DiskCacheConfig &cache_config)
Definition: DataMgr.cpp:179

+ Here is the call graph for this function:

void Data_Namespace::DataMgr::setTableEpoch ( const int  db_id,
const int  tb_id,
const int  start_epoch 
)

Definition at line 529 of file DataMgr.cpp.

References bufferMgrs_, CHECK, and getGlobalFileMgr().

529  {
530  File_Namespace::GlobalFileMgr* gfm{nullptr};
531  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
532  CHECK(gfm);
533  gfm->setTableEpoch(db_id, tb_id, start_epoch);
534 }
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:241
File_Namespace::GlobalFileMgr * getGlobalFileMgr() const
Definition: DataMgr.cpp:543
#define CHECK(condition)
Definition: Logger.h:206

+ Here is the call graph for this function:

Friends And Related Function Documentation

friend class GlobalFileMgr
friend

Definition at line 161 of file DataMgr.h.

Member Data Documentation

std::unique_ptr<CudaMgr_Namespace::CudaMgr> Data_Namespace::DataMgr::cudaMgr_
private

Definition at line 242 of file DataMgr.h.

Referenced by clearMemory(), dumpLevel(), getCudaMgr(), getMemoryInfo(), and populateMgrs().

std::string Data_Namespace::DataMgr::dataDir_
private

Definition at line 243 of file DataMgr.h.

Referenced by populateMgrs().

bool Data_Namespace::DataMgr::hasGpus_
private

Definition at line 244 of file DataMgr.h.

Referenced by getMemoryInfo(), gpusPresent(), and populateMgrs().

std::vector<int> Data_Namespace::DataMgr::levelSizes_

Definition at line 213 of file DataMgr.h.

Referenced by alloc(), checkpoint(), deleteChunksWithPrefix(), getChunkBuffer(), and populateMgrs().

size_t Data_Namespace::DataMgr::reservedGpuMem_
private

Definition at line 245 of file DataMgr.h.

Referenced by populateMgrs().


The documentation for this class was generated from the following files: