OmniSciDB  dfae7c3b14
Data_Namespace::DataMgr Class Reference

#include <DataMgr.h>


Classes

struct  SystemMemoryUsage
 

Public Member Functions

 DataMgr (const std::string &dataDir, const SystemParameters &system_parameters, const bool useGpus, const int numGpus, const int startGpu=0, const size_t reservedGpuMem=(1<< 27), const size_t numReaderThreads=0, const DiskCacheConfig cacheConfig=DiskCacheConfig())
 
 ~DataMgr ()
 
AbstractBuffer * createChunkBuffer (const ChunkKey &key, const MemoryLevel memoryLevel, const int deviceId=0, const size_t page_size=0)
 
AbstractBuffer * getChunkBuffer (const ChunkKey &key, const MemoryLevel memoryLevel, const int deviceId=0, const size_t numBytes=0)
 
void deleteChunksWithPrefix (const ChunkKey &keyPrefix)
 
void deleteChunksWithPrefix (const ChunkKey &keyPrefix, const MemoryLevel memLevel)
 
AbstractBuffer * alloc (const MemoryLevel memoryLevel, const int deviceId, const size_t numBytes)
 
void free (AbstractBuffer *buffer)
 
void copy (AbstractBuffer *destBuffer, AbstractBuffer *srcBuffer)
 
bool isBufferOnDevice (const ChunkKey &key, const MemoryLevel memLevel, const int deviceId)
 
std::vector< MemoryInfo > getMemoryInfo (const MemoryLevel memLevel)
 
std::string dumpLevel (const MemoryLevel memLevel)
 
void clearMemory (const MemoryLevel memLevel)
 
const std::map< ChunkKey, File_Namespace::FileBuffer * > & getChunkMap ()
 
void checkpoint (const int db_id, const int tb_id)
 
void getChunkMetadataVecForKeyPrefix (ChunkMetadataVector &chunkMetadataVec, const ChunkKey &keyPrefix)
 
bool gpusPresent ()
 
void removeTableRelatedDS (const int db_id, const int tb_id)
 
void setTableEpoch (const int db_id, const int tb_id, const int start_epoch)
 
size_t getTableEpoch (const int db_id, const int tb_id)
 
CudaMgr_Namespace::CudaMgr * getCudaMgr () const
 
File_Namespace::GlobalFileMgr * getGlobalFileMgr () const
 
SystemMemoryUsage getSystemMemoryUsage () const
 
PersistentStorageMgr * getPersistentStorageMgr () const
 
void resetPersistentStorage (const DiskCacheConfig &cache_config, const size_t num_reader_threads, const SystemParameters &sys_params)
 

Static Public Member Functions

static size_t getTotalSystemMemory ()
 

Public Attributes

std::vector< int > levelSizes_
 

Private Member Functions

void populateMgrs (const SystemParameters &system_parameters, const size_t userSpecifiedNumReaderThreads, const DiskCacheConfig &cache_config)
 
void convertDB (const std::string basePath)
 
void checkpoint ()
 
void createTopLevelMetadata () const
 

Private Attributes

std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
 
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
 
std::string dataDir_
 
bool hasGpus_
 
size_t reservedGpuMem_
 
std::mutex buffer_access_mutex_
 

Friends

class GlobalFileMgr
 

Detailed Description

Definition at line 160 of file DataMgr.h.

Constructor & Destructor Documentation

◆ DataMgr()

Data_Namespace::DataMgr::DataMgr ( const std::string &  dataDir,
const SystemParameters &  system_parameters,
const bool  useGpus,
const int  numGpus,
const int  startGpu = 0,
const size_t  reservedGpuMem = (1 << 27),
const size_t  numReaderThreads = 0,
const DiskCacheConfig  cacheConfig = DiskCacheConfig() 
)

Definition at line 49 of file DataMgr.cpp.

References createTopLevelMetadata(), cudaMgr_, logger::ERROR, hasGpus_, LOG, populateMgrs(), and reservedGpuMem_.

57  : dataDir_(dataDir) {
58  if (useGpus) {
59  try {
60  cudaMgr_ = std::make_unique<CudaMgr_Namespace::CudaMgr>(numGpus, startGpu);
61  reservedGpuMem_ = reservedGpuMem;
62  hasGpus_ = true;
63  } catch (const std::exception& e) {
64  LOG(ERROR) << "Unable to instantiate CudaMgr, falling back to CPU-only mode. "
65  << e.what();
66  hasGpus_ = false;
67  }
68  } else {
69  hasGpus_ = false;
70  }
71 
72  populateMgrs(system_parameters, numReaderThreads, cache_config);
73  createTopLevelMetadata();
74 }
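As a usage illustration (not taken from the OmniSciDB sources), a minimal CPU-only construction might look like the sketch below; the data directory path is a placeholder and a default-constructed SystemParameters is assumed to be acceptable:

#include "DataMgr.h"

// Hypothetical CPU-only setup; "/var/lib/omnisci/data" is a placeholder path.
SystemParameters sys_params;  // assumed default-constructible
Data_Namespace::DataMgr data_mgr("/var/lib/omnisci/data",
                                 sys_params,
                                 /*useGpus=*/false,
                                 /*numGpus=*/0);
// startGpu, reservedGpuMem, numReaderThreads and cacheConfig fall back to the
// defaults shown in the signature above.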

◆ ~DataMgr()

Data_Namespace::DataMgr::~DataMgr ( )

Definition at line 76 of file DataMgr.cpp.

References bufferMgrs_, and run_benchmark_import::level.

76  {
77  int numLevels = bufferMgrs_.size();
78  for (int level = numLevels - 1; level >= 0; --level) {
79  for (size_t device = 0; device < bufferMgrs_[level].size(); device++) {
80  delete bufferMgrs_[level][device];
81  }
82  }
83 }

Member Function Documentation

◆ alloc()

AbstractBuffer * Data_Namespace::DataMgr::alloc ( const MemoryLevel  memoryLevel,
const int  deviceId,
const size_t  numBytes 
)

Definition at line 452 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, CHECK_LT, run_benchmark_import::level, and levelSizes_.

Referenced by CudaAllocator::allocGpuAbstractBuffer().

454  {
455  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
456  const auto level = static_cast<int>(memoryLevel);
457  CHECK_LT(deviceId, levelSizes_[level]);
458  return bufferMgrs_[level][deviceId]->alloc(numBytes);
459 }
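A hedged sketch pairing alloc() with free(); it assumes a DataMgr reference is available from elsewhere and that a CPU-resident source is what the caller wants to write:

#include <vector>

// Allocate 1 MiB from the CPU pool of device 0, fill it, then return it to its manager.
void alloc_scratch_example(Data_Namespace::DataMgr& data_mgr) {
  Data_Namespace::AbstractBuffer* buf =
      data_mgr.alloc(Data_Namespace::CPU_LEVEL, /*deviceId=*/0, 1 << 20);
  std::vector<int8_t> payload(1 << 20, 0);
  buf->write(payload.data(), payload.size());  // offset 0, CPU-level source by default
  data_mgr.free(buf);
}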

◆ checkpoint() [1/2]

void Data_Namespace::DataMgr::checkpoint ( const int  db_id,
const int  tb_id 
)

Definition at line 481 of file DataMgr.cpp.

References bufferMgrs_.

Referenced by Catalog_Namespace::Catalog::checkpoint().

481  {
482  // TODO(adb): do we need a buffer mgr lock here?
483  for (auto levelIt = bufferMgrs_.rbegin(); levelIt != bufferMgrs_.rend(); ++levelIt) {
484  // use reverse iterator so we start at GPU level, then CPU then DISK
485  for (auto deviceIt = levelIt->begin(); deviceIt != levelIt->end(); ++deviceIt) {
486  (*deviceIt)->checkpoint(db_id, tb_id);
487  }
488  }
489 }

◆ checkpoint() [2/2]

void Data_Namespace::DataMgr::checkpoint ( )
private

Definition at line 491 of file DataMgr.cpp.

References bufferMgrs_.

Referenced by convertDB().

491  {
492  // TODO(adb): SAA
493  for (auto levelIt = bufferMgrs_.rbegin(); levelIt != bufferMgrs_.rend(); ++levelIt) {
494  // use reverse iterator so we start at GPU level, then CPU then DISK
495  for (auto deviceIt = levelIt->begin(); deviceIt != levelIt->end(); ++deviceIt) {
496  (*deviceIt)->checkpoint();
497  }
498  }
499 }

◆ clearMemory()

void Data_Namespace::DataMgr::clearMemory ( const MemoryLevel  memLevel)

Definition at line 377 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, cudaMgr_, Data_Namespace::GPU_LEVEL, logger::INFO, and LOG.

Referenced by Executor::clearMemory().

377  {
378  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
379 
380  // if gpu we need to iterate through all the buffermanagers for each card
381  if (memLevel == MemoryLevel::GPU_LEVEL) {
382  if (cudaMgr_) {
383  int numGpus = cudaMgr_->getDeviceCount();
384  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
385  LOG(INFO) << "clear slabs on gpu " << gpuNum;
386  bufferMgrs_[memLevel][gpuNum]->clearSlabs();
387  }
388  } else {
389  throw std::runtime_error("Unable to clear GPU memory: No GPUs detected");
390  }
391  } else {
392  bufferMgrs_[memLevel][0]->clearSlabs();
393  }
394 }
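Because clearMemory(GPU_LEVEL) throws when no GPUs were detected, callers typically guard the call; a small sketch (data_mgr supplied by the caller):

// Drop cached GPU slabs when GPUs are available, otherwise clear the CPU slabs.
void clear_device_memory_example(Data_Namespace::DataMgr& data_mgr) {
  if (data_mgr.gpusPresent()) {
    data_mgr.clearMemory(Data_Namespace::GPU_LEVEL);
  } else {
    data_mgr.clearMemory(Data_Namespace::CPU_LEVEL);
  }
}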

◆ convertDB()

void Data_Namespace::DataMgr::convertDB ( const std::string  basePath)
private

Definition at line 253 of file DataMgr.cpp.

References bufferMgrs_, checkpoint(), logger::FATAL, File_Namespace::GlobalFileMgr::getDefaultPageSize(), getGlobalFileMgr(), logger::INFO, and LOG.

253  {
254  // no need for locking, as this is only called in the constructor
255 
256  /* check that "mapd_data" directory exists and it's empty */
257  std::string mapdDataPath(basePath + "/../mapd_data/");
258  boost::filesystem::path path(mapdDataPath);
259  if (boost::filesystem::exists(path)) {
260  if (!boost::filesystem::is_directory(path)) {
261  LOG(FATAL) << "Path to directory mapd_data to convert DB is not a directory.";
262  }
263  } else { // data directory does not exist
264  LOG(FATAL) << "Path to directory mapd_data to convert DB does not exist.";
265  }
266 
267  GlobalFileMgr* gfm;
268  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
269 
270  size_t defaultPageSize = gfm->getDefaultPageSize();
271  LOG(INFO) << "Database conversion started.";
272  FileMgr* fm_base_db =
273  new FileMgr(gfm,
274  defaultPageSize,
275  basePath); // this call also copies data into new DB structure
276  delete fm_base_db;
277 
278  /* write content of DB into newly created/converted DB structure & location */
279  checkpoint(); // outputs data files as well as metadata files
280  LOG(INFO) << "Database conversion completed.";
281 }

◆ copy()

void Data_Namespace::DataMgr::copy ( AbstractBuffer *  destBuffer,
AbstractBuffer *  srcBuffer 
)

Definition at line 467 of file DataMgr.cpp.

References Data_Namespace::AbstractBuffer::getDeviceId(), Data_Namespace::AbstractBuffer::getMemoryPtr(), Data_Namespace::AbstractBuffer::getType(), Data_Namespace::AbstractBuffer::size(), and Data_Namespace::AbstractBuffer::write().

467  {
468  destBuffer->write(srcBuffer->getMemoryPtr(),
469  srcBuffer->size(),
470  0,
471  srcBuffer->getType(),
472  srcBuffer->getDeviceId());
473 }
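copy() simply writes srcBuffer's contents into destBuffer, so the destination must already be large enough. A hedged sketch that stages an existing buffer into a freshly allocated CPU buffer (data_mgr and src are assumed to come from the caller):

// Stage an existing buffer's contents into a new CPU-level buffer of the same size.
Data_Namespace::AbstractBuffer* stage_to_cpu_example(Data_Namespace::DataMgr& data_mgr,
                                                     Data_Namespace::AbstractBuffer* src) {
  Data_Namespace::AbstractBuffer* dest =
      data_mgr.alloc(Data_Namespace::CPU_LEVEL, /*deviceId=*/0, src->size());
  data_mgr.copy(dest, src);  // dest->write(src->getMemoryPtr(), src->size(), ...)
  return dest;               // caller later releases it with data_mgr.free(dest)
}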

◆ createChunkBuffer()

AbstractBuffer * Data_Namespace::DataMgr::createChunkBuffer ( const ChunkKey &  key,
const MemoryLevel  memoryLevel,
const int  deviceId = 0,
const size_t  page_size = 0 
)

Definition at line 408 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, and run_benchmark_import::level.

Referenced by Chunk_NS::Chunk::createChunkBuffer().

411  {
412  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
413  int level = static_cast<int>(memoryLevel);
414  return bufferMgrs_[level][deviceId]->createBuffer(key, page_size);
415 }
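ChunkKey is a std::vector<int> whose leading components are db_id and tb_id (see createTopLevelMetadata() below); the column and fragment components used here are an assumed convention for illustration only:

// Create an empty CPU-level chunk buffer for a hypothetical (db, table, column, fragment) key.
Data_Namespace::AbstractBuffer* create_chunk_example(Data_Namespace::DataMgr& data_mgr) {
  ChunkKey key{/*db_id=*/1, /*tb_id=*/2, /*col_id=*/3, /*frag_id=*/0};
  return data_mgr.createChunkBuffer(key, Data_Namespace::CPU_LEVEL);  // deviceId, page_size default to 0
}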

◆ createTopLevelMetadata()

void Data_Namespace::DataMgr::createTopLevelMetadata ( ) const
private

Definition at line 283 of file DataMgr.cpp.

References bufferMgrs_, File_Namespace::GlobalFileMgr::getFileMgr(), and getGlobalFileMgr().

Referenced by DataMgr(), and resetPersistentStorage().

284  { // create metadata shared by all tables of all DBs
285  ChunkKey chunkKey(2);
286  chunkKey[0] = 0; // top level db_id
287  chunkKey[1] = 0; // top level tb_id
288 
289  GlobalFileMgr* gfm;
290  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
291 
292  auto fm_top = gfm->getFileMgr(chunkKey);
293  if (dynamic_cast<File_Namespace::FileMgr*>(fm_top)) {
294  static_cast<File_Namespace::FileMgr*>(fm_top)->createTopLevelMetadata();
295  }
296 }

◆ deleteChunksWithPrefix() [1/2]

void Data_Namespace::DataMgr::deleteChunksWithPrefix ( const ChunkKey &  keyPrefix)

Definition at line 428 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, run_benchmark_import::level, and levelSizes_.

Referenced by UpdelRoll::commitUpdate().

428  {
429  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
430 
431  int numLevels = bufferMgrs_.size();
432  for (int level = numLevels - 1; level >= 0; --level) {
433  for (int device = 0; device < levelSizes_[level]; ++device) {
434  bufferMgrs_[level][device]->deleteBuffersWithPrefix(keyPrefix);
435  }
436  }
437 }
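A prefix selects every chunk whose key begins with the given components, so a {db_id, tb_id} prefix removes a table's chunks from every memory level. A minimal sketch:

// Drop all cached chunks belonging to one table across all memory levels.
void drop_table_chunks_example(Data_Namespace::DataMgr& data_mgr,
                               const int db_id,
                               const int tb_id) {
  const ChunkKey table_prefix{db_id, tb_id};
  data_mgr.deleteChunksWithPrefix(table_prefix);
}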

◆ deleteChunksWithPrefix() [2/2]

void Data_Namespace::DataMgr::deleteChunksWithPrefix ( const ChunkKey &  keyPrefix,
const MemoryLevel  memLevel 
)

Definition at line 440 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, and levelSizes_.

441  {
442  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
443 
444  if (bufferMgrs_.size() <= memLevel) {
445  return;
446  }
447  for (int device = 0; device < levelSizes_[memLevel]; ++device) {
448  bufferMgrs_[memLevel][device]->deleteBuffersWithPrefix(keyPrefix);
449  }
450 }

◆ dumpLevel()

std::string Data_Namespace::DataMgr::dumpLevel ( const MemoryLevel  memLevel)

Definition at line 361 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, cudaMgr_, and Data_Namespace::GPU_LEVEL.

361  {
362  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
363 
364  // if gpu we need to iterate through all the buffermanagers for each card
365  if (memLevel == MemoryLevel::GPU_LEVEL) {
366  int numGpus = cudaMgr_->getDeviceCount();
367  std::ostringstream tss;
368  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
369  tss << bufferMgrs_[memLevel][gpuNum]->printSlabs();
370  }
371  return tss.str();
372  } else {
373  return bufferMgrs_[memLevel][0]->printSlabs();
374  }
375 }
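A short sketch printing the slab report; GPU_LEVEL walks every device's buffer manager, so it should only be requested when GPUs are present:

#include <iostream>

// Print the CPU pool's slab layout (pass Data_Namespace::GPU_LEVEL on GPU-enabled servers).
void dump_cpu_slabs_example(Data_Namespace::DataMgr& data_mgr) {
  std::cout << data_mgr.dumpLevel(Data_Namespace::CPU_LEVEL);
}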

◆ free()

void Data_Namespace::DataMgr::free ( AbstractBuffer *  buffer)

Definition at line 461 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, Data_Namespace::AbstractBuffer::getDeviceId(), Data_Namespace::AbstractBuffer::getType(), and run_benchmark_import::level.

Referenced by ThrustAllocator::deallocate(), CudaAllocator::free(), CudaAllocator::freeGpuAbstractBuffer(), CudaAllocator::~CudaAllocator(), InValuesBitmap::~InValuesBitmap(), ResultSet::~ResultSet(), and ThrustAllocator::~ThrustAllocator().

461  {
462  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
463  int level = static_cast<int>(buffer->getType());
464  bufferMgrs_[level][buffer->getDeviceId()]->free(buffer);
465 }

◆ getChunkBuffer()

AbstractBuffer * Data_Namespace::DataMgr::getChunkBuffer ( const ChunkKey &  key,
const MemoryLevel  memoryLevel,
const int  deviceId = 0,
const size_t  numBytes = 0 
)

Definition at line 417 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, CHECK_LT, run_benchmark_import::level, and levelSizes_.

Referenced by Chunk_NS::Chunk::getChunkBuffer().

420  {
421  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
422  const auto level = static_cast<size_t>(memoryLevel);
423  CHECK_LT(level, levelSizes_.size()); // make sure we have a legit buffermgr
424  CHECK_LT(deviceId, levelSizes_[level]); // make sure we have a legit buffermgr
425  return bufferMgrs_[level][deviceId]->getBuffer(key, numBytes);
426 }
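A hedged sketch that pins an existing chunk at the GPU level when GPUs are available, falling back to CPU otherwise; the key and byte count are assumed to come from the caller:

// Fetch an existing chunk into device 0 of the preferred memory level.
Data_Namespace::AbstractBuffer* fetch_chunk_example(Data_Namespace::DataMgr& data_mgr,
                                                    const ChunkKey& key,
                                                    const size_t num_bytes) {
  const auto level =
      data_mgr.gpusPresent() ? Data_Namespace::GPU_LEVEL : Data_Namespace::CPU_LEVEL;
  return data_mgr.getChunkBuffer(key, level, /*deviceId=*/0, num_bytes);
}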

◆ getChunkMap()

const std::map<ChunkKey, File_Namespace::FileBuffer*>& Data_Namespace::DataMgr::getChunkMap ( )

◆ getChunkMetadataVecForKeyPrefix()

void Data_Namespace::DataMgr::getChunkMetadataVecForKeyPrefix ( ChunkMetadataVector &  chunkMetadataVec,
const ChunkKey &  keyPrefix 
)

Definition at line 403 of file DataMgr.cpp.

References bufferMgrs_.

404  {
405  bufferMgrs_[0][0]->getChunkMetadataVecForKeyPrefix(chunkMetadataVec, keyPrefix);
406 }

◆ getCudaMgr()

CudaMgr_Namespace::CudaMgr* Data_Namespace::DataMgr::getCudaMgr ( ) const
inline

Definition at line 208 of file DataMgr.h.

Referenced by Executor::blockSize(), copy_from_gpu(), copy_to_gpu(), CudaAllocator::copyFromDevice(), CudaAllocator::copyToDevice(), CudaAllocator::CudaAllocator(), Executor::deviceCount(), Executor::deviceCycles(), get_available_gpus(), Executor::gridSize(), Executor::interrupt(), Executor::isCPUOnly(), Executor::numBlocksPerMP(), ResultSet::ResultSet(), CudaAllocator::setDeviceMem(), Executor::warpSize(), and CudaAllocator::zeroDeviceMem().

208 { return cudaMgr_.get(); }

◆ getGlobalFileMgr()

GlobalFileMgr * Data_Namespace::DataMgr::getGlobalFileMgr ( ) const

Definition at line 518 of file DataMgr.cpp.

References bufferMgrs_, and CHECK.

Referenced by convertDB(), createTopLevelMetadata(), Catalog_Namespace::Catalog::getTableDataDirectories(), getTableEpoch(), and setTableEpoch().

518  {
519  GlobalFileMgr* global_file_mgr;
520  global_file_mgr =
521  dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
522  CHECK(global_file_mgr);
523  return global_file_mgr;
524 }

◆ getMemoryInfo()

std::vector< MemoryInfo > Data_Namespace::DataMgr::getMemoryInfo ( const MemoryLevel  memLevel)

Definition at line 298 of file DataMgr.cpp.

References buffer_access_mutex_, bufferMgrs_, CHECK, Data_Namespace::MemoryData::chunk_key, Data_Namespace::CPU_LEVEL, cudaMgr_, Buffer_Namespace::BufferMgr::getAllocated(), Buffer_Namespace::BufferMgr::getMaxSize(), Buffer_Namespace::BufferMgr::getPageSize(), Buffer_Namespace::BufferMgr::getSlabSegments(), Data_Namespace::GPU_LEVEL, hasGpus_, Data_Namespace::MemoryInfo::isAllocationCapped, Buffer_Namespace::BufferMgr::isAllocationCapped(), Data_Namespace::MemoryInfo::maxNumPages, Data_Namespace::MemoryData::memStatus, Data_Namespace::MemoryInfo::nodeMemoryData, Data_Namespace::MemoryInfo::numPageAllocated, Data_Namespace::MemoryData::numPages, Data_Namespace::MemoryInfo::pageSize, Data_Namespace::MemoryData::slabNum, Data_Namespace::MemoryData::startPage, and Data_Namespace::MemoryData::touch.

Referenced by Executor::createKernels().

298  {
299  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
300 
301  std::vector<MemoryInfo> mem_info;
302  if (memLevel == MemoryLevel::CPU_LEVEL) {
303  CpuBufferMgr* cpu_buffer =
304  dynamic_cast<CpuBufferMgr*>(bufferMgrs_[MemoryLevel::CPU_LEVEL][0]);
305  CHECK(cpu_buffer);
306  MemoryInfo mi;
307 
308  mi.pageSize = cpu_buffer->getPageSize();
309  mi.maxNumPages = cpu_buffer->getMaxSize() / mi.pageSize;
310  mi.isAllocationCapped = cpu_buffer->isAllocationCapped();
311  mi.numPageAllocated = cpu_buffer->getAllocated() / mi.pageSize;
312 
313  const auto& slab_segments = cpu_buffer->getSlabSegments();
314  for (size_t slab_num = 0; slab_num < slab_segments.size(); ++slab_num) {
315  for (auto segment : slab_segments[slab_num]) {
316  MemoryData md;
317  md.slabNum = slab_num;
318  md.startPage = segment.start_page;
319  md.numPages = segment.num_pages;
320  md.touch = segment.last_touched;
321  md.memStatus = segment.mem_status;
322  md.chunk_key.insert(
323  md.chunk_key.end(), segment.chunk_key.begin(), segment.chunk_key.end());
324  mi.nodeMemoryData.push_back(md);
325  }
326  }
327  mem_info.push_back(mi);
328  } else if (hasGpus_) {
329  int numGpus = cudaMgr_->getDeviceCount();
330  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
331  GpuCudaBufferMgr* gpu_buffer =
332  dynamic_cast<GpuCudaBufferMgr*>(bufferMgrs_[MemoryLevel::GPU_LEVEL][gpuNum]);
333  CHECK(gpu_buffer);
334  MemoryInfo mi;
335 
336  mi.pageSize = gpu_buffer->getPageSize();
337  mi.maxNumPages = gpu_buffer->getMaxSize() / mi.pageSize;
338  mi.isAllocationCapped = gpu_buffer->isAllocationCapped();
339  mi.numPageAllocated = gpu_buffer->getAllocated() / mi.pageSize;
340 
341  const auto& slab_segments = gpu_buffer->getSlabSegments();
342  for (size_t slab_num = 0; slab_num < slab_segments.size(); ++slab_num) {
343  for (auto segment : slab_segments[slab_num]) {
344  MemoryData md;
345  md.slabNum = slab_num;
346  md.startPage = segment.start_page;
347  md.numPages = segment.num_pages;
348  md.touch = segment.last_touched;
349  md.chunk_key.insert(
350  md.chunk_key.end(), segment.chunk_key.begin(), segment.chunk_key.end());
351  md.memStatus = segment.mem_status;
352  mi.nodeMemoryData.push_back(md);
353  }
354  }
355  mem_info.push_back(mi);
356  }
357  }
358  return mem_info;
359 }
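One MemoryInfo entry is returned per device at the requested level; a sketch that reports pool utilization (data_mgr supplied by the caller):

#include <iostream>

// Summarize page allocation for each buffer pool at a given memory level.
void report_memory_example(Data_Namespace::DataMgr& data_mgr,
                           const Data_Namespace::MemoryLevel level) {
  for (const auto& mi : data_mgr.getMemoryInfo(level)) {
    std::cout << "page size " << mi.pageSize << ", allocated pages "
              << mi.numPageAllocated << " of " << mi.maxNumPages
              << (mi.isAllocationCapped ? " (capped)" : "") << "\n";
  }
}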

◆ getPersistentStorageMgr()

PersistentStorageMgr * Data_Namespace::DataMgr::getPersistentStorageMgr ( ) const

Definition at line 541 of file DataMgr.cpp.

References bufferMgrs_.

541  {
542  return dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0]);
543 }

◆ getSystemMemoryUsage()

DataMgr::SystemMemoryUsage Data_Namespace::DataMgr::getSystemMemoryUsage ( ) const

Definition at line 85 of file DataMgr.cpp.

References Data_Namespace::DataMgr::SystemMemoryUsage::frag, Data_Namespace::DataMgr::SystemMemoryUsage::free, Data_Namespace::ProcBuddyinfoParser::getFragmentationPercent(), Data_Namespace::DataMgr::SystemMemoryUsage::regular, Data_Namespace::DataMgr::SystemMemoryUsage::resident, Data_Namespace::DataMgr::SystemMemoryUsage::shared, Data_Namespace::DataMgr::SystemMemoryUsage::total, and Data_Namespace::DataMgr::SystemMemoryUsage::vtotal.

85  {
86  SystemMemoryUsage usage;
87 
88 #ifdef __linux__
89 
90  // Determine Linux available memory and total memory.
91  // Available memory is different from free memory because
92  // when Linux sees free memory, it tries to use it for
93  // stuff like disk caching. However, the memory is not
94  // reserved and is still available to be allocated by
95  // user processes.
96  // Parsing /proc/meminfo for this info isn't very elegant
97  // but as a virtual file it should be reasonably fast.
98  // See also:
99  // https://github.com/torvalds/linux/commit/34e431b0ae398fc54ea69ff85ec700722c9da773
100  ProcMeminfoParser mi;
101  usage.free = mi["MemAvailable"];
102  usage.total = mi["MemTotal"];
103 
104  // Determine process memory in use.
105  // See also:
106  // https://stackoverflow.com/questions/669438/how-to-get-memory-usage-at-runtime-using-c
107  // http://man7.org/linux/man-pages/man5/proc.5.html
108  int64_t size = 0;
109  int64_t resident = 0;
110  int64_t shared = 0;
111 
112  std::ifstream fstatm("/proc/self/statm");
113  fstatm >> size >> resident >> shared;
114  fstatm.close();
115 
116  long page_size =
117  sysconf(_SC_PAGE_SIZE); // in case x86-64 is configured to use 2MB pages
118 
119  usage.resident = resident * page_size;
120  usage.vtotal = size * page_size;
121  usage.regular = (resident - shared) * page_size;
122  usage.shared = shared * page_size;
123 
124  ProcBuddyinfoParser bi;
125  usage.frag = bi.getFragmentationPercent();
126 
127 #else
128 
129  usage.total = 0;
130  usage.free = 0;
131  usage.resident = 0;
132  usage.vtotal = 0;
133  usage.regular = 0;
134  usage.shared = 0;
135  usage.frag = 0;
136 
137 #endif
138 
139  return usage;
140 }
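A small sketch logging the returned figures; on non-Linux builds every field is zero, as the #else branch above shows:

#include <iostream>

// Log host-wide and process-level memory figures (bytes).
void log_system_memory_example(const Data_Namespace::DataMgr& data_mgr) {
  const auto usage = data_mgr.getSystemMemoryUsage();
  std::cout << "host total=" << usage.total << " available=" << usage.free
            << " process resident=" << usage.resident << " virtual=" << usage.vtotal
            << " fragmentation=" << usage.frag << "\n";
}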

◆ getTableEpoch()

size_t Data_Namespace::DataMgr::getTableEpoch ( const int  db_id,
const int  tb_id 
)

Definition at line 512 of file DataMgr.cpp.

References bufferMgrs_, getGlobalFileMgr(), and File_Namespace::GlobalFileMgr::getTableEpoch().

512  {
513  GlobalFileMgr* gfm;
514  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
515  return gfm->getTableEpoch(db_id, tb_id);
516 }
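A sketch pairing getTableEpoch() with setTableEpoch() (documented below); whether rolling a table back by one epoch is operationally valid depends on the catalog state, so this only illustrates the call sequence:

// Read a table's current epoch and, hypothetically, roll it back by one.
void rollback_epoch_example(Data_Namespace::DataMgr& data_mgr,
                            const int db_id,
                            const int tb_id) {
  const size_t epoch = data_mgr.getTableEpoch(db_id, tb_id);
  if (epoch > 0) {
    data_mgr.setTableEpoch(db_id, tb_id, static_cast<int>(epoch - 1));
  }
}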

◆ getTotalSystemMemory()

size_t Data_Namespace::DataMgr::getTotalSystemMemory ( )
static

Definition at line 142 of file DataMgr.cpp.

Referenced by populateMgrs().

142  {
143 #ifdef __APPLE__
144  int mib[2];
145  size_t physical_memory;
146  size_t length;
147  // Get the Physical memory size
148  mib[0] = CTL_HW;
149  mib[1] = HW_MEMSIZE;
150  length = sizeof(size_t);
151  sysctl(mib, 2, &physical_memory, &length, NULL, 0);
152  return physical_memory;
153 
154 #else // Linux
155  long pages = sysconf(_SC_PHYS_PAGES);
156  long page_size = sysconf(_SC_PAGE_SIZE);
157  return pages * page_size;
158 #endif
159 }

◆ gpusPresent()

bool Data_Namespace::DataMgr::gpusPresent ( )
inline

Definition at line 203 of file DataMgr.h.

Referenced by get_available_gpus().

203 { return hasGpus_; }

◆ isBufferOnDevice()

bool Data_Namespace::DataMgr::isBufferOnDevice ( const ChunkKey &  key,
const MemoryLevel  memLevel,
const int  deviceId 
)

Definition at line 396 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

Referenced by Chunk_NS::Chunk::isChunkOnDevice().

398  {
399  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
400  return bufferMgrs_[memLevel][deviceId]->isBufferOnDevice(key);
401 }

◆ populateMgrs()

void Data_Namespace::DataMgr::populateMgrs ( const SystemParameters &  system_parameters,
const size_t  userSpecifiedNumReaderThreads,
const DiskCacheConfig &  cache_config 
)
private

Definition at line 176 of file DataMgr.cpp.

References bufferMgrs_, SystemParameters::cpu_buffer_mem_bytes, PersistentStorageMgr::createPersistentStorageMgr(), cudaMgr_, dataDir_, getTotalSystemMemory(), SystemParameters::gpu_buffer_mem_bytes, hasGpus_, logger::INFO, levelSizes_, LOG, SystemParameters::max_cpu_slab_size, SystemParameters::max_gpu_slab_size, SystemParameters::min_cpu_slab_size, SystemParameters::min_gpu_slab_size, reservedGpuMem_, and VLOG.

Referenced by DataMgr(), and resetPersistentStorage().

178  {
179  // no need for locking, as this is only called in the constructor
180  bufferMgrs_.resize(2);
181  bufferMgrs_[0].push_back(PersistentStorageMgr::createPersistentStorageMgr(
182  dataDir_, userSpecifiedNumReaderThreads, cache_config));
183 
184  levelSizes_.push_back(1);
185  size_t page_size{512};
186  size_t cpuBufferSize = system_parameters.cpu_buffer_mem_bytes;
187  if (cpuBufferSize == 0) { // if size is not specified
188  const auto total_system_memory = getTotalSystemMemory();
189  VLOG(1) << "Detected " << (float)total_system_memory / (1024 * 1024)
190  << "M of total system memory.";
191  cpuBufferSize = total_system_memory *
192  0.8; // should get free memory instead of this ugly heuristic
193  }
194  size_t minCpuSlabSize = std::min(system_parameters.min_cpu_slab_size, cpuBufferSize);
195  minCpuSlabSize = (minCpuSlabSize / page_size) * page_size;
196  size_t maxCpuSlabSize = std::min(system_parameters.max_cpu_slab_size, cpuBufferSize);
197  maxCpuSlabSize = (maxCpuSlabSize / page_size) * page_size;
198  LOG(INFO) << "Min CPU Slab Size is " << (float)minCpuSlabSize / (1024 * 1024) << "MB";
199  LOG(INFO) << "Max CPU Slab Size is " << (float)maxCpuSlabSize / (1024 * 1024) << "MB";
200  LOG(INFO) << "Max memory pool size for CPU is " << (float)cpuBufferSize / (1024 * 1024)
201  << "MB";
202  if (hasGpus_) {
203  LOG(INFO) << "Reserved GPU memory is " << (float)reservedGpuMem_ / (1024 * 1024)
204  << "MB includes render buffer allocation";
205  bufferMgrs_.resize(3);
206  bufferMgrs_[1].push_back(new CpuBufferMgr(0,
207  cpuBufferSize,
208  cudaMgr_.get(),
209  minCpuSlabSize,
210  maxCpuSlabSize,
211  page_size,
212  bufferMgrs_[0][0]));
213  levelSizes_.push_back(1);
214  int numGpus = cudaMgr_->getDeviceCount();
215  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
216  size_t gpuMaxMemSize =
217  system_parameters.gpu_buffer_mem_bytes != 0
218  ? system_parameters.gpu_buffer_mem_bytes
219  : (cudaMgr_->getDeviceProperties(gpuNum)->globalMem) - (reservedGpuMem_);
220  size_t minGpuSlabSize =
221  std::min(system_parameters.min_gpu_slab_size, gpuMaxMemSize);
222  minGpuSlabSize = (minGpuSlabSize / page_size) * page_size;
223  size_t maxGpuSlabSize =
224  std::min(system_parameters.max_gpu_slab_size, gpuMaxMemSize);
225  maxGpuSlabSize = (maxGpuSlabSize / page_size) * page_size;
226  LOG(INFO) << "Min GPU Slab size for GPU " << gpuNum << " is "
227  << (float)minGpuSlabSize / (1024 * 1024) << "MB";
228  LOG(INFO) << "Max GPU Slab size for GPU " << gpuNum << " is "
229  << (float)maxGpuSlabSize / (1024 * 1024) << "MB";
230  LOG(INFO) << "Max memory pool size for GPU " << gpuNum << " is "
231  << (float)gpuMaxMemSize / (1024 * 1024) << "MB";
232  bufferMgrs_[2].push_back(new GpuCudaBufferMgr(gpuNum,
233  gpuMaxMemSize,
234  cudaMgr_.get(),
235  minGpuSlabSize,
236  maxGpuSlabSize,
237  page_size,
238  bufferMgrs_[1][0]));
239  }
240  levelSizes_.push_back(numGpus);
241  } else {
242  bufferMgrs_[1].push_back(new CpuBufferMgr(0,
243  cpuBufferSize,
244  cudaMgr_.get(),
245  minCpuSlabSize,
246  maxCpuSlabSize,
247  page_size,
248  bufferMgrs_[0][0]));
249  levelSizes_.push_back(1);
250  }
251 }
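The resulting hierarchy is visible through the public levelSizes_ member: index 0 holds the single persistent-storage manager, index 1 the single CPU manager, and index 2 (when GPUs are present) one manager per GPU. A sketch that prints it:

#include <iostream>

// Print how many buffer managers exist at each memory level after construction.
void print_level_sizes_example(const Data_Namespace::DataMgr& data_mgr) {
  for (size_t level = 0; level < data_mgr.levelSizes_.size(); ++level) {
    std::cout << "level " << level << ": " << data_mgr.levelSizes_[level]
              << " buffer manager(s)\n";
  }
}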

◆ removeTableRelatedDS()

void Data_Namespace::DataMgr::removeTableRelatedDS ( const int  db_id,
const int  tb_id 
)

Definition at line 501 of file DataMgr.cpp.

References buffer_access_mutex_, and bufferMgrs_.

501  {
502  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
503  bufferMgrs_[0][0]->removeTableRelatedDS(db_id, tb_id);
504 }

◆ resetPersistentStorage()

void Data_Namespace::DataMgr::resetPersistentStorage ( const DiskCacheConfig &  cache_config,
const size_t  num_reader_threads,
const SystemParameters &  sys_params 
)

Definition at line 162 of file DataMgr.cpp.

References bufferMgrs_, createTopLevelMetadata(), run_benchmark_import::level, and populateMgrs().

164  {
165  int numLevels = bufferMgrs_.size();
166  for (int level = numLevels - 1; level >= 0; --level) {
167  for (size_t device = 0; device < bufferMgrs_[level].size(); device++) {
168  delete bufferMgrs_[level][device];
169  }
170  }
171  bufferMgrs_.clear();
172  populateMgrs(sys_params, num_reader_threads, cache_config);
173  createTopLevelMetadata();
174 }

◆ setTableEpoch()

void Data_Namespace::DataMgr::setTableEpoch ( const int  db_id,
const int  tb_id,
const int  start_epoch 
)

Definition at line 506 of file DataMgr.cpp.

References bufferMgrs_, getGlobalFileMgr(), and File_Namespace::GlobalFileMgr::setTableEpoch().

506  {
507  GlobalFileMgr* gfm;
508  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
509  gfm->setTableEpoch(db_id, tb_id, start_epoch);
510 }

Friends And Related Function Documentation

◆ GlobalFileMgr

friend class GlobalFileMgr
friend

Definition at line 161 of file DataMgr.h.

Member Data Documentation

◆ buffer_access_mutex_

std::mutex Data_Namespace::DataMgr::buffer_access_mutex_
private

Definition at line 245 of file DataMgr.h.

◆ bufferMgrs_

std::vector<std::vector<AbstractBufferMgr *> > Data_Namespace::DataMgr::bufferMgrs_
private

Definition at line 240 of file DataMgr.h.

◆ cudaMgr_

std::unique_ptr<CudaMgr_Namespace::CudaMgr> Data_Namespace::DataMgr::cudaMgr_
private

Definition at line 241 of file DataMgr.h.

Referenced by clearMemory(), DataMgr(), dumpLevel(), getMemoryInfo(), and populateMgrs().

◆ dataDir_

std::string Data_Namespace::DataMgr::dataDir_
private

Definition at line 242 of file DataMgr.h.

Referenced by populateMgrs().

◆ hasGpus_

bool Data_Namespace::DataMgr::hasGpus_
private

Definition at line 243 of file DataMgr.h.

Referenced by DataMgr(), getMemoryInfo(), and populateMgrs().

◆ levelSizes_

std::vector<int> Data_Namespace::DataMgr::levelSizes_

Definition at line 212 of file DataMgr.h.

Referenced by alloc(), deleteChunksWithPrefix(), getChunkBuffer(), and populateMgrs().

◆ reservedGpuMem_

size_t Data_Namespace::DataMgr::reservedGpuMem_
private

Definition at line 244 of file DataMgr.h.

Referenced by DataMgr(), and populateMgrs().


The documentation for this class was generated from the following files:

DataMgr.h
DataMgr.cpp