OmniSciDB  8a228a1076
DataMgr.cpp
/*
 * Copyright 2020 OmniSci, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "DataMgr/DataMgr.h"

#include "BufferMgr/CpuBufferMgr/CpuBufferMgr.h"
#include "BufferMgr/GpuCudaBufferMgr/GpuCudaBufferMgr.h"
#include "CudaMgr/CudaMgr.h"
#include "FileMgr/GlobalFileMgr.h"
#include "PersistentStorageMgr/PersistentStorageMgr.h"

#ifdef __APPLE__
#include <sys/sysctl.h>
#include <sys/types.h>
#else
#include <unistd.h>
#endif

#include <boost/filesystem.hpp>

#include <algorithm>
#include <fstream>
#include <limits>

using namespace std;
using namespace Buffer_Namespace;
using namespace File_Namespace;

extern bool g_enable_fsi;

namespace Data_Namespace {

DataMgr::DataMgr(const string& dataDir,
                 const SystemParameters& system_parameters,
                 const bool useGpus,
                 const int numGpus,
                 const int startGpu,
                 const size_t reservedGpuMem,
                 const size_t numReaderThreads,
                 const DiskCacheConfig cache_config)
    : dataDir_(dataDir) {
  if (useGpus) {
    try {
      cudaMgr_ = std::make_unique<CudaMgr_Namespace::CudaMgr>(numGpus, startGpu);
      reservedGpuMem_ = reservedGpuMem;
      hasGpus_ = true;
    } catch (const std::exception& e) {
      LOG(ERROR) << "Unable to instantiate CudaMgr, falling back to CPU-only mode. "
                 << e.what();
      hasGpus_ = false;
    }
  } else {
    hasGpus_ = false;
  }

  populateMgrs(system_parameters, numReaderThreads, cache_config);
  createTopLevelMetadata();
}

DataMgr::~DataMgr() {
  int numLevels = bufferMgrs_.size();
  for (int level = numLevels - 1; level >= 0; --level) {
    for (size_t device = 0; device < bufferMgrs_[level].size(); device++) {
      delete bufferMgrs_[level][device];
    }
  }
}

DataMgr::SystemMemoryUsage DataMgr::getSystemMemoryUsage() const {
  SystemMemoryUsage usage;

#ifdef __linux__

  // Determine Linux available memory and total memory.
  // Available memory is different from free memory because
  // when Linux sees free memory, it tries to use it for
  // stuff like disk caching. However, the memory is not
  // reserved and is still available to be allocated by
  // user processes.
  // Parsing /proc/meminfo for this info isn't very elegant
  // but as a virtual file it should be reasonably fast.
  // See also:
  // https://github.com/torvalds/linux/commit/34e431b0ae398fc54ea69ff85ec700722c9da773
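  //
  // For reference, the two fields read below appear in /proc/meminfo as lines
  // like these (values in kB; numbers illustrative):
  //   MemTotal:       32617628 kB
  //   MemAvailable:   25208940 kB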
  ProcMeminfoParser mi;
  usage.free = mi["MemAvailable"];
  usage.total = mi["MemTotal"];

  // Determine process memory in use.
  // See also:
  // https://stackoverflow.com/questions/669438/how-to-get-memory-usage-at-runtime-using-c
  // http://man7.org/linux/man-pages/man5/proc.5.html
  int64_t size = 0;
  int64_t resident = 0;
  int64_t shared = 0;

  std::ifstream fstatm("/proc/self/statm");
  fstatm >> size >> resident >> shared;
  fstatm.close();

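  // /proc/self/statm holds whole-page counts on a single line, in the order
  // "size resident shared text lib data dt" (e.g. "24757 3917 1193 ..."),
  // which is why each field below is scaled by the page size.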
  long page_size =
      sysconf(_SC_PAGE_SIZE);  // in case x86-64 is configured to use 2MB pages

  usage.resident = resident * page_size;
  usage.vtotal = size * page_size;
  usage.regular = (resident - shared) * page_size;
  usage.shared = shared * page_size;

  ProcBuddyinfoParser bi;
  usage.frag = bi.getFragmentationPercent();

#else

  usage.total = 0;
  usage.free = 0;
  usage.resident = 0;
  usage.vtotal = 0;
  usage.regular = 0;
  usage.shared = 0;
  usage.frag = 0;

#endif

  return usage;
}

size_t DataMgr::getTotalSystemMemory() {
#ifdef __APPLE__
  int mib[2];
  size_t physical_memory;
  size_t length;
  // Get the physical memory size.
  mib[0] = CTL_HW;
  mib[1] = HW_MEMSIZE;
  length = sizeof(size_t);
  sysctl(mib, 2, &physical_memory, &length, NULL, 0);
  return physical_memory;

#else  // Linux
  long pages = sysconf(_SC_PHYS_PAGES);
  long page_size = sysconf(_SC_PAGE_SIZE);
  return pages * page_size;
#endif
}

// This function exists for testing purposes so that we can test a reset of the cache.
void DataMgr::resetPersistentStorage(const DiskCacheConfig& cache_config,
                                     const size_t num_reader_threads,
                                     const SystemParameters& sys_params) {
  int numLevels = bufferMgrs_.size();
  for (int level = numLevels - 1; level >= 0; --level) {
    for (size_t device = 0; device < bufferMgrs_[level].size(); device++) {
      delete bufferMgrs_[level][device];
    }
  }
  bufferMgrs_.clear();
  populateMgrs(sys_params, num_reader_threads, cache_config);
  createTopLevelMetadata();
}

void DataMgr::populateMgrs(const SystemParameters& system_parameters,
                           const size_t userSpecifiedNumReaderThreads,
                           const DiskCacheConfig& cache_config) {
  // No locking is needed: this is only called from the constructor and from the
  // test-only resetPersistentStorage().
  bufferMgrs_.resize(2);
  if (g_enable_fsi) {
    bufferMgrs_[0].push_back(
        new PersistentStorageMgr(dataDir_, userSpecifiedNumReaderThreads, cache_config));
  } else {
    bufferMgrs_[0].push_back(
        new GlobalFileMgr(0, dataDir_, userSpecifiedNumReaderThreads));
  }
  levelSizes_.push_back(1);
  size_t page_size{512};
  size_t cpuBufferSize = system_parameters.cpu_buffer_mem_bytes;
  if (cpuBufferSize == 0) {  // if size is not specified
    const auto total_system_memory = getTotalSystemMemory();
    VLOG(1) << "Detected " << (float)total_system_memory / (1024 * 1024)
            << "M of total system memory.";
    cpuBufferSize = total_system_memory *
                    0.8;  // should get free memory instead of this ugly heuristic
  }
  size_t minCpuSlabSize = std::min(system_parameters.min_cpu_slab_size, cpuBufferSize);
  minCpuSlabSize = (minCpuSlabSize / page_size) * page_size;
  size_t maxCpuSlabSize = std::min(system_parameters.max_cpu_slab_size, cpuBufferSize);
  maxCpuSlabSize = (maxCpuSlabSize / page_size) * page_size;
  LOG(INFO) << "Min CPU Slab Size is " << (float)minCpuSlabSize / (1024 * 1024) << "MB";
  LOG(INFO) << "Max CPU Slab Size is " << (float)maxCpuSlabSize / (1024 * 1024) << "MB";
  LOG(INFO) << "Max memory pool size for CPU is " << (float)cpuBufferSize / (1024 * 1024)
            << "MB";
  if (hasGpus_) {
    LOG(INFO) << "Reserved GPU memory is " << (float)reservedGpuMem_ / (1024 * 1024)
              << "MB includes render buffer allocation";
    bufferMgrs_.resize(3);
    bufferMgrs_[1].push_back(new CpuBufferMgr(0,
                                              cpuBufferSize,
                                              cudaMgr_.get(),
                                              minCpuSlabSize,
                                              maxCpuSlabSize,
                                              page_size,
                                              bufferMgrs_[0][0]));
    levelSizes_.push_back(1);
    int numGpus = cudaMgr_->getDeviceCount();
    for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
      size_t gpuMaxMemSize =
          system_parameters.gpu_buffer_mem_bytes != 0
              ? system_parameters.gpu_buffer_mem_bytes
              : (cudaMgr_->getDeviceProperties(gpuNum)->globalMem) - (reservedGpuMem_);
      size_t minGpuSlabSize =
          std::min(system_parameters.min_gpu_slab_size, gpuMaxMemSize);
      minGpuSlabSize = (minGpuSlabSize / page_size) * page_size;
      size_t maxGpuSlabSize =
          std::min(system_parameters.max_gpu_slab_size, gpuMaxMemSize);
      maxGpuSlabSize = (maxGpuSlabSize / page_size) * page_size;
      LOG(INFO) << "Min GPU Slab size for GPU " << gpuNum << " is "
                << (float)minGpuSlabSize / (1024 * 1024) << "MB";
      LOG(INFO) << "Max GPU Slab size for GPU " << gpuNum << " is "
                << (float)maxGpuSlabSize / (1024 * 1024) << "MB";
      LOG(INFO) << "Max memory pool size for GPU " << gpuNum << " is "
                << (float)gpuMaxMemSize / (1024 * 1024) << "MB";
      bufferMgrs_[2].push_back(new GpuCudaBufferMgr(gpuNum,
                                                    gpuMaxMemSize,
                                                    cudaMgr_.get(),
                                                    minGpuSlabSize,
                                                    maxGpuSlabSize,
                                                    page_size,
                                                    bufferMgrs_[1][0]));
    }
    levelSizes_.push_back(numGpus);
  } else {
    bufferMgrs_[1].push_back(new CpuBufferMgr(0,
                                              cpuBufferSize,
                                              cudaMgr_.get(),
                                              minCpuSlabSize,
                                              maxCpuSlabSize,
                                              page_size,
                                              bufferMgrs_[0][0]));
    levelSizes_.push_back(1);
  }
}
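
// A sketch of the hierarchy built above (outer index corresponds to MemoryLevel):
//   bufferMgrs_[0]: one disk-level manager (PersistentStorageMgr when FSI is
//                   enabled, otherwise GlobalFileMgr)
//   bufferMgrs_[1]: one CpuBufferMgr, constructed with the disk manager as its
//                   parent
//   bufferMgrs_[2]: one GpuCudaBufferMgr per GPU, each constructed with the
//                   CpuBufferMgr as its parent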

void DataMgr::convertDB(const std::string basePath) {
  // no need for locking, as this is only called in the constructor

  /* check that the "mapd_data" directory exists and is a directory */
  std::string mapdDataPath(basePath + "/../mapd_data/");
  boost::filesystem::path path(mapdDataPath);
  if (boost::filesystem::exists(path)) {
    if (!boost::filesystem::is_directory(path)) {
      LOG(FATAL) << "Path to directory mapd_data to convert DB is not a directory.";
    }
  } else {  // data directory does not exist
    LOG(FATAL) << "Path to directory mapd_data to convert DB does not exist.";
  }

  GlobalFileMgr* gfm;
  if (g_enable_fsi) {
    gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
  } else {
    gfm = dynamic_cast<GlobalFileMgr*>(bufferMgrs_[0][0]);
  }
  size_t defaultPageSize = gfm->getDefaultPageSize();
  LOG(INFO) << "Database conversion started.";
  FileMgr* fm_base_db =
      new FileMgr(gfm,
                  defaultPageSize,
                  basePath);  // this call also copies data into the new DB structure
  delete fm_base_db;

  /* write content of DB into newly created/converted DB structure & location */
  checkpoint();  // outputs data files as well as metadata files
  LOG(INFO) << "Database conversion completed.";
}

void DataMgr::createTopLevelMetadata()
    const {  // create metadata shared by all tables of all DBs
  ChunkKey chunkKey(2);
  chunkKey[0] = 0;  // top level db_id
  chunkKey[1] = 0;  // top level tb_id

  GlobalFileMgr* gfm;
  if (g_enable_fsi) {
    gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
  } else {
    gfm = dynamic_cast<GlobalFileMgr*>(bufferMgrs_[0][0]);
  }
  auto fm_top = gfm->getFileMgr(chunkKey);
  if (dynamic_cast<File_Namespace::FileMgr*>(fm_top)) {
    static_cast<File_Namespace::FileMgr*>(fm_top)->createTopLevelMetadata();
  }
}

std::vector<MemoryInfo> DataMgr::getMemoryInfo(const MemoryLevel memLevel) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);

  std::vector<MemoryInfo> mem_info;
  if (memLevel == MemoryLevel::CPU_LEVEL) {
    CpuBufferMgr* cpu_buffer =
        dynamic_cast<CpuBufferMgr*>(bufferMgrs_[MemoryLevel::CPU_LEVEL][0]);
    CHECK(cpu_buffer);
    MemoryInfo mi;

    mi.pageSize = cpu_buffer->getPageSize();
    mi.maxNumPages = cpu_buffer->getMaxSize() / mi.pageSize;
    mi.isAllocationCapped = cpu_buffer->isAllocationCapped();
    mi.numPageAllocated = cpu_buffer->getAllocated() / mi.pageSize;

    const auto& slab_segments = cpu_buffer->getSlabSegments();
    for (size_t slab_num = 0; slab_num < slab_segments.size(); ++slab_num) {
      for (auto segment : slab_segments[slab_num]) {
        MemoryData md;
        md.slabNum = slab_num;
        md.startPage = segment.start_page;
        md.numPages = segment.num_pages;
        md.touch = segment.last_touched;
        md.memStatus = segment.mem_status;
        md.chunk_key.insert(
            md.chunk_key.end(), segment.chunk_key.begin(), segment.chunk_key.end());
        mi.nodeMemoryData.push_back(md);
      }
    }
    mem_info.push_back(mi);
  } else if (hasGpus_) {
    int numGpus = cudaMgr_->getDeviceCount();
    for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
      GpuCudaBufferMgr* gpu_buffer =
          dynamic_cast<GpuCudaBufferMgr*>(bufferMgrs_[MemoryLevel::GPU_LEVEL][gpuNum]);
      CHECK(gpu_buffer);
      MemoryInfo mi;

      mi.pageSize = gpu_buffer->getPageSize();
      mi.maxNumPages = gpu_buffer->getMaxSize() / mi.pageSize;
      mi.isAllocationCapped = gpu_buffer->isAllocationCapped();
      mi.numPageAllocated = gpu_buffer->getAllocated() / mi.pageSize;

      const auto& slab_segments = gpu_buffer->getSlabSegments();
      for (size_t slab_num = 0; slab_num < slab_segments.size(); ++slab_num) {
        for (auto segment : slab_segments[slab_num]) {
          MemoryData md;
          md.slabNum = slab_num;
          md.startPage = segment.start_page;
          md.numPages = segment.num_pages;
          md.touch = segment.last_touched;
          md.chunk_key.insert(
              md.chunk_key.end(), segment.chunk_key.begin(), segment.chunk_key.end());
          md.memStatus = segment.mem_status;
          mi.nodeMemoryData.push_back(md);
        }
      }
      mem_info.push_back(mi);
    }
  }
  return mem_info;
}

std::string DataMgr::dumpLevel(const MemoryLevel memLevel) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);

  // For the GPU level we need to iterate through the buffer managers for all cards.
  if (memLevel == MemoryLevel::GPU_LEVEL) {
    int numGpus = cudaMgr_->getDeviceCount();
    std::ostringstream tss;
    for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
      tss << bufferMgrs_[memLevel][gpuNum]->printSlabs();
    }
    return tss.str();
  } else {
    return bufferMgrs_[memLevel][0]->printSlabs();
  }
}

void DataMgr::clearMemory(const MemoryLevel memLevel) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);

  // For the GPU level we need to iterate through the buffer managers for all cards.
  if (memLevel == MemoryLevel::GPU_LEVEL) {
    if (cudaMgr_) {
      int numGpus = cudaMgr_->getDeviceCount();
      for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
        LOG(INFO) << "clear slabs on gpu " << gpuNum;
        bufferMgrs_[memLevel][gpuNum]->clearSlabs();
      }
    } else {
      throw std::runtime_error("Unable to clear GPU memory: No GPUs detected");
    }
  } else {
    bufferMgrs_[memLevel][0]->clearSlabs();
  }
}

bool DataMgr::isBufferOnDevice(const ChunkKey& key,
                               const MemoryLevel memLevel,
                               const int deviceId) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  return bufferMgrs_[memLevel][deviceId]->isBufferOnDevice(key);
}

void DataMgr::getChunkMetadataVec(ChunkMetadataVector& chunkMetadataVec) {
  // Can we always assume this will just be at the disk level because we just
  // started?
  // Access to this object is locked by the file mgr.
  bufferMgrs_[0][0]->getChunkMetadataVec(chunkMetadataVec);
}

void DataMgr::getChunkMetadataVecForKeyPrefix(ChunkMetadataVector& chunkMetadataVec,
                                              const ChunkKey& keyPrefix) {
  bufferMgrs_[0][0]->getChunkMetadataVecForKeyPrefix(chunkMetadataVec, keyPrefix);
}

AbstractBuffer* DataMgr::createChunkBuffer(const ChunkKey& key,
                                           const MemoryLevel memoryLevel,
                                           const int deviceId,
                                           const size_t page_size) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  int level = static_cast<int>(memoryLevel);
  return bufferMgrs_[level][deviceId]->createBuffer(key, page_size);
}

AbstractBuffer* DataMgr::getChunkBuffer(const ChunkKey& key,
                                        const MemoryLevel memoryLevel,
                                        const int deviceId,
                                        const size_t numBytes) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  const auto level = static_cast<size_t>(memoryLevel);
  CHECK_LT(level, levelSizes_.size());     // make sure we have a legit buffermgr
  CHECK_LT(deviceId, levelSizes_[level]);  // make sure we have a legit buffermgr
  return bufferMgrs_[level][deviceId]->getBuffer(key, numBytes);
}
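
// A minimal usage sketch (hypothetical values; by OmniSci convention a ChunkKey
// is {db_id, tb_id, column_id, fragment_id}):
//   ChunkKey key{1, 10, 2, 0};
//   AbstractBuffer* buf =
//       data_mgr.getChunkBuffer(key, MemoryLevel::CPU_LEVEL, 0, num_bytes);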

void DataMgr::deleteChunksWithPrefix(const ChunkKey& keyPrefix) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);

  int numLevels = bufferMgrs_.size();
  for (int level = numLevels - 1; level >= 0; --level) {
    for (int device = 0; device < levelSizes_[level]; ++device) {
      bufferMgrs_[level][device]->deleteBuffersWithPrefix(keyPrefix);
    }
  }
}

// only deletes the chunks at the given memory level
void DataMgr::deleteChunksWithPrefix(const ChunkKey& keyPrefix,
                                     const MemoryLevel memLevel) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);

  if (bufferMgrs_.size() <= memLevel) {
    return;
  }
  for (int device = 0; device < levelSizes_[memLevel]; ++device) {
    bufferMgrs_[memLevel][device]->deleteBuffersWithPrefix(keyPrefix);
  }
}

AbstractBuffer* DataMgr::alloc(const MemoryLevel memoryLevel,
                               const int deviceId,
                               const size_t numBytes) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  const auto level = static_cast<int>(memoryLevel);
  CHECK_LT(deviceId, levelSizes_[level]);
  return bufferMgrs_[level][deviceId]->alloc(numBytes);
}

void DataMgr::free(AbstractBuffer* buffer) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  int level = static_cast<int>(buffer->getType());
  bufferMgrs_[level][buffer->getDeviceId()]->free(buffer);
}

void DataMgr::copy(AbstractBuffer* destBuffer, AbstractBuffer* srcBuffer) {
  destBuffer->write(srcBuffer->getMemoryPtr(),
                    srcBuffer->size(),
                    0,
                    srcBuffer->getType(),
                    srcBuffer->getDeviceId());
}

// We could add the function below to do arbitrary copies between buffers:

// void DataMgr::copy(AbstractBuffer *destBuffer, const AbstractBuffer *srcBuffer, const
// size_t numBytes, const size_t destOffset, const size_t srcOffset) {
//}
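
// A sketch of a possible body for that overload (hypothetical and untested;
// assumes the source buffer's memory pointer is valid for the full range):
//   destBuffer->write(srcBuffer->getMemoryPtr() + srcOffset,
//                     numBytes,
//                     destOffset,
//                     srcBuffer->getType(),
//                     srcBuffer->getDeviceId());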

void DataMgr::checkpoint(const int db_id, const int tb_id) {
  // TODO(adb): do we need a buffer mgr lock here?
  for (auto levelIt = bufferMgrs_.rbegin(); levelIt != bufferMgrs_.rend(); ++levelIt) {
    // use a reverse iterator so we start at the GPU level, then CPU, then disk
    for (auto deviceIt = levelIt->begin(); deviceIt != levelIt->end(); ++deviceIt) {
      (*deviceIt)->checkpoint(db_id, tb_id);
    }
  }
}

void DataMgr::checkpoint() {
  // TODO(adb): SAA
  for (auto levelIt = bufferMgrs_.rbegin(); levelIt != bufferMgrs_.rend(); ++levelIt) {
    // use a reverse iterator so we start at the GPU level, then CPU, then disk
    for (auto deviceIt = levelIt->begin(); deviceIt != levelIt->end(); ++deviceIt) {
      (*deviceIt)->checkpoint();
    }
  }
}

void DataMgr::removeTableRelatedDS(const int db_id, const int tb_id) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  bufferMgrs_[0][0]->removeTableRelatedDS(db_id, tb_id);
}

void DataMgr::setTableEpoch(const int db_id, const int tb_id, const int start_epoch) {
  GlobalFileMgr* gfm;
  if (g_enable_fsi) {
    gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
  } else {
    gfm = dynamic_cast<GlobalFileMgr*>(bufferMgrs_[0][0]);
  }
  gfm->setTableEpoch(db_id, tb_id, start_epoch);
}

size_t DataMgr::getTableEpoch(const int db_id, const int tb_id) {
  GlobalFileMgr* gfm;
  if (g_enable_fsi) {
    gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
  } else {
    gfm = dynamic_cast<GlobalFileMgr*>(bufferMgrs_[0][0]);
  }
  return gfm->getTableEpoch(db_id, tb_id);
}

GlobalFileMgr* DataMgr::getGlobalFileMgr() const {
  GlobalFileMgr* global_file_mgr;
  if (g_enable_fsi) {
    global_file_mgr =
        dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
  } else {
    global_file_mgr = dynamic_cast<GlobalFileMgr*>(bufferMgrs_[0][0]);
  }
  CHECK(global_file_mgr);
  return global_file_mgr;
}

std::ostream& operator<<(std::ostream& os, const DataMgr::SystemMemoryUsage& mem_info) {
  os << "jsonlog ";
  os << "{";
  os << " \"name\": \"CPU Memory Info\",";
  os << " \"TotalMB\": " << mem_info.total / (1024. * 1024.) << ",";
  os << " \"FreeMB\": " << mem_info.free / (1024. * 1024.) << ",";
  os << " \"ProcessMB\": " << mem_info.resident / (1024. * 1024.) << ",";
  os << " \"VirtualMB\": " << mem_info.vtotal / (1024. * 1024.) << ",";
  os << " \"ProcessPlusSwapMB\": " << mem_info.regular / (1024. * 1024.) << ",";
  os << " \"ProcessSharedMB\": " << mem_info.shared / (1024. * 1024.) << ",";
  os << " \"FragmentationPercent\": " << mem_info.frag;
  os << " }";
  return os;
}
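
// Example of the emitted log line (printed as a single line; values illustrative):
//   jsonlog { "name": "CPU Memory Info", "TotalMB": 32243.4, "FreeMB": 25120.1,
//   "ProcessMB": 512.3, "VirtualMB": 1024.0, "ProcessPlusSwapMB": 498.7,
//   "ProcessSharedMB": 13.6, "FragmentationPercent": 1.5 }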

foreign_storage::ForeignStorageMgr* DataMgr::getForeignStorageMgr() const {
  if (g_enable_fsi) {
    return dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getForeignStorageMgr();
  }
  return nullptr;
}

}  // namespace Data_Namespace