OmniSciDB 91042dcc5b
DataMgr.cpp
1 /*
2  * Copyright 2020 OmniSci, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
22 #include "DataMgr/DataMgr.h"
26 #include "CudaMgr/CudaMgr.h"
28 #include "FileMgr/GlobalFileMgr.h"
30 
31 #ifdef __APPLE__
32 #include <sys/sysctl.h>
33 #include <sys/types.h>
34 #endif
35 
36 #include <boost/filesystem.hpp>
37 
38 #include <algorithm>
39 #include <limits>
40 
41 extern bool g_enable_fsi;
42 
43 #ifdef ENABLE_MEMKIND
44 bool g_enable_tiered_cpu_mem{false};
45 std::string g_pmem_path{};
46 size_t g_pmem_size{0};
47 #endif
48 
49 namespace Data_Namespace {
50 
51 DataMgr::DataMgr(const std::string& dataDir,
52  const SystemParameters& system_parameters,
53  std::unique_ptr<CudaMgr_Namespace::CudaMgr> cudaMgr,
54  const bool useGpus,
55  const size_t reservedGpuMem,
56  const size_t numReaderThreads,
57  const File_Namespace::DiskCacheConfig cache_config)
58  : cudaMgr_{std::move(cudaMgr)}
59  , dataDir_{dataDir}
60  , hasGpus_{false}
61  , reservedGpuMem_{reservedGpuMem} {
62  if (useGpus) {
63  if (cudaMgr_) {
64  hasGpus_ = true;
65  } else {
66  LOG(ERROR) << "CudaMgr instance is invalid, falling back to CPU-only mode.";
67  hasGpus_ = false;
68  }
69  } else {
70  // NOTE: useGpus == false with a valid cudaMgr is a potentially valid configuration.
71  // i.e. QueryEngine can be set to cpu-only for a cuda-enabled build, but still have
72  // rendering enabled. The renderer would require a CudaMgr in this case, in addition
73  // to a GpuCudaBufferMgr for cuda-backed thrust allocations.
74  // We still set hasGpus_ to false in that case, though, to enforce cpu-only query
75  // execution.
76  hasGpus_ = false;
77  }
78 
79  populateMgrs(system_parameters, numReaderThreads, cache_config);
80  createTopLevelMetadata();
81 }
82 
83 DataMgr::~DataMgr() {
84  int numLevels = bufferMgrs_.size();
85  for (int level = numLevels - 1; level >= 0; --level) {
86  for (size_t device = 0; device < bufferMgrs_[level].size(); device++) {
87  delete bufferMgrs_[level][device];
88  }
89  }
90 }
91 
92 DataMgr::SystemMemoryUsage DataMgr::getSystemMemoryUsage() const {
93  SystemMemoryUsage usage;
94 
95 #ifdef __linux__
96 
97  // Determine Linux available memory and total memory.
98  // Available memory is different from free memory because
99  // when Linux sees free memory, it tries to use it for
100  // stuff like disk caching. However, the memory is not
101  // reserved and is still available to be allocated by
102  // user processes.
103  // Parsing /proc/meminfo for this info isn't very elegant
104  // but as a virtual file it should be reasonably fast.
105  // See also:
106  // https://github.com/torvalds/linux/commit/34e431b0ae398fc54ea69ff85ec700722c9da773
107  ProcMeminfoParser mi;
108  usage.free = mi["MemAvailable"];
109  usage.total = mi["MemTotal"];
110 
111  // Determine process memory in use.
112  // See also:
113  // https://stackoverflow.com/questions/669438/how-to-get-memory-usage-at-runtime-using-c
114  // http://man7.org/linux/man-pages/man5/proc.5.html
115  int64_t size = 0;
116  int64_t resident = 0;
117  int64_t shared = 0;
118 
119  std::ifstream fstatm("/proc/self/statm");
120  fstatm >> size >> resident >> shared;
121  fstatm.close();
122 
123  long page_size =
124  sysconf(_SC_PAGE_SIZE); // in case x86-64 is configured to use 2MB pages
125 
126  usage.resident = resident * page_size;
127  usage.vtotal = size * page_size;
128  usage.regular = (resident - shared) * page_size;
129  usage.shared = shared * page_size;
130 
131  ProcBuddyinfoParser bi{};
132  usage.frag = bi.getFragmentationPercent();
133 
134 #else
135 
136  usage.total = 0;
137  usage.free = 0;
138  usage.resident = 0;
139  usage.vtotal = 0;
140  usage.regular = 0;
141  usage.shared = 0;
142  usage.frag = 0;
143 
144 #endif
145 
146  return usage;
147 }
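The helper types used above (the meminfo parser assigned to mi and the buddyinfo parser assigned to bi) are declared in DataMgr.h. As a rough, illustrative sketch of the kind of parsing such a helper performs over /proc/meminfo (an assumption for orientation, not the project's actual parser):

// Illustrative sketch only: read "/proc/meminfo" lines such as
//   "MemAvailable:   12345678 kB"
// into a map keyed by field name, converting kB values to bytes.
#include <cstdint>
#include <fstream>
#include <map>
#include <sstream>
#include <string>

std::map<std::string, int64_t> read_meminfo_bytes() {
  std::map<std::string, int64_t> fields;
  std::ifstream meminfo("/proc/meminfo");
  std::string line;
  while (std::getline(meminfo, line)) {
    std::istringstream iss(line);
    std::string key, unit;
    int64_t value = 0;
    if (iss >> key >> value) {
      if (!key.empty() && key.back() == ':') {
        key.pop_back();  // strip the trailing colon from "MemAvailable:"
      }
      iss >> unit;  // usually "kB"; absent for pure counts like HugePages_Total
      fields[key] = (unit == "kB") ? value * 1024 : value;
    }
  }
  return fields;
}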
148 
149 size_t DataMgr::getTotalSystemMemory() {
150 #ifdef __APPLE__
151  int mib[2];
152  size_t physical_memory;
153  size_t length;
154  // Get the Physical memory size
155  mib[0] = CTL_HW;
156  mib[1] = HW_MEMSIZE;
157  length = sizeof(size_t);
158  sysctl(mib, 2, &physical_memory, &length, NULL, 0);
159  return physical_memory;
160 #elif defined(_MSC_VER)
161  MEMORYSTATUSEX status;
162  status.dwLength = sizeof(status);
163  GlobalMemoryStatusEx(&status);
164  return status.ullTotalPhys;
165 #else // Linux
166  long pages = sysconf(_SC_PHYS_PAGES);
167  long page_size = sysconf(_SC_PAGE_SIZE);
168  return pages * page_size;
169 #endif
170 }
171 
172 void DataMgr::allocateCpuBufferMgr(int32_t device_id,
173  size_t total_cpu_size,
174  size_t minCpuSlabSize,
175  size_t maxCpuSlabSize,
176  size_t page_size,
177  const CpuTierSizeVector& cpu_tier_sizes) {
178 #ifdef ENABLE_MEMKIND
179  if (g_enable_tiered_cpu_mem) {
180  bufferMgrs_[1].push_back(new Buffer_Namespace::TieredCpuBufferMgr(0,
181  total_cpu_size,
182  cudaMgr_.get(),
183  minCpuSlabSize,
184  maxCpuSlabSize,
185  page_size,
186  cpu_tier_sizes,
187  bufferMgrs_[0][0]));
188  return;
189  }
190 #endif
191 
192  bufferMgrs_[1].push_back(new Buffer_Namespace::CpuBufferMgr(0,
193  total_cpu_size,
194  cudaMgr_.get(),
195  minCpuSlabSize,
196  maxCpuSlabSize,
197  page_size,
198  bufferMgrs_[0][0]));
199 }
200 
201 // This function exists for testing purposes so that we can test a reset of the cache.
202 void DataMgr::resetPersistentStorage(const File_Namespace::DiskCacheConfig& cache_config,
203  const size_t num_reader_threads,
204  const SystemParameters& sys_params) {
205  int numLevels = bufferMgrs_.size();
206  for (int level = numLevels - 1; level >= 0; --level) {
207  for (size_t device = 0; device < bufferMgrs_[level].size(); device++) {
208  delete bufferMgrs_[level][device];
209  }
210  }
211  bufferMgrs_.clear();
212  populateMgrs(sys_params, num_reader_threads, cache_config);
213  createTopLevelMetadata();
214 }
215 
216 void DataMgr::populateMgrs(const SystemParameters& system_parameters,
217  const size_t userSpecifiedNumReaderThreads,
218  const File_Namespace::DiskCacheConfig& cache_config) {
219  // no need for locking, as this is only called in the constructor
220  bufferMgrs_.resize(2);
221  bufferMgrs_[0].push_back(
222  new PersistentStorageMgr(dataDir_, userSpecifiedNumReaderThreads, cache_config));
223 
224  levelSizes_.push_back(1);
225  size_t page_size{512};
226  size_t cpuBufferSize = system_parameters.cpu_buffer_mem_bytes;
227  if (cpuBufferSize == 0) { // if size is not specified
228  const auto total_system_memory = getTotalSystemMemory();
229  VLOG(1) << "Detected " << (float)total_system_memory / (1024 * 1024)
230  << "M of total system memory.";
231  cpuBufferSize = total_system_memory *
232  0.8; // should get free memory instead of this ugly heuristic
233  }
234  size_t minCpuSlabSize = std::min(system_parameters.min_cpu_slab_size, cpuBufferSize);
235  minCpuSlabSize = (minCpuSlabSize / page_size) * page_size;
236  size_t maxCpuSlabSize = std::min(system_parameters.max_cpu_slab_size, cpuBufferSize);
237  maxCpuSlabSize = (maxCpuSlabSize / page_size) * page_size;
238  LOG(INFO) << "Min CPU Slab Size is " << (float)minCpuSlabSize / (1024 * 1024) << "MB";
239  LOG(INFO) << "Max CPU Slab Size is " << (float)maxCpuSlabSize / (1024 * 1024) << "MB";
240  LOG(INFO) << "Max memory pool size for CPU is " << (float)cpuBufferSize / (1024 * 1024)
241  << "MB";
242 
243  size_t total_cpu_size = 0;
244 
245 #ifdef ENABLE_MEMKIND
246  CpuTierSizeVector cpu_tier_sizes(numCpuTiers, 0);
247  cpu_tier_sizes[CpuTier::DRAM] = cpuBufferSize;
248  if (g_enable_tiered_cpu_mem) {
249  cpu_tier_sizes[CpuTier::PMEM] = g_pmem_size;
250  LOG(INFO) << "Max memory pool size for PMEM is " << (float)g_pmem_size / (1024 * 1024)
251  << "MB";
252  }
253  for (auto cpu_tier_size : cpu_tier_sizes) {
254  total_cpu_size += cpu_tier_size;
255  }
256 #else
257  CpuTierSizeVector cpu_tier_sizes{};
258  total_cpu_size = cpuBufferSize;
259 #endif
260 
261  if (hasGpus_ || cudaMgr_) {
262  LOG(INFO) << "Reserved GPU memory is " << (float)reservedGpuMem_ / (1024 * 1024)
263  << "MB includes render buffer allocation";
264  bufferMgrs_.resize(3);
265  allocateCpuBufferMgr(
266  0, total_cpu_size, minCpuSlabSize, maxCpuSlabSize, page_size, cpu_tier_sizes);
267 
268  levelSizes_.push_back(1);
269  int numGpus = cudaMgr_->getDeviceCount();
270  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
271  size_t gpuMaxMemSize =
272  system_parameters.gpu_buffer_mem_bytes != 0
273  ? system_parameters.gpu_buffer_mem_bytes
274  : (cudaMgr_->getDeviceProperties(gpuNum)->globalMem) - (reservedGpuMem_);
275  size_t minGpuSlabSize =
276  std::min(system_parameters.min_gpu_slab_size, gpuMaxMemSize);
277  minGpuSlabSize = (minGpuSlabSize / page_size) * page_size;
278  size_t maxGpuSlabSize =
279  std::min(system_parameters.max_gpu_slab_size, gpuMaxMemSize);
280  maxGpuSlabSize = (maxGpuSlabSize / page_size) * page_size;
281  LOG(INFO) << "Min GPU Slab size for GPU " << gpuNum << " is "
282  << (float)minGpuSlabSize / (1024 * 1024) << "MB";
283  LOG(INFO) << "Max GPU Slab size for GPU " << gpuNum << " is "
284  << (float)maxGpuSlabSize / (1024 * 1024) << "MB";
285  LOG(INFO) << "Max memory pool size for GPU " << gpuNum << " is "
286  << (float)gpuMaxMemSize / (1024 * 1024) << "MB";
287  bufferMgrs_[2].push_back(new Buffer_Namespace::GpuCudaBufferMgr(gpuNum,
288  gpuMaxMemSize,
289  cudaMgr_.get(),
290  minGpuSlabSize,
291  maxGpuSlabSize,
292  page_size,
293  bufferMgrs_[1][0]));
294  }
295  levelSizes_.push_back(numGpus);
296  } else {
297  allocateCpuBufferMgr(
298  0, total_cpu_size, minCpuSlabSize, maxCpuSlabSize, page_size, cpu_tier_sizes);
299  levelSizes_.push_back(1);
300  }
301 }
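For orientation, a sketch (not part of the file) of the hierarchy populateMgrs() leaves behind on a hypothetical machine with two GPUs; the level indices line up with the MemoryLevel::DISK_LEVEL, CPU_LEVEL and GPU_LEVEL values used elsewhere in this file:

// Illustrative layout only (assumes two GPUs and a CUDA-enabled build):
//   bufferMgrs_[0] = { PersistentStorageMgr }                        levelSizes_[0] == 1
//   bufferMgrs_[1] = { CpuBufferMgr, parent = bufferMgrs_[0][0] }    levelSizes_[1] == 1
//   bufferMgrs_[2] = { one GpuCudaBufferMgr per GPU,
//                      parent = bufferMgrs_[1][0] }                  levelSizes_[2] == 2
// Each manager is constructed with the manager one level below it as its parent store,
// so GPU-level requests can fall back to CPU memory and CPU-level requests to disk.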
302 
303 void DataMgr::convertDB(const std::string basePath) {
304  // no need for locking, as this is only called in the constructor
305 
306  /* check that "mapd_data" directory exists and it's empty */
307  std::string mapdDataPath(basePath + "/../mapd_data/");
308  boost::filesystem::path path(mapdDataPath);
309  if (boost::filesystem::exists(path)) {
310  if (!boost::filesystem::is_directory(path)) {
311  LOG(FATAL) << "Path to directory mapd_data to convert DB is not a directory.";
312  }
313  } else { // data directory does not exist
314  LOG(FATAL) << "Path to directory mapd_data to convert DB does not exist.";
315  }
316 
317  File_Namespace::GlobalFileMgr* gfm{nullptr};
318  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
319  CHECK(gfm);
320 
321  size_t defaultPageSize = gfm->getDefaultPageSize();
322  LOG(INFO) << "Database conversion started.";
323  File_Namespace::FileMgr* fm_base_db = new File_Namespace::FileMgr(
324  gfm,
325  defaultPageSize,
326  basePath); // this call also copies data into new DB structure
327  delete fm_base_db;
328 
329  /* write content of DB into newly created/converted DB structure & location */
330  checkpoint(); // outputs data files as well as metadata files
331  LOG(INFO) << "Database conversion completed.";
332 }
333 
334 void DataMgr::createTopLevelMetadata()
335  const { // create metadata shared by all tables of all DBs
336  ChunkKey chunkKey(2);
337  chunkKey[0] = 0; // top level db_id
338  chunkKey[1] = 0; // top level tb_id
339 
340  File_Namespace::GlobalFileMgr* gfm{nullptr};
341  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
342  CHECK(gfm);
343 
344  auto fm_top = gfm->getFileMgr(chunkKey);
345  if (dynamic_cast<File_Namespace::FileMgr*>(fm_top)) {
346  static_cast<File_Namespace::FileMgr*>(fm_top)->createTopLevelMetadata();
347  }
348 }
349 
350 std::vector<MemoryInfo> DataMgr::getMemoryInfo(const MemoryLevel mem_level) const {
351  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
352  return getMemoryInfoUnlocked(mem_level);
353 }
354 
355 std::vector<MemoryInfo> DataMgr::getMemoryInfoUnlocked(
356  const MemoryLevel mem_level) const {
357  std::vector<MemoryInfo> mem_info;
358  if (mem_level == MemoryLevel::CPU_LEVEL) {
359  Buffer_Namespace::CpuBufferMgr* cpu_buffer =
360  dynamic_cast<Buffer_Namespace::CpuBufferMgr*>(
361  bufferMgrs_[MemoryLevel::CPU_LEVEL][0]);
362  CHECK(cpu_buffer);
363  MemoryInfo mi;
364 
365  mi.pageSize = cpu_buffer->getPageSize();
366  mi.maxNumPages = cpu_buffer->getMaxSize() / mi.pageSize;
367  mi.isAllocationCapped = cpu_buffer->isAllocationCapped();
368  mi.numPageAllocated = cpu_buffer->getAllocated() / mi.pageSize;
369 
370  const auto& slab_segments = cpu_buffer->getSlabSegments();
371  for (size_t slab_num = 0; slab_num < slab_segments.size(); ++slab_num) {
372  for (auto segment : slab_segments[slab_num]) {
373  MemoryData md;
374  md.slabNum = slab_num;
375  md.startPage = segment.start_page;
376  md.numPages = segment.num_pages;
377  md.touch = segment.last_touched;
378  md.memStatus = segment.mem_status;
379  md.chunk_key.insert(
380  md.chunk_key.end(), segment.chunk_key.begin(), segment.chunk_key.end());
381  mi.nodeMemoryData.push_back(md);
382  }
383  }
384  mem_info.push_back(mi);
385  } else if (hasGpus_) {
386  int numGpus = cudaMgr_->getDeviceCount();
387  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
388  Buffer_Namespace::GpuCudaBufferMgr* gpu_buffer =
389  dynamic_cast<Buffer_Namespace::GpuCudaBufferMgr*>(
390  bufferMgrs_[MemoryLevel::GPU_LEVEL][gpuNum]);
391  CHECK(gpu_buffer);
392  MemoryInfo mi;
393 
394  mi.pageSize = gpu_buffer->getPageSize();
395  mi.maxNumPages = gpu_buffer->getMaxSize() / mi.pageSize;
396  mi.isAllocationCapped = gpu_buffer->isAllocationCapped();
397  mi.numPageAllocated = gpu_buffer->getAllocated() / mi.pageSize;
398 
399  const auto& slab_segments = gpu_buffer->getSlabSegments();
400  for (size_t slab_num = 0; slab_num < slab_segments.size(); ++slab_num) {
401  for (auto segment : slab_segments[slab_num]) {
402  MemoryData md;
403  md.slabNum = slab_num;
404  md.startPage = segment.start_page;
405  md.numPages = segment.num_pages;
406  md.touch = segment.last_touched;
407  md.chunk_key.insert(
408  md.chunk_key.end(), segment.chunk_key.begin(), segment.chunk_key.end());
409  md.memStatus = segment.mem_status;
410  mi.nodeMemoryData.push_back(md);
411  }
412  }
413  mem_info.push_back(mi);
414  }
415  }
416  return mem_info;
417 }
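As a small usage sketch (hypothetical caller code, not part of this file), the MemoryInfo records returned above can be reduced to a simple utilization figure per memory level:

// Hypothetical helper built on the public getMemoryInfo() API shown above.
#include <cstddef>

double cpuPageUtilization(const Data_Namespace::DataMgr& data_mgr) {
  const auto infos = data_mgr.getMemoryInfo(Data_Namespace::MemoryLevel::CPU_LEVEL);
  size_t allocated_pages = 0;
  size_t max_pages = 0;
  for (const auto& mi : infos) {
    allocated_pages += mi.numPageAllocated;
    max_pages += mi.maxNumPages;
  }
  return max_pages == 0 ? 0.0 : static_cast<double>(allocated_pages) / max_pages;
}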
418 
419 std::string DataMgr::dumpLevel(const MemoryLevel memLevel) {
420  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
421 
422  // if gpu we need to iterate through all the buffermanagers for each card
423  if (memLevel == MemoryLevel::GPU_LEVEL) {
424  int numGpus = cudaMgr_->getDeviceCount();
425  std::ostringstream tss;
426  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
427  tss << bufferMgrs_[memLevel][gpuNum]->printSlabs();
428  }
429  return tss.str();
430  } else {
431  return bufferMgrs_[memLevel][0]->printSlabs();
432  }
433 }
434 
435 void DataMgr::clearMemory(const MemoryLevel memLevel) {
436  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
437 
438  // if gpu we need to iterate through all the buffermanagers for each card
439  if (memLevel == MemoryLevel::GPU_LEVEL) {
440  if (cudaMgr_) {
441  int numGpus = cudaMgr_->getDeviceCount();
442  for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
443  LOG(INFO) << "clear slabs on gpu " << gpuNum;
444  auto buffer_mgr_for_gpu =
445  dynamic_cast<Buffer_Namespace::BufferMgr*>(bufferMgrs_[memLevel][gpuNum]);
446  CHECK(buffer_mgr_for_gpu);
447  buffer_mgr_for_gpu->clearSlabs();
448  }
449  } else {
450  LOG(WARNING) << "Unable to clear GPU memory: No GPUs detected";
451  }
452  } else {
453  auto buffer_mgr_for_cpu =
454  dynamic_cast<Buffer_Namespace::BufferMgr*>(bufferMgrs_[memLevel][0]);
455  CHECK(buffer_mgr_for_cpu);
456  buffer_mgr_for_cpu->clearSlabs();
457  }
458 }
459 
460 bool DataMgr::isBufferOnDevice(const ChunkKey& key,
461  const MemoryLevel memLevel,
462  const int deviceId) {
463  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
464  return bufferMgrs_[memLevel][deviceId]->isBufferOnDevice(key);
465 }
466 
467 void DataMgr::getChunkMetadataVecForKeyPrefix(ChunkMetadataVector& chunkMetadataVec,
468  const ChunkKey& keyPrefix) {
469  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
470  bufferMgrs_[0][0]->getChunkMetadataVecForKeyPrefix(chunkMetadataVec, keyPrefix);
471 }
472 
473 AbstractBuffer* DataMgr::createChunkBuffer(const ChunkKey& key,
474  const MemoryLevel memoryLevel,
475  const int deviceId,
476  const size_t page_size) {
477  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
478  int level = static_cast<int>(memoryLevel);
479  return bufferMgrs_[level][deviceId]->createBuffer(key, page_size);
480 }
481 
482 AbstractBuffer* DataMgr::getChunkBuffer(const ChunkKey& key,
483  const MemoryLevel memoryLevel,
484  const int deviceId,
485  const size_t numBytes) {
486  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
487  const auto level = static_cast<size_t>(memoryLevel);
488  CHECK_LT(level, levelSizes_.size()); // make sure we have a legit buffermgr
489  CHECK_LT(deviceId, levelSizes_[level]); // make sure we have a legit buffermgr
490  return bufferMgrs_[level][deviceId]->getBuffer(key, numBytes);
491 }
492 
493 void DataMgr::deleteChunksWithPrefix(const ChunkKey& keyPrefix) {
494  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
495 
496  int numLevels = bufferMgrs_.size();
497  for (int level = numLevels - 1; level >= 0; --level) {
498  for (int device = 0; device < levelSizes_[level]; ++device) {
499  bufferMgrs_[level][device]->deleteBuffersWithPrefix(keyPrefix);
500  }
501  }
502 }
503 
504 // only deletes the chunks at the given memory level
505 void DataMgr::deleteChunksWithPrefix(const ChunkKey& keyPrefix,
506  const MemoryLevel memLevel) {
507  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
508 
509  if (bufferMgrs_.size() <= memLevel) {
510  return;
511  }
512  for (int device = 0; device < levelSizes_[memLevel]; ++device) {
513  bufferMgrs_[memLevel][device]->deleteBuffersWithPrefix(keyPrefix);
514  }
515 }
516 
517 AbstractBuffer* DataMgr::alloc(const MemoryLevel memoryLevel,
518  const int deviceId,
519  const size_t numBytes) {
520  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
521  const auto level = static_cast<int>(memoryLevel);
522  CHECK_LT(deviceId, levelSizes_[level]);
523  return bufferMgrs_[level][deviceId]->alloc(numBytes);
524 }
525 
526 std::unique_ptr<DeviceAllocator> DataMgr::createGpuAllocator(int device_id) {
527 #ifdef HAVE_CUDA
528  return std::make_unique<CudaAllocator>(this, device_id);
529 #else
530  UNREACHABLE();
531  return nullptr; // avoid warning/error
532 #endif
533 }
534 
535 void DataMgr::free(AbstractBuffer* buffer) {
536  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
537  int level = static_cast<int>(buffer->getType());
538  bufferMgrs_[level][buffer->getDeviceId()]->free(buffer);
539 }
540 
541 void DataMgr::copy(AbstractBuffer* destBuffer, AbstractBuffer* srcBuffer) {
542  destBuffer->write(srcBuffer->getMemoryPtr(),
543  srcBuffer->size(),
544  0,
545  srcBuffer->getType(),
546  srcBuffer->getDeviceId());
547 }
548 
549 // could add function below to do arbitrary copies between buffers
550 
551 // void DataMgr::copy(AbstractBuffer *destBuffer, const AbstractBuffer *srcBuffer, const
552 // size_t numBytes, const size_t destOffset, const size_t srcOffset) {
553 //} /
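The commented-out prototype above is never implemented in this file; a minimal sketch of what such an arbitrary-offset copy might look like (an assumption, reusing the AbstractBuffer interface already exercised by copy() above):

// Hypothetical free function, not a DataMgr member in this file.
void copyRange(Data_Namespace::AbstractBuffer* dest_buffer,
               Data_Namespace::AbstractBuffer* src_buffer,
               const size_t num_bytes,
               const size_t dest_offset,
               const size_t src_offset) {
  // Write num_bytes taken from src_offset within the source into the destination at
  // dest_offset, preserving the source's memory level and device id as copy() does.
  dest_buffer->write(src_buffer->getMemoryPtr() + src_offset,
                     num_bytes,
                     dest_offset,
                     src_buffer->getType(),
                     src_buffer->getDeviceId());
}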
554 
555 void DataMgr::checkpoint(const int db_id, const int tb_id) {
556  // TODO(adb): do we need a buffer mgr lock here?
557  // MAT Yes to reduce Parallel Executor TSAN issues (and correctness for now)
558  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
559  for (auto levelIt = bufferMgrs_.rbegin(); levelIt != bufferMgrs_.rend(); ++levelIt) {
560  // use reverse iterator so we start at GPU level, then CPU then DISK
561  for (auto deviceIt = levelIt->begin(); deviceIt != levelIt->end(); ++deviceIt) {
562  (*deviceIt)->checkpoint(db_id, tb_id);
563  }
564  }
565 }
566 
567 void DataMgr::checkpoint(const int db_id,
568  const int table_id,
569  const MemoryLevel memory_level) {
570  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
571  CHECK_LT(static_cast<size_t>(memory_level), bufferMgrs_.size());
572  CHECK_LT(static_cast<size_t>(memory_level), levelSizes_.size());
573  for (int device_id = 0; device_id < levelSizes_[memory_level]; device_id++) {
574  bufferMgrs_[memory_level][device_id]->checkpoint(db_id, table_id);
575  }
576 }
577 
578 void DataMgr::checkpoint() {
579  // TODO(adb): SAA
580  // MAT Yes to reduce Parallel Executor TSAN issues (and correctness for now)
581  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
582  for (auto levelIt = bufferMgrs_.rbegin(); levelIt != bufferMgrs_.rend(); ++levelIt) {
583  // use reverse iterator so we start at GPU level, then CPU then DISK
584  for (auto deviceIt = levelIt->begin(); deviceIt != levelIt->end(); ++deviceIt) {
585  (*deviceIt)->checkpoint();
586  }
587  }
588 }
589 
590 void DataMgr::removeTableRelatedDS(const int db_id, const int tb_id) {
591  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
592  bufferMgrs_[0][0]->removeTableRelatedDS(db_id, tb_id);
593 }
594 
595 void DataMgr::setTableEpoch(const int db_id, const int tb_id, const int start_epoch) {
596  File_Namespace::GlobalFileMgr* gfm{nullptr};
597  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
598  CHECK(gfm);
599  gfm->setTableEpoch(db_id, tb_id, start_epoch);
600 }
601 
602 size_t DataMgr::getTableEpoch(const int db_id, const int tb_id) {
603  File_Namespace::GlobalFileMgr* gfm{nullptr};
604  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
605  CHECK(gfm);
606  return gfm->getTableEpoch(db_id, tb_id);
607 }
608 
609 void DataMgr::resetTableEpochFloor(const int32_t db_id, const int32_t tb_id) {
610  File_Namespace::GlobalFileMgr* gfm{nullptr};
611  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
612  CHECK(gfm);
613  gfm->resetTableEpochFloor(db_id, tb_id);
614 }
615 
616 File_Namespace::GlobalFileMgr* DataMgr::getGlobalFileMgr() const {
617  File_Namespace::GlobalFileMgr* global_file_mgr{nullptr};
618  global_file_mgr =
619  dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
620  CHECK(global_file_mgr);
621  return global_file_mgr;
622 }
623 
624 std::shared_ptr<ForeignStorageInterface> DataMgr::getForeignStorageInterface() const {
625  return dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])
626  ->getForeignStorageInterface();
627 }
628 
629 std::ostream& operator<<(std::ostream& os, const DataMgr::SystemMemoryUsage& mem_info) {
630  os << "jsonlog ";
631  os << "{";
632  os << " \"name\": \"CPU Memory Info\",";
633  os << " \"TotalMB\": " << mem_info.total / (1024. * 1024.) << ",";
634  os << " \"FreeMB\": " << mem_info.free / (1024. * 1024.) << ",";
635  os << " \"ProcessMB\": " << mem_info.resident / (1024. * 1024.) << ",";
636  os << " \"VirtualMB\": " << mem_info.vtotal / (1024. * 1024.) << ",";
637  os << " \"ProcessPlusSwapMB\": " << mem_info.regular / (1024. * 1024.) << ",";
638  os << " \"ProcessSharedMB\": " << mem_info.shared / (1024. * 1024.) << ",";
639  os << " \"FragmentationPercent\": " << mem_info.frag;
640  os << " }";
641  return os;
642 }
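For reference, one hypothetical line as this operator would emit it (all values invented for illustration):

jsonlog { "name": "CPU Memory Info", "TotalMB": 257656, "FreeMB": 191923, "ProcessMB": 41210, "VirtualMB": 93876, "ProcessPlusSwapMB": 40112, "ProcessSharedMB": 1098, "FragmentationPercent": 7 }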
643 
644 PersistentStorageMgr* DataMgr::getPersistentStorageMgr() const {
645  return dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[MemoryLevel::DISK_LEVEL][0]);
646 }
647 
648 Buffer_Namespace::CpuBufferMgr* DataMgr::getCpuBufferMgr() const {
649  return dynamic_cast<Buffer_Namespace::CpuBufferMgr*>(
650  bufferMgrs_[MemoryLevel::CPU_LEVEL][0]);
651 }
652 
653 Buffer_Namespace::GpuCudaBufferMgr* DataMgr::getGpuBufferMgr(int32_t device_id) const {
654  if (bufferMgrs_.size() > MemoryLevel::GPU_LEVEL) {
655  CHECK_GT(bufferMgrs_[MemoryLevel::GPU_LEVEL].size(), static_cast<size_t>(device_id));
656  return dynamic_cast<Buffer_Namespace::GpuCudaBufferMgr*>(
657  bufferMgrs_[MemoryLevel::GPU_LEVEL][device_id]);
658  } else {
659  return nullptr;
660  }
661 }
662 
663 } // namespace Data_Namespace
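Finally, a hedged end-to-end sketch of how a caller might drive this class (hypothetical driver code: the data directory, chunk key values, and payload are invented; the constructor defaults are those shown in the DataMgr declaration referenced below; assumes a CPU-only configuration):

// Hypothetical usage sketch, not part of DataMgr.cpp.
#include "DataMgr/DataMgr.h"

#include <vector>

int main() {
  SystemParameters sys_params;  // default CPU/GPU pool and slab sizes
  Data_Namespace::DataMgr data_mgr("/var/lib/omnisci/data",  // invented data directory
                                   sys_params,
                                   nullptr,  // no CudaMgr
                                   false);   // useGpus = false -> CPU-only execution

  // Create a chunk buffer on the CPU level and write a few bytes into it.
  ChunkKey key{1 /* db_id */, 2 /* table_id */, 3 /* column_id */, 0 /* fragment_id */};
  auto* buffer = data_mgr.createChunkBuffer(key, Data_Namespace::MemoryLevel::CPU_LEVEL);
  std::vector<int8_t> payload(128, 0);
  buffer->write(payload.data(), payload.size());

  data_mgr.checkpoint(1, 2);  // flush db 1 / table 2 down the buffer hierarchy
  return 0;
}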
size_t getAllocated() override
Definition: BufferMgr.cpp:500
std::mutex buffer_access_mutex_
Definition: DataMgr.h:274
std::vector< int > ChunkKey
Definition: types.h:37
size_t g_pmem_size
void resetPersistentStorage(const File_Namespace::DiskCacheConfig &cache_config, const size_t num_reader_threads, const SystemParameters &sys_params)
Definition: DataMgr.cpp:202
std::vector< MemoryData > nodeMemoryData
Definition: DataMgr.h:74
Buffer_Namespace::MemStatus memStatus
Definition: DataMgr.h:66
size_t getMaxSize() override
Definition: BufferMgr.cpp:495
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
Definition: DataMgr.h:269
std::vector< int > levelSizes_
Definition: DataMgr.h:227
std::ostream & operator<<(std::ostream &os, const DataMgr::SystemMemoryUsage &mem_info)
Definition: DataMgr.cpp:629
#define LOG(tag)
Definition: Logger.h:205
SystemMemoryUsage getSystemMemoryUsage() const
Definition: DataMgr.cpp:92
PersistentStorageMgr * getPersistentStorageMgr() const
Definition: DataMgr.cpp:644
virtual int8_t * getMemoryPtr()=0
virtual MemoryLevel getType() const =0
void clearMemory(const MemoryLevel memLevel)
Definition: DataMgr.cpp:435
#define UNREACHABLE()
Definition: Logger.h:255
std::vector< MemoryInfo > getMemoryInfoUnlocked(const MemoryLevel memLevel) const
Definition: DataMgr.cpp:355
void resetTableEpochFloor(const int32_t db_id, const int32_t tb_id)
Definition: DataMgr.cpp:609
std::string dumpLevel(const MemoryLevel memLevel)
Definition: DataMgr.cpp:419
void convertDB(const std::string basePath)
Definition: DataMgr.cpp:303
#define CHECK_GT(x, y)
Definition: Logger.h:223
constexpr size_t numCpuTiers
static size_t getTotalSystemMemory()
Definition: DataMgr.cpp:149
Note(s): Forbid Copying Idiom 4.1.
Definition: BufferMgr.h:96
size_t getTableEpoch(const int db_id, const int tb_id)
Definition: DataMgr.cpp:602
Buffer_Namespace::GpuCudaBufferMgr * getGpuBufferMgr(int32_t device_id) const
Definition: DataMgr.cpp:653
std::shared_ptr< ForeignStorageInterface > getForeignStorageInterface() const
Definition: DataMgr.cpp:624
std::unique_ptr< DeviceAllocator > createGpuAllocator(int device_id)
Definition: DataMgr.cpp:526
void createTopLevelMetadata() const
Definition: DataMgr.cpp:334
bool isAllocationCapped() override
Definition: BufferMgr.cpp:505
std::string g_pmem_path
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
Definition: DataMgr.h:270
void getChunkMetadataVecForKeyPrefix(ChunkMetadataVector &chunkMetadataVec, const ChunkKey &keyPrefix)
Definition: DataMgr.cpp:467
void populateMgrs(const SystemParameters &system_parameters, const size_t userSpecifiedNumReaderThreads, const File_Namespace::DiskCacheConfig &cache_config)
Definition: DataMgr.cpp:216
std::vector< std::pair< ChunkKey, std::shared_ptr< ChunkMetadata >>> ChunkMetadataVector
An AbstractBuffer is a unit of data management for a data manager.
virtual void write(int8_t *src, const size_t num_bytes, const size_t offset=0, const MemoryLevel src_buffer_type=CPU_LEVEL, const int src_device_id=-1)=0
File_Namespace::GlobalFileMgr * getGlobalFileMgr() const
Definition: DataMgr.cpp:616
Parse /proc/meminfo into key/value pairs.
Definition: DataMgr.h:78
#define CHECK_LT(x, y)
Definition: Logger.h:221
void deleteChunksWithPrefix(const ChunkKey &keyPrefix)
Definition: DataMgr.cpp:493
const std::vector< BufferList > & getSlabSegments()
Definition: BufferMgr.cpp:886
bool isBufferOnDevice(const ChunkKey &key, const MemoryLevel memLevel, const int deviceId)
Definition: DataMgr.cpp:460
AbstractBuffer * getChunkBuffer(const ChunkKey &key, const MemoryLevel memoryLevel, const int deviceId=0, const size_t numBytes=0)
Definition: DataMgr.cpp:482
std::vector< MemoryInfo > getMemoryInfo(const MemoryLevel memLevel) const
Definition: DataMgr.cpp:350
void removeTableRelatedDS(const int db_id, const int tb_id)
Definition: DataMgr.cpp:590
DataMgr(const std::string &dataDir, const SystemParameters &system_parameters, std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr, const bool useGpus, const size_t reservedGpuMem=(1<< 27), const size_t numReaderThreads=0, const File_Namespace::DiskCacheConfig cacheConfig=File_Namespace::DiskCacheConfig())
Definition: DataMgr.cpp:51
Buffer_Namespace::CpuBufferMgr * getCpuBufferMgr() const
Definition: DataMgr.cpp:648
#define CHECK(condition)
Definition: Logger.h:211
void copy(AbstractBuffer *destBuffer, AbstractBuffer *srcBuffer)
Definition: DataMgr.cpp:541
std::vector< int32_t > chunk_key
Definition: DataMgr.h:65
AbstractBuffer * createChunkBuffer(const ChunkKey &key, const MemoryLevel memoryLevel, const int deviceId=0, const size_t page_size=0)
Definition: DataMgr.cpp:473
Allocate GPU memory using GpuBuffers via DataMgr.
void free(AbstractBuffer *buffer)
Definition: DataMgr.cpp:535
bool g_enable_fsi
Definition: Catalog.cpp:94
void allocateCpuBufferMgr(int32_t device_id, size_t total_cpu_size, size_t minCpuSlabSize, size_t maxCpuSlabSize, size_t page_size, const std::vector< size_t > &cpu_tier_sizes)
Definition: DataMgr.cpp:172
std::vector< size_t > CpuTierSizeVector
#define VLOG(n)
Definition: Logger.h:305
Parse /proc/buddyinfo into a Fragmentation health score.
Definition: DataMgr.h:111
void setTableEpoch(const int db_id, const int tb_id, const int start_epoch)
Definition: DataMgr.cpp:595
AbstractBuffer * alloc(const MemoryLevel memoryLevel, const int deviceId, const size_t numBytes)
Definition: DataMgr.cpp:517
std::string dataDir_
Definition: DataMgr.h:271