DataMgr.cpp
/*
 * Copyright 2020 OmniSci, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "DataMgr/DataMgr.h"
#include "BufferMgr/CpuBufferMgr/CpuBufferMgr.h"
#include "BufferMgr/GpuCudaBufferMgr/GpuCudaBufferMgr.h"
#include "CudaMgr/CudaMgr.h"
#include "FileMgr/GlobalFileMgr.h"
#include "PersistentStorageMgr/PersistentStorageMgr.h"

#ifdef __APPLE__
#include <sys/sysctl.h>
#include <sys/types.h>
#endif

#include <boost/filesystem.hpp>

#include <algorithm>
#include <fstream>
#include <limits>
#include <sstream>

extern bool g_enable_fsi;

namespace Data_Namespace {

DataMgr::DataMgr(const std::string& dataDir,
                 const SystemParameters& system_parameters,
                 std::unique_ptr<CudaMgr_Namespace::CudaMgr> cudaMgr,
                 const bool useGpus,
                 const size_t reservedGpuMem,
                 const size_t numReaderThreads,
                 const File_Namespace::DiskCacheConfig cache_config)
    : cudaMgr_{std::move(cudaMgr)}
    , dataDir_{dataDir}
    , hasGpus_{false}
    , reservedGpuMem_{reservedGpuMem} {
  if (useGpus) {
    if (cudaMgr_) {
      hasGpus_ = true;
    } else {
      LOG(ERROR) << "CudaMgr instance is invalid, falling back to CPU-only mode.";
      hasGpus_ = false;
    }
  } else {
    // NOTE: useGpus == false with a valid cudaMgr is a potentially valid
    // configuration. E.g. QueryEngine can be set to CPU-only for a CUDA-enabled
    // build, but still have rendering enabled. The renderer would require a
    // CudaMgr in this case, in addition to a GpuCudaBufferMgr for CUDA-backed
    // thrust allocations. We still set hasGpus_ to false in that case, though,
    // to enforce CPU-only query execution.
    hasGpus_ = false;
  }

  populateMgrs(system_parameters, numReaderThreads, cache_config);
  createTopLevelMetadata();
}
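
// A minimal construction sketch (hypothetical caller; values are illustrative,
// parameter defaults taken from the declaration in DataMgr.h):
//   SystemParameters params;
//   Data_Namespace::DataMgr data_mgr("/var/lib/omnisci/data",
//                                    params,
//                                    /*cudaMgr=*/nullptr,
//                                    /*useGpus=*/false,
//                                    /*reservedGpuMem=*/1 << 27,
//                                    /*numReaderThreads=*/0,
//                                    File_Namespace::DiskCacheConfig());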

DataMgr::~DataMgr() {
  int numLevels = bufferMgrs_.size();
  for (int level = numLevels - 1; level >= 0; --level) {
    for (size_t device = 0; device < bufferMgrs_[level].size(); device++) {
      delete bufferMgrs_[level][device];
    }
  }
}

DataMgr::SystemMemoryUsage DataMgr::getSystemMemoryUsage() const {
  SystemMemoryUsage usage;

#ifdef __linux__

  // Determine Linux available memory and total memory.
  // Available memory is different from free memory because
  // when Linux sees free memory, it tries to use it for
  // things like disk caching. However, that memory is not
  // reserved and is still available to be allocated by
  // user processes.
  // Parsing /proc/meminfo for this info isn't very elegant,
  // but as a virtual file it should be reasonably fast.
  // See also:
  //   https://github.com/torvalds/linux/commit/34e431b0ae398fc54ea69ff85ec700722c9da773
  ProcMeminfoParser mi;
  usage.free = mi["MemAvailable"];
  usage.total = mi["MemTotal"];

  // Determine process memory in use.
  // See also:
  //   https://stackoverflow.com/questions/669438/how-to-get-memory-usage-at-runtime-using-c
  //   http://man7.org/linux/man-pages/man5/proc.5.html
  int64_t size = 0;
  int64_t resident = 0;
  int64_t shared = 0;

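  // Per proc(5), the first three fields of /proc/self/statm are total program
  // size, resident set size, and shared resident pages, all counted in pages,
  // hence the page_size scaling below.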
  std::ifstream fstatm("/proc/self/statm");
  fstatm >> size >> resident >> shared;
  fstatm.close();

  long page_size =
      sysconf(_SC_PAGE_SIZE);  // in case x86-64 is configured to use 2MB pages

  usage.resident = resident * page_size;
  usage.vtotal = size * page_size;
  usage.regular = (resident - shared) * page_size;
  usage.shared = shared * page_size;

  ProcBuddyinfoParser bi;
  usage.frag = bi.getFragmentationPercent();

#else

  usage.total = 0;
  usage.free = 0;
  usage.resident = 0;
  usage.vtotal = 0;
  usage.regular = 0;
  usage.shared = 0;
  usage.frag = 0;

#endif

  return usage;
}
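
// Example use (hypothetical caller): the returned struct streams as a one-line
// JSON log entry via the operator<< defined at the bottom of this file, e.g.
//   LOG(INFO) << data_mgr.getSystemMemoryUsage();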

size_t DataMgr::getTotalSystemMemory() {
#ifdef __APPLE__
  int mib[2];
  size_t physical_memory;
  size_t length;
  // Get the physical memory size
  mib[0] = CTL_HW;
  mib[1] = HW_MEMSIZE;
  length = sizeof(size_t);
  sysctl(mib, 2, &physical_memory, &length, NULL, 0);
  return physical_memory;
#elif defined(_MSC_VER)
  MEMORYSTATUSEX status;
  status.dwLength = sizeof(status);
  GlobalMemoryStatusEx(&status);
  return status.ullTotalPhys;
#else  // Linux
  long pages = sysconf(_SC_PHYS_PAGES);
  long page_size = sysconf(_SC_PAGE_SIZE);
  return pages * page_size;
#endif
}

// This function exists for testing purposes so that we can test a reset of the cache.
void DataMgr::resetPersistentStorage(const File_Namespace::DiskCacheConfig& cache_config,
                                     const size_t num_reader_threads,
                                     const SystemParameters& sys_params) {
  int numLevels = bufferMgrs_.size();
  for (int level = numLevels - 1; level >= 0; --level) {
    for (size_t device = 0; device < bufferMgrs_[level].size(); device++) {
      delete bufferMgrs_[level][device];
    }
  }
  bufferMgrs_.clear();
  populateMgrs(sys_params, num_reader_threads, cache_config);
  createTopLevelMetadata();
}

void DataMgr::populateMgrs(const SystemParameters& system_parameters,
                           const size_t userSpecifiedNumReaderThreads,
                           const File_Namespace::DiskCacheConfig& cache_config) {
  // no need for locking, as this is only called in the constructor
  bufferMgrs_.resize(2);
  bufferMgrs_[0].push_back(PersistentStorageMgr::createPersistentStorageMgr(
      dataDir_, userSpecifiedNumReaderThreads, cache_config));

  levelSizes_.push_back(1);
  size_t page_size{512};
  size_t cpuBufferSize = system_parameters.cpu_buffer_mem_bytes;
  if (cpuBufferSize == 0) {  // if size is not specified
    const auto total_system_memory = getTotalSystemMemory();
    VLOG(1) << "Detected " << (float)total_system_memory / (1024 * 1024)
            << "MB of total system memory.";
    cpuBufferSize = total_system_memory *
                    0.8;  // should get free memory instead of this ugly heuristic
  }
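  // Round the slab bounds down to a whole number of pages so slab allocations
  // stay page-aligned; the std::min calls cap each bound at the pool size.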
  size_t minCpuSlabSize = std::min(system_parameters.min_cpu_slab_size, cpuBufferSize);
  minCpuSlabSize = (minCpuSlabSize / page_size) * page_size;
  size_t maxCpuSlabSize = std::min(system_parameters.max_cpu_slab_size, cpuBufferSize);
  maxCpuSlabSize = (maxCpuSlabSize / page_size) * page_size;
  LOG(INFO) << "Min CPU Slab Size is " << (float)minCpuSlabSize / (1024 * 1024) << "MB";
  LOG(INFO) << "Max CPU Slab Size is " << (float)maxCpuSlabSize / (1024 * 1024) << "MB";
  LOG(INFO) << "Max memory pool size for CPU is " << (float)cpuBufferSize / (1024 * 1024)
            << "MB";
  if (hasGpus_ || cudaMgr_) {
    LOG(INFO) << "Reserved GPU memory is " << (float)reservedGpuMem_ / (1024 * 1024)
              << "MB, which includes render buffer allocation";
    bufferMgrs_.resize(3);
    bufferMgrs_[1].push_back(new Buffer_Namespace::CpuBufferMgr(0,
                                                                cpuBufferSize,
                                                                cudaMgr_.get(),
                                                                minCpuSlabSize,
                                                                maxCpuSlabSize,
                                                                page_size,
                                                                bufferMgrs_[0][0]));
    levelSizes_.push_back(1);
    int numGpus = cudaMgr_->getDeviceCount();
    for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
      size_t gpuMaxMemSize =
          system_parameters.gpu_buffer_mem_bytes != 0
              ? system_parameters.gpu_buffer_mem_bytes
              : (cudaMgr_->getDeviceProperties(gpuNum)->globalMem) - (reservedGpuMem_);
      size_t minGpuSlabSize =
          std::min(system_parameters.min_gpu_slab_size, gpuMaxMemSize);
      minGpuSlabSize = (minGpuSlabSize / page_size) * page_size;
      size_t maxGpuSlabSize =
          std::min(system_parameters.max_gpu_slab_size, gpuMaxMemSize);
      maxGpuSlabSize = (maxGpuSlabSize / page_size) * page_size;
      LOG(INFO) << "Min GPU Slab size for GPU " << gpuNum << " is "
                << (float)minGpuSlabSize / (1024 * 1024) << "MB";
      LOG(INFO) << "Max GPU Slab size for GPU " << gpuNum << " is "
                << (float)maxGpuSlabSize / (1024 * 1024) << "MB";
      LOG(INFO) << "Max memory pool size for GPU " << gpuNum << " is "
                << (float)gpuMaxMemSize / (1024 * 1024) << "MB";
      bufferMgrs_[2].push_back(new Buffer_Namespace::GpuCudaBufferMgr(gpuNum,
                                                                      gpuMaxMemSize,
                                                                      cudaMgr_.get(),
                                                                      minGpuSlabSize,
                                                                      maxGpuSlabSize,
                                                                      page_size,
                                                                      bufferMgrs_[1][0]));
    }
    levelSizes_.push_back(numGpus);
  } else {
    bufferMgrs_[1].push_back(new Buffer_Namespace::CpuBufferMgr(0,
                                                                cpuBufferSize,
                                                                cudaMgr_.get(),
                                                                minCpuSlabSize,
                                                                maxCpuSlabSize,
                                                                page_size,
                                                                bufferMgrs_[0][0]));
    levelSizes_.push_back(1);
  }
}
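
// Resulting hierarchy (a sketch of what the code above builds; index 0 is the
// level furthest from the processor):
//   bufferMgrs_[0]: one PersistentStorageMgr                        (disk)
//   bufferMgrs_[1]: one CpuBufferMgr, parented to [0][0]            (CPU)
//   bufferMgrs_[2]: one GpuCudaBufferMgr per GPU, parented to [1][0]
// levelSizes_ mirrors this as {1, 1, numGpus}, or {1, 1} in CPU-only mode.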

void DataMgr::convertDB(const std::string basePath) {
  // no need for locking, as this is only called in the constructor

  /* check that the "mapd_data" directory exists and is a directory */
  std::string mapdDataPath(basePath + "/../mapd_data/");
  boost::filesystem::path path(mapdDataPath);
  if (boost::filesystem::exists(path)) {
    if (!boost::filesystem::is_directory(path)) {
      LOG(FATAL) << "Path to directory mapd_data to convert DB is not a directory.";
    }
  } else {  // data directory does not exist
    LOG(FATAL) << "Path to directory mapd_data to convert DB does not exist.";
  }

  File_Namespace::GlobalFileMgr* gfm{nullptr};
  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
  CHECK(gfm);

  size_t defaultPageSize = gfm->getDefaultPageSize();
  LOG(INFO) << "Database conversion started.";
  File_Namespace::FileMgr* fm_base_db = new File_Namespace::FileMgr(
      gfm,
      defaultPageSize,
      basePath);  // this call also copies data into the new DB structure
  delete fm_base_db;

  /* write content of DB into newly created/converted DB structure & location */
  checkpoint();  // outputs data files as well as metadata files
  LOG(INFO) << "Database conversion completed.";
}

void DataMgr::createTopLevelMetadata() const {
  // create metadata shared by all tables of all DBs
  ChunkKey chunkKey(2);
  chunkKey[0] = 0;  // top-level db_id
  chunkKey[1] = 0;  // top-level tb_id

  File_Namespace::GlobalFileMgr* gfm{nullptr};
  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
  CHECK(gfm);

  auto fm_top = gfm->getFileMgr(chunkKey);
  if (dynamic_cast<File_Namespace::FileMgr*>(fm_top)) {
    static_cast<File_Namespace::FileMgr*>(fm_top)->createTopLevelMetadata();
  }
}
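
// ChunkKeys are std::vector<int> values ordered as {db_id, tb_id, ...}; the
// {0, 0} key above addresses metadata shared across all databases and tables.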

std::vector<MemoryInfo> DataMgr::getMemoryInfo(const MemoryLevel memLevel) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);

  std::vector<MemoryInfo> mem_info;
  if (memLevel == MemoryLevel::CPU_LEVEL) {
    Buffer_Namespace::CpuBufferMgr* cpu_buffer =
        dynamic_cast<Buffer_Namespace::CpuBufferMgr*>(
            bufferMgrs_[MemoryLevel::CPU_LEVEL][0]);
    CHECK(cpu_buffer);
    MemoryInfo mi;

    mi.pageSize = cpu_buffer->getPageSize();
    mi.maxNumPages = cpu_buffer->getMaxSize() / mi.pageSize;
    mi.isAllocationCapped = cpu_buffer->isAllocationCapped();
    mi.numPageAllocated = cpu_buffer->getAllocated() / mi.pageSize;

    const auto& slab_segments = cpu_buffer->getSlabSegments();
    for (size_t slab_num = 0; slab_num < slab_segments.size(); ++slab_num) {
      for (auto segment : slab_segments[slab_num]) {
        MemoryData md;
        md.slabNum = slab_num;
        md.startPage = segment.start_page;
        md.numPages = segment.num_pages;
        md.touch = segment.last_touched;
        md.memStatus = segment.mem_status;
        md.chunk_key.insert(
            md.chunk_key.end(), segment.chunk_key.begin(), segment.chunk_key.end());
        mi.nodeMemoryData.push_back(md);
      }
    }
    mem_info.push_back(mi);
  } else if (hasGpus_) {
    int numGpus = cudaMgr_->getDeviceCount();
    for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
      Buffer_Namespace::GpuCudaBufferMgr* gpu_buffer =
          dynamic_cast<Buffer_Namespace::GpuCudaBufferMgr*>(
              bufferMgrs_[MemoryLevel::GPU_LEVEL][gpuNum]);
      CHECK(gpu_buffer);
      MemoryInfo mi;

      mi.pageSize = gpu_buffer->getPageSize();
      mi.maxNumPages = gpu_buffer->getMaxSize() / mi.pageSize;
      mi.isAllocationCapped = gpu_buffer->isAllocationCapped();
      mi.numPageAllocated = gpu_buffer->getAllocated() / mi.pageSize;

      const auto& slab_segments = gpu_buffer->getSlabSegments();
      for (size_t slab_num = 0; slab_num < slab_segments.size(); ++slab_num) {
        for (auto segment : slab_segments[slab_num]) {
          MemoryData md;
          md.slabNum = slab_num;
          md.startPage = segment.start_page;
          md.numPages = segment.num_pages;
          md.touch = segment.last_touched;
          md.chunk_key.insert(
              md.chunk_key.end(), segment.chunk_key.begin(), segment.chunk_key.end());
          md.memStatus = segment.mem_status;
          mi.nodeMemoryData.push_back(md);
        }
      }
      mem_info.push_back(mi);
    }
  }
  return mem_info;
}
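
// Example read of the returned snapshot (hypothetical caller):
//   auto infos = data_mgr.getMemoryInfo(MemoryLevel::CPU_LEVEL);
//   size_t free_pages = infos[0].maxNumPages - infos[0].numPageAllocated;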

std::string DataMgr::dumpLevel(const MemoryLevel memLevel) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);

  // for GPU, we need to iterate through the buffer managers for each card
  if (memLevel == MemoryLevel::GPU_LEVEL) {
    int numGpus = cudaMgr_->getDeviceCount();
    std::ostringstream tss;
    for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
      tss << bufferMgrs_[memLevel][gpuNum]->printSlabs();
    }
    return tss.str();
  } else {
    return bufferMgrs_[memLevel][0]->printSlabs();
  }
}

void DataMgr::clearMemory(const MemoryLevel memLevel) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);

  // for GPU, we need to iterate through the buffer managers for each card
  if (memLevel == MemoryLevel::GPU_LEVEL) {
    if (cudaMgr_) {
      int numGpus = cudaMgr_->getDeviceCount();
      for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
        LOG(INFO) << "clear slabs on gpu " << gpuNum;
        auto buffer_mgr_for_gpu =
            dynamic_cast<Buffer_Namespace::BufferMgr*>(bufferMgrs_[memLevel][gpuNum]);
        CHECK(buffer_mgr_for_gpu);
        buffer_mgr_for_gpu->clearSlabs();
      }
    } else {
      LOG(WARNING) << "Unable to clear GPU memory: No GPUs detected";
    }
  } else {
    auto buffer_mgr_for_cpu =
        dynamic_cast<Buffer_Namespace::BufferMgr*>(bufferMgrs_[memLevel][0]);
    CHECK(buffer_mgr_for_cpu);
    buffer_mgr_for_cpu->clearSlabs();
  }
}

bool DataMgr::isBufferOnDevice(const ChunkKey& key,
                               const MemoryLevel memLevel,
                               const int deviceId) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  return bufferMgrs_[memLevel][deviceId]->isBufferOnDevice(key);
}

void DataMgr::getChunkMetadataVecForKeyPrefix(ChunkMetadataVector& chunkMetadataVec,
                                              const ChunkKey& keyPrefix) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  bufferMgrs_[0][0]->getChunkMetadataVecForKeyPrefix(chunkMetadataVec, keyPrefix);
}

AbstractBuffer* DataMgr::createChunkBuffer(const ChunkKey& key,
                                           const MemoryLevel memoryLevel,
                                           const int deviceId,
                                           const size_t page_size) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  int level = static_cast<int>(memoryLevel);
  return bufferMgrs_[level][deviceId]->createBuffer(key, page_size);
}

AbstractBuffer* DataMgr::getChunkBuffer(const ChunkKey& key,
                                        const MemoryLevel memoryLevel,
                                        const int deviceId,
                                        const size_t numBytes) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  const auto level = static_cast<size_t>(memoryLevel);
  CHECK_LT(level, levelSizes_.size());     // make sure we have a legit buffermgr
  CHECK_LT(deviceId, levelSizes_[level]);  // make sure we have a legit buffermgr
  return bufferMgrs_[level][deviceId]->getBuffer(key, numBytes);
}

void DataMgr::deleteChunksWithPrefix(const ChunkKey& keyPrefix) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);

  int numLevels = bufferMgrs_.size();
  for (int level = numLevels - 1; level >= 0; --level) {
    for (int device = 0; device < levelSizes_[level]; ++device) {
      bufferMgrs_[level][device]->deleteBuffersWithPrefix(keyPrefix);
    }
  }
}
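
// Example (hypothetical caller): drop every cached chunk belonging to table 5
// of database 1 at all memory levels; chunk key prefixes start {db_id, tb_id}.
//   data_mgr.deleteChunksWithPrefix(ChunkKey{1, 5});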

// only deletes the chunks at the given memory level
void DataMgr::deleteChunksWithPrefix(const ChunkKey& keyPrefix,
                                     const MemoryLevel memLevel) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);

  if (bufferMgrs_.size() <= memLevel) {
    return;
  }
  for (int device = 0; device < levelSizes_[memLevel]; ++device) {
    bufferMgrs_[memLevel][device]->deleteBuffersWithPrefix(keyPrefix);
  }
}

AbstractBuffer* DataMgr::alloc(const MemoryLevel memoryLevel,
                               const int deviceId,
                               const size_t numBytes) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  const auto level = static_cast<int>(memoryLevel);
  CHECK_LT(deviceId, levelSizes_[level]);
  return bufferMgrs_[level][deviceId]->alloc(numBytes);
}

void DataMgr::free(AbstractBuffer* buffer) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  int level = static_cast<int>(buffer->getType());
  bufferMgrs_[level][buffer->getDeviceId()]->free(buffer);
}

void DataMgr::copy(AbstractBuffer* destBuffer, AbstractBuffer* srcBuffer) {
  destBuffer->write(srcBuffer->getMemoryPtr(),
                    srcBuffer->size(),
                    0,
                    srcBuffer->getType(),
                    srcBuffer->getDeviceId());
}

// could add a function below to do arbitrary copies between buffers:
//
// void DataMgr::copy(AbstractBuffer* destBuffer, const AbstractBuffer* srcBuffer,
//                    const size_t numBytes, const size_t destOffset,
//                    const size_t srcOffset) {
// }

void DataMgr::checkpoint(const int db_id, const int tb_id) {
  // TODO(adb): do we need a buffer mgr lock here?
  // MAT: yes, to reduce Parallel Executor TSAN issues (and for correctness, for now)
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  for (auto levelIt = bufferMgrs_.rbegin(); levelIt != bufferMgrs_.rend(); ++levelIt) {
    // use a reverse iterator so we start at the GPU level, then CPU, then disk
    for (auto deviceIt = levelIt->begin(); deviceIt != levelIt->end(); ++deviceIt) {
      (*deviceIt)->checkpoint(db_id, tb_id);
    }
  }
}

void DataMgr::checkpoint(const int db_id,
                         const int table_id,
                         const MemoryLevel memory_level) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  CHECK_LT(static_cast<size_t>(memory_level), bufferMgrs_.size());
  CHECK_LT(static_cast<size_t>(memory_level), levelSizes_.size());
  for (int device_id = 0; device_id < levelSizes_[memory_level]; device_id++) {
    bufferMgrs_[memory_level][device_id]->checkpoint(db_id, table_id);
  }
}

void DataMgr::checkpoint() {
  // TODO(adb): SAA
  // MAT: yes, to reduce Parallel Executor TSAN issues (and for correctness, for now)
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  for (auto levelIt = bufferMgrs_.rbegin(); levelIt != bufferMgrs_.rend(); ++levelIt) {
    // use a reverse iterator so we start at the GPU level, then CPU, then disk
    for (auto deviceIt = levelIt->begin(); deviceIt != levelIt->end(); ++deviceIt) {
      (*deviceIt)->checkpoint();
    }
  }
}

void DataMgr::removeTableRelatedDS(const int db_id, const int tb_id) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  bufferMgrs_[0][0]->removeTableRelatedDS(db_id, tb_id);
}

void DataMgr::setTableEpoch(const int db_id, const int tb_id, const int start_epoch) {
  File_Namespace::GlobalFileMgr* gfm{nullptr};
  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
  CHECK(gfm);
  gfm->setTableEpoch(db_id, tb_id, start_epoch);
}

size_t DataMgr::getTableEpoch(const int db_id, const int tb_id) {
  File_Namespace::GlobalFileMgr* gfm{nullptr};
  gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
  CHECK(gfm);
  return gfm->getTableEpoch(db_id, tb_id);
}

File_Namespace::GlobalFileMgr* DataMgr::getGlobalFileMgr() const {
  File_Namespace::GlobalFileMgr* global_file_mgr{nullptr};
  global_file_mgr =
      dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
  CHECK(global_file_mgr);
  return global_file_mgr;
}

std::shared_ptr<ForeignStorageInterface> DataMgr::getForeignStorageInterface() const {
  return dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])
      ->getForeignStorageInterface();
}

std::ostream& operator<<(std::ostream& os, const DataMgr::SystemMemoryUsage& mem_info) {
  os << "jsonlog ";
  os << "{";
  os << " \"name\": \"CPU Memory Info\",";
  os << " \"TotalMB\": " << mem_info.total / (1024. * 1024.) << ",";
  os << " \"FreeMB\": " << mem_info.free / (1024. * 1024.) << ",";
  os << " \"ProcessMB\": " << mem_info.resident / (1024. * 1024.) << ",";
  os << " \"VirtualMB\": " << mem_info.vtotal / (1024. * 1024.) << ",";
  os << " \"ProcessPlusSwapMB\": " << mem_info.regular / (1024. * 1024.) << ",";
  os << " \"ProcessSharedMB\": " << mem_info.shared / (1024. * 1024.) << ",";
  os << " \"FragmentationPercent\": " << mem_info.frag;
  os << " }";
  return os;
}
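
// A resulting log line looks like the following (values illustrative):
//   jsonlog { "name": "CPU Memory Info", "TotalMB": 64426.5, "FreeMB": 21510.2,
//   "ProcessMB": 12813.4, "VirtualMB": 30124.7, "ProcessPlusSwapMB": 11020.8,
//   "ProcessSharedMB": 1792.6, "FragmentationPercent": 3 }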

PersistentStorageMgr* DataMgr::getPersistentStorageMgr() const {
  return dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0]);
}

}  // namespace Data_Namespace