OmniSciDB  0b528656ed
DataMgr.cpp
/*
 * Copyright 2020 OmniSci, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "DataMgr/DataMgr.h"
#include "BufferMgr/CpuBufferMgr/CpuBufferMgr.h"
#include "BufferMgr/GpuCudaBufferMgr/GpuCudaBufferMgr.h"
#include "CudaMgr/CudaMgr.h"
#include "FileMgr/GlobalFileMgr.h"
#include "PersistentStorageMgr/PersistentStorageMgr.h"

#ifdef __APPLE__
#include <sys/sysctl.h>
#include <sys/types.h>
#else
#include <unistd.h>
#endif

#include <boost/filesystem.hpp>

#include <algorithm>
#include <fstream>
#include <limits>

using namespace std;
using namespace Buffer_Namespace;
using namespace File_Namespace;

extern bool g_enable_fsi;

namespace Data_Namespace {

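// DataMgr maintains a hierarchy of buffer managers: bufferMgrs_[0] is the
// persistent storage level (GlobalFileMgr, or PersistentStorageMgr when FSI is
// enabled), bufferMgrs_[1] the CPU level (CpuBufferMgr), and, when GPUs are
// available, bufferMgrs_[2] holds one GpuCudaBufferMgr per device.
// levelSizes_ records the number of devices at each level.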
DataMgr::DataMgr(const string& dataDir,
                 const SystemParameters& system_parameters,
                 const bool useGpus,
                 const int numGpus,
                 const int startGpu,
                 const size_t reservedGpuMem,
                 const size_t numReaderThreads)
    : dataDir_(dataDir) {
  if (useGpus) {
    try {
      cudaMgr_ = std::make_unique<CudaMgr_Namespace::CudaMgr>(numGpus, startGpu);
      reservedGpuMem_ = reservedGpuMem;
      hasGpus_ = true;
    } catch (const std::exception& e) {
      LOG(ERROR) << "Unable to instantiate CudaMgr, falling back to CPU-only mode. "
                 << e.what();
      hasGpus_ = false;
    }
  } else {
    hasGpus_ = false;
  }

  populateMgrs(system_parameters, numReaderThreads);
  createTopLevelMetadata();
}

DataMgr::~DataMgr() {
  int numLevels = bufferMgrs_.size();
  for (int level = numLevels - 1; level >= 0; --level) {
    for (size_t device = 0; device < bufferMgrs_[level].size(); device++) {
      delete bufferMgrs_[level][device];
    }
  }
}

DataMgr::SystemMemoryUsage DataMgr::getSystemMemoryUsage() const {
  SystemMemoryUsage usage;

#ifdef __linux__

  // Determine Linux available memory and total memory.
  // Available memory is different from free memory because
  // when Linux sees free memory, it tries to use it for
  // stuff like disk caching. However, the memory is not
  // reserved and is still available to be allocated by
  // user processes.
  // Parsing /proc/meminfo for this info isn't very elegant
  // but as a virtual file it should be reasonably fast.
  // See also:
  // https://github.com/torvalds/linux/commit/34e431b0ae398fc54ea69ff85ec700722c9da773
  ProcMeminfoParser mi;  // parses /proc/meminfo into key/value pairs (DataMgr.h)
  usage.free = mi["MemAvailable"];
  usage.total = mi["MemTotal"];

  // Determine process memory in use.
  // See also:
  // https://stackoverflow.com/questions/669438/how-to-get-memory-usage-at-runtime-using-c
  // http://man7.org/linux/man-pages/man5/proc.5.html
  int64_t size = 0;
  int64_t resident = 0;
  int64_t shared = 0;

  std::ifstream fstatm("/proc/self/statm");
  fstatm >> size >> resident >> shared;
  fstatm.close();

  long page_size =
      sysconf(_SC_PAGE_SIZE);  // in case x86-64 is configured to use 2MB pages

  usage.resident = resident * page_size;
  usage.vtotal = size * page_size;
  usage.regular = (resident - shared) * page_size;
  usage.shared = shared * page_size;

#else

  usage.total = 0;
  usage.free = 0;
  usage.resident = 0;
  usage.vtotal = 0;
  usage.regular = 0;
  usage.shared = 0;

#endif

  return usage;
}
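
// Usage sketch (hypothetical caller; relies on the operator<< overload defined
// near the end of this file):
//   DataMgr::SystemMemoryUsage usage = data_mgr.getSystemMemoryUsage();
//   LOG(INFO) << usage;  // prints Total/Free/Process/Virtual/... in MB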

size_t DataMgr::getTotalSystemMemory() {
#ifdef __APPLE__
  int mib[2];
  size_t physical_memory;
  size_t length;
  // Get the physical memory size
  mib[0] = CTL_HW;
  mib[1] = HW_MEMSIZE;
  length = sizeof(size_t);
  sysctl(mib, 2, &physical_memory, &length, NULL, 0);
  return physical_memory;

#else  // Linux
  long pages = sysconf(_SC_PHYS_PAGES);
  long page_size = sysconf(_SC_PAGE_SIZE);
  return pages * page_size;
#endif
}
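
// Example: with the common 4 KiB page size, a machine reporting
// _SC_PHYS_PAGES == 16777216 yields 16777216 * 4096 bytes = 64 GiB.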

void DataMgr::populateMgrs(const SystemParameters& system_parameters,
                           const size_t userSpecifiedNumReaderThreads) {
  // no need for locking, as this is only called in the constructor
  bufferMgrs_.resize(2);
  if (g_enable_fsi) {
    bufferMgrs_[0].push_back(
        new PersistentStorageMgr(dataDir_, userSpecifiedNumReaderThreads));
  } else {
    bufferMgrs_[0].push_back(
        new GlobalFileMgr(0, dataDir_, userSpecifiedNumReaderThreads));
  }
  levelSizes_.push_back(1);
  size_t page_size{512};
  size_t cpuBufferSize = system_parameters.cpu_buffer_mem_bytes;
  if (cpuBufferSize == 0) {  // if size is not specified
    const auto total_system_memory = getTotalSystemMemory();
    VLOG(1) << "Detected " << (float)total_system_memory / (1024 * 1024)
            << "M of total system memory.";
    cpuBufferSize = total_system_memory *
                    0.8;  // should get free memory instead of this ugly heuristic
  }
  size_t minCpuSlabSize = std::min(system_parameters.min_cpu_slab_size, cpuBufferSize);
  minCpuSlabSize = (minCpuSlabSize / page_size) * page_size;
  size_t maxCpuSlabSize = std::min(system_parameters.max_cpu_slab_size, cpuBufferSize);
  maxCpuSlabSize = (maxCpuSlabSize / page_size) * page_size;
  LOG(INFO) << "Min CPU Slab Size is " << (float)minCpuSlabSize / (1024 * 1024) << "MB";
  LOG(INFO) << "Max CPU Slab Size is " << (float)maxCpuSlabSize / (1024 * 1024) << "MB";
  LOG(INFO) << "Max memory pool size for CPU is " << (float)cpuBufferSize / (1024 * 1024)
            << "MB";
  if (hasGpus_) {
    LOG(INFO) << "Reserved GPU memory is " << (float)reservedGpuMem_ / (1024 * 1024)
              << "MB, which includes the render buffer allocation";
    bufferMgrs_.resize(3);
    bufferMgrs_[1].push_back(new CpuBufferMgr(0,
                                              cpuBufferSize,
                                              cudaMgr_.get(),
                                              minCpuSlabSize,
                                              maxCpuSlabSize,
                                              page_size,
                                              bufferMgrs_[0][0]));
    levelSizes_.push_back(1);
    int numGpus = cudaMgr_->getDeviceCount();
    for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
      size_t gpuMaxMemSize =
          system_parameters.gpu_buffer_mem_bytes != 0
              ? system_parameters.gpu_buffer_mem_bytes
              : (cudaMgr_->getDeviceProperties(gpuNum)->globalMem) - (reservedGpuMem_);
      size_t minGpuSlabSize =
          std::min(system_parameters.min_gpu_slab_size, gpuMaxMemSize);
      minGpuSlabSize = (minGpuSlabSize / page_size) * page_size;
      size_t maxGpuSlabSize =
          std::min(system_parameters.max_gpu_slab_size, gpuMaxMemSize);
      maxGpuSlabSize = (maxGpuSlabSize / page_size) * page_size;
      LOG(INFO) << "Min GPU Slab size for GPU " << gpuNum << " is "
                << (float)minGpuSlabSize / (1024 * 1024) << "MB";
      LOG(INFO) << "Max GPU Slab size for GPU " << gpuNum << " is "
                << (float)maxGpuSlabSize / (1024 * 1024) << "MB";
      LOG(INFO) << "Max memory pool size for GPU " << gpuNum << " is "
                << (float)gpuMaxMemSize / (1024 * 1024) << "MB";
      bufferMgrs_[2].push_back(new GpuCudaBufferMgr(gpuNum,
                                                    gpuMaxMemSize,
                                                    cudaMgr_.get(),
                                                    minGpuSlabSize,
                                                    maxGpuSlabSize,
                                                    page_size,
                                                    bufferMgrs_[1][0]));
    }
    levelSizes_.push_back(numGpus);
  } else {
    bufferMgrs_[1].push_back(new CpuBufferMgr(0,
                                              cpuBufferSize,
                                              cudaMgr_.get(),
                                              minCpuSlabSize,
                                              maxCpuSlabSize,
                                              page_size,
                                              bufferMgrs_[0][0]));
    levelSizes_.push_back(1);
  }
}
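
// The slab bounds computed above are first clamped to the pool size and then
// rounded down to a whole number of pages by the integer division, e.g. with
// page_size = 512: (1000 / 512) * 512 == 512. Slab sizes are therefore always
// page-aligned and never exceed the configured memory pool.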

void DataMgr::convertDB(const std::string basePath) {
  // no need for locking, as this is only called in the constructor

  /* check that "mapd_data" directory exists and it's empty */
  std::string mapdDataPath(basePath + "/../mapd_data/");
  boost::filesystem::path path(mapdDataPath);
  if (boost::filesystem::exists(path)) {
    if (!boost::filesystem::is_directory(path)) {
      LOG(FATAL) << "Path to directory mapd_data to convert DB is not a directory.";
    }
  } else {  // data directory does not exist
    LOG(FATAL) << "Path to directory mapd_data to convert DB does not exist.";
  }

  GlobalFileMgr* gfm;
  if (g_enable_fsi) {
    gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
  } else {
    gfm = dynamic_cast<GlobalFileMgr*>(bufferMgrs_[0][0]);
  }
  size_t defaultPageSize = gfm->getDefaultPageSize();
  LOG(INFO) << "Database conversion started.";
  FileMgr* fm_base_db =
      new FileMgr(gfm,
                  defaultPageSize,
                  basePath);  // this call also copies data into new DB structure
  delete fm_base_db;

  /* write content of DB into newly created/converted DB structure & location */
  checkpoint();  // outputs data files as well as metadata files
  LOG(INFO) << "Database conversion completed.";
}
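
// Note: per the comments above, the FileMgr constructor performs the actual
// copy into the new DB structure, and the checkpoint() that follows flushes
// both data and metadata files for the converted database.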

void DataMgr::createTopLevelMetadata()
    const {  // create metadata shared by all tables of all DBs
  ChunkKey chunkKey(2);
  chunkKey[0] = 0;  // top level db_id
  chunkKey[1] = 0;  // top level tb_id

  GlobalFileMgr* gfm;
  if (g_enable_fsi) {
    gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
  } else {
    gfm = dynamic_cast<GlobalFileMgr*>(bufferMgrs_[0][0]);
  }
  auto fm_top = gfm->getFileMgr(chunkKey);
  if (dynamic_cast<File_Namespace::FileMgr*>(fm_top)) {
    static_cast<File_Namespace::FileMgr*>(fm_top)->createTopLevelMetadata();
  }
}

std::vector<MemoryInfo> DataMgr::getMemoryInfo(const MemoryLevel memLevel) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);

  std::vector<MemoryInfo> mem_info;
  if (memLevel == MemoryLevel::CPU_LEVEL) {
    CpuBufferMgr* cpu_buffer =
        dynamic_cast<CpuBufferMgr*>(bufferMgrs_[MemoryLevel::CPU_LEVEL][0]);
    CHECK(cpu_buffer);
    MemoryInfo mi;

    mi.pageSize = cpu_buffer->getPageSize();
    mi.maxNumPages = cpu_buffer->getMaxSize() / mi.pageSize;
    mi.isAllocationCapped = cpu_buffer->isAllocationCapped();
    mi.numPageAllocated = cpu_buffer->getAllocated() / mi.pageSize;

    const auto& slab_segments = cpu_buffer->getSlabSegments();
    for (size_t slab_num = 0; slab_num < slab_segments.size(); ++slab_num) {
      for (auto segment : slab_segments[slab_num]) {
        MemoryData md;
        md.slabNum = slab_num;
        md.startPage = segment.start_page;
        md.numPages = segment.num_pages;
        md.touch = segment.last_touched;
        md.memStatus = segment.mem_status;
        md.chunk_key.insert(
            md.chunk_key.end(), segment.chunk_key.begin(), segment.chunk_key.end());
        mi.nodeMemoryData.push_back(md);
      }
    }
    mem_info.push_back(mi);
  } else if (hasGpus_) {
    int numGpus = cudaMgr_->getDeviceCount();
    for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
      GpuCudaBufferMgr* gpu_buffer =
          dynamic_cast<GpuCudaBufferMgr*>(bufferMgrs_[MemoryLevel::GPU_LEVEL][gpuNum]);
      CHECK(gpu_buffer);
      MemoryInfo mi;

      mi.pageSize = gpu_buffer->getPageSize();
      mi.maxNumPages = gpu_buffer->getMaxSize() / mi.pageSize;
      mi.isAllocationCapped = gpu_buffer->isAllocationCapped();
      mi.numPageAllocated = gpu_buffer->getAllocated() / mi.pageSize;

      const auto& slab_segments = gpu_buffer->getSlabSegments();
      for (size_t slab_num = 0; slab_num < slab_segments.size(); ++slab_num) {
        for (auto segment : slab_segments[slab_num]) {
          MemoryData md;
          md.slabNum = slab_num;
          md.startPage = segment.start_page;
          md.numPages = segment.num_pages;
          md.touch = segment.last_touched;
          md.chunk_key.insert(
              md.chunk_key.end(), segment.chunk_key.begin(), segment.chunk_key.end());
          md.memStatus = segment.mem_status;
          mi.nodeMemoryData.push_back(md);
        }
      }
      mem_info.push_back(mi);
    }
  }
  return mem_info;
}
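
// Usage sketch (hypothetical): report allocated bytes per device at a level.
//   for (const auto& mi : data_mgr.getMemoryInfo(MemoryLevel::GPU_LEVEL)) {
//     LOG(INFO) << "allocated: " << mi.numPageAllocated * mi.pageSize << " bytes";
//   }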

std::string DataMgr::dumpLevel(const MemoryLevel memLevel) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);

  // for the GPU level we need to iterate over the buffer managers for all cards
  if (memLevel == MemoryLevel::GPU_LEVEL) {
    int numGpus = cudaMgr_->getDeviceCount();
    std::ostringstream tss;
    for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
      tss << bufferMgrs_[memLevel][gpuNum]->printSlabs();
    }
    return tss.str();
  } else {
    return bufferMgrs_[memLevel][0]->printSlabs();
  }
}

void DataMgr::clearMemory(const MemoryLevel memLevel) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);

  // for the GPU level we need to iterate over the buffer managers for all cards
  if (memLevel == MemoryLevel::GPU_LEVEL) {
    if (cudaMgr_) {
      int numGpus = cudaMgr_->getDeviceCount();
      for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
        LOG(INFO) << "clear slabs on gpu " << gpuNum;
        bufferMgrs_[memLevel][gpuNum]->clearSlabs();
      }
    } else {
      throw std::runtime_error("Unable to clear GPU memory: No GPUs detected");
    }
  } else {
    bufferMgrs_[memLevel][0]->clearSlabs();
  }
}

bool DataMgr::isBufferOnDevice(const ChunkKey& key,
                               const MemoryLevel memLevel,
                               const int deviceId) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  return bufferMgrs_[memLevel][deviceId]->isBufferOnDevice(key);
}

void DataMgr::getChunkMetadataVec(ChunkMetadataVector& chunkMetadataVec) {
  // Can we always assume this will just be at the disk level because we just
  // started?
  // access to this object is locked by the file mgr
  bufferMgrs_[0][0]->getChunkMetadataVec(chunkMetadataVec);
}

void DataMgr::getChunkMetadataVecForKeyPrefix(ChunkMetadataVector& chunkMetadataVec,
                                              const ChunkKey& keyPrefix) {
  bufferMgrs_[0][0]->getChunkMetadataVecForKeyPrefix(chunkMetadataVec, keyPrefix);
}

AbstractBuffer* DataMgr::createChunkBuffer(const ChunkKey& key,
                                           const MemoryLevel memoryLevel,
                                           const int deviceId,
                                           const size_t page_size) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  int level = static_cast<int>(memoryLevel);
  return bufferMgrs_[level][deviceId]->createBuffer(key, page_size);
}

AbstractBuffer* DataMgr::getChunkBuffer(const ChunkKey& key,
                                        const MemoryLevel memoryLevel,
                                        const int deviceId,
                                        const size_t numBytes) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  const auto level = static_cast<size_t>(memoryLevel);
  CHECK_LT(level, levelSizes_.size());     // make sure we have a legit buffermgr
  CHECK_LT(deviceId, levelSizes_[level]);  // make sure we have a legit buffermgr
  return bufferMgrs_[level][deviceId]->getBuffer(key, numBytes);
}
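
// The MemoryLevel value doubles as the first index into bufferMgrs_
// (disk = 0, CPU = 1, GPU = 2, matching the layout built in populateMgrs()),
// while deviceId selects the buffer manager within that level.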

void DataMgr::deleteChunksWithPrefix(const ChunkKey& keyPrefix) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);

  int numLevels = bufferMgrs_.size();
  for (int level = numLevels - 1; level >= 0; --level) {
    for (int device = 0; device < levelSizes_[level]; ++device) {
      bufferMgrs_[level][device]->deleteBuffersWithPrefix(keyPrefix);
    }
  }
}

// only deletes the chunks at the given memory level
void DataMgr::deleteChunksWithPrefix(const ChunkKey& keyPrefix,
                                     const MemoryLevel memLevel) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);

  if (bufferMgrs_.size() <= memLevel) {
    return;
  }
  for (int device = 0; device < levelSizes_[memLevel]; ++device) {
    bufferMgrs_[memLevel][device]->deleteBuffersWithPrefix(keyPrefix);
  }
}

AbstractBuffer* DataMgr::alloc(const MemoryLevel memoryLevel,
                               const int deviceId,
                               const size_t numBytes) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  const auto level = static_cast<int>(memoryLevel);
  CHECK_LT(deviceId, levelSizes_[level]);
  return bufferMgrs_[level][deviceId]->alloc(numBytes);
}

void DataMgr::free(AbstractBuffer* buffer) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  int level = static_cast<int>(buffer->getType());
  bufferMgrs_[level][buffer->getDeviceId()]->free(buffer);
}

void DataMgr::copy(AbstractBuffer* destBuffer, AbstractBuffer* srcBuffer) {
  destBuffer->write(srcBuffer->getMemoryPtr(),
                    srcBuffer->size(),
                    0,
                    srcBuffer->getType(),
                    srcBuffer->getDeviceId());
}
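
// Note: this copies the entire srcBuffer into destBuffer starting at offset 0;
// the source's memory level and device id are forwarded so write() can choose
// the appropriate transfer path.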

// could add function below to do arbitrary copies between buffers

// void DataMgr::copy(AbstractBuffer* destBuffer, const AbstractBuffer* srcBuffer,
//                    const size_t numBytes, const size_t destOffset,
//                    const size_t srcOffset) {
// }

void DataMgr::checkpoint(const int db_id, const int tb_id) {
  // TODO(adb): do we need a buffer mgr lock here?
  for (auto levelIt = bufferMgrs_.rbegin(); levelIt != bufferMgrs_.rend(); ++levelIt) {
    // use reverse iterator so we start at GPU level, then CPU then DISK
    for (auto deviceIt = levelIt->begin(); deviceIt != levelIt->end(); ++deviceIt) {
      (*deviceIt)->checkpoint(db_id, tb_id);
    }
  }
}

void DataMgr::checkpoint() {
  // TODO(adb): SAA
  for (auto levelIt = bufferMgrs_.rbegin(); levelIt != bufferMgrs_.rend(); ++levelIt) {
    // use reverse iterator so we start at GPU level, then CPU then DISK
    for (auto deviceIt = levelIt->begin(); deviceIt != levelIt->end(); ++deviceIt) {
      (*deviceIt)->checkpoint();
    }
  }
}

void DataMgr::removeTableRelatedDS(const int db_id, const int tb_id) {
  std::lock_guard<std::mutex> buffer_lock(buffer_access_mutex_);
  bufferMgrs_[0][0]->removeTableRelatedDS(db_id, tb_id);
}

void DataMgr::setTableEpoch(const int db_id, const int tb_id, const int start_epoch) {
  GlobalFileMgr* gfm;
  if (g_enable_fsi) {
    gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
  } else {
    gfm = dynamic_cast<GlobalFileMgr*>(bufferMgrs_[0][0]);
  }
  gfm->setTableEpoch(db_id, tb_id, start_epoch);
}

size_t DataMgr::getTableEpoch(const int db_id, const int tb_id) {
  GlobalFileMgr* gfm;
  if (g_enable_fsi) {
    gfm = dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
  } else {
    gfm = dynamic_cast<GlobalFileMgr*>(bufferMgrs_[0][0]);
  }
  return gfm->getTableEpoch(db_id, tb_id);
}

GlobalFileMgr* DataMgr::getGlobalFileMgr() const {
  GlobalFileMgr* global_file_mgr;
  if (g_enable_fsi) {
    global_file_mgr =
        dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getGlobalFileMgr();
  } else {
    global_file_mgr = dynamic_cast<GlobalFileMgr*>(bufferMgrs_[0][0]);
  }
  CHECK(global_file_mgr);
  return global_file_mgr;
}

std::ostream& operator<<(std::ostream& os, const DataMgr::SystemMemoryUsage& mem_info) {
  os << "CPU Memory Info:";
  os << "\n\tTotal: " << mem_info.total / (1024. * 1024.) << " MB";
  os << "\n\tFree: " << mem_info.free / (1024. * 1024.) << " MB";
  os << "\n\tProcess: " << mem_info.resident / (1024. * 1024.) << " MB";
  os << "\n\tVirtual: " << mem_info.vtotal / (1024. * 1024.) << " MB";
  os << "\n\tProcess + Swap: " << mem_info.regular / (1024. * 1024.) << " MB";
  os << "\n\tProcess Shared: " << mem_info.shared / (1024. * 1024.) << " MB";
  return os;
}

foreign_storage::ForeignStorageMgr* DataMgr::getForeignStorageMgr() const {
  if (g_enable_fsi) {
    return dynamic_cast<PersistentStorageMgr*>(bufferMgrs_[0][0])->getForeignStorageMgr();
  }
  return nullptr;
}

}  // namespace Data_Namespace