31 #include <sys/sysctl.h>
32 #include <sys/types.h>
35 #include <boost/filesystem.hpp>
42 namespace Data_Namespace {
// DataMgr constructor (extraction residue — interior lines are missing).
// Takes ownership of the CudaMgr, records the reserved GPU memory budget,
// then builds the per-level buffer managers and top-level metadata.
46 std::unique_ptr<CudaMgr_Namespace::CudaMgr> cudaMgr,
48 const size_t reservedGpuMem,
49 const size_t numReaderThreads,
// Member-initializer list: ownership of the CUDA manager moves into cudaMgr_.
51 : cudaMgr_{std::move(cudaMgr)}
54 , reservedGpuMem_{reservedGpuMem} {
// NOTE(review): an invalid CudaMgr is not fatal — the manager logs and
// continues in CPU-only mode (presumably hasGpus_ stays false — TODO confirm).
59 LOG(
ERROR) <<
"CudaMgr instance is invalid, falling back to CPU-only mode.";
// Build buffer managers for every memory level, then make sure the
// top-level (system catalog) metadata exists.
72 populateMgrs(system_parameters, numReaderThreads, cache_config);
73 createTopLevelMetadata();
// Destructor fragment (presumably DataMgr::~DataMgr — TODO confirm):
// walks memory levels from the highest index down to 0, visiting every
// device's buffer manager at each level (the per-device teardown body is
// missing from this extract).
78 for (
int level = numLevels - 1; level >= 0; --level) {
79 for (
size_t device = 0; device <
bufferMgrs_[level].size(); device++) {
// getSystemMemoryUsage fragment: fills a SystemMemoryUsage struct from
// Linux /proc interfaces (interior lines missing from this extract).
// Free/total system memory come from /proc/meminfo key/value pairs.
101 usage.
free = mi[
"MemAvailable"];
102 usage.
total = mi[
"MemTotal"];
// Process-level counters come from /proc/self/statm, which reports sizes
// in pages: total program size, resident set, and shared pages.
109 int64_t resident = 0;
112 std::ifstream fstatm(
"/proc/self/statm");
113 fstatm >> size >> resident >> shared;
// Page size converts the statm page counts into bytes below.
117 sysconf(_SC_PAGE_SIZE);
119 usage.
resident = resident * page_size;
120 usage.
vtotal = size * page_size;
// "regular" = resident minus shared, i.e. memory private to this process.
121 usage.
regular = (resident - shared) * page_size;
122 usage.
shared = shared * page_size;
145 size_t physical_memory;
150 length =
sizeof(size_t);
151 sysctl(mib, 2, &physical_memory, &length, NULL, 0);
152 return physical_memory;
153 #elif defined(_MSC_VER)
154 MEMORYSTATUSEX status;
155 status.dwLength =
sizeof(status);
156 GlobalMemoryStatusEx(&status);
157 return status.ullTotalPhys;
159 long pages = sysconf(_SC_PHYS_PAGES);
160 long page_size = sysconf(_SC_PAGE_SIZE);
161 return pages * page_size;
// resetPersistentStorage fragment: tears down the existing buffer-manager
// hierarchy (highest level first, every device per level — teardown body
// missing from this extract), then rebuilds it via populateMgrs with the
// supplied parameters and cache configuration.
167 const size_t num_reader_threads,
170 for (
int level = numLevels - 1; level >= 0; --level) {
171 for (
size_t device = 0; device <
bufferMgrs_[level].size(); device++) {
176 populateMgrs(sys_params, num_reader_threads, cache_config);
// populateMgrs fragment: builds the buffer-manager hierarchy. Level 0 is
// persistent storage (created below with dataDir_), then a CPU buffer pool,
// then — when GPUs are present — one GPU buffer pool per device.
181 const size_t userSpecifiedNumReaderThreads,
186 dataDir_, userSpecifiedNumReaderThreads, cache_config));
// Slab sizes are rounded down to a multiple of this page size (bytes).
189 size_t page_size{512};
// cpuBufferSize == 0 means "auto": derive the CPU pool from total system
// memory (the multiplier on line 195 is missing from this extract).
191 if (cpuBufferSize == 0) {
193 VLOG(1) <<
"Detected " << (float)total_system_memory / (1024 * 1024)
194 <<
"M of total system memory.";
195 cpuBufferSize = total_system_memory *
// Clamp configured slab sizes to the pool size, then page-align them.
198 size_t minCpuSlabSize = std::min(system_parameters.
min_cpu_slab_size, cpuBufferSize);
199 minCpuSlabSize = (minCpuSlabSize / page_size) * page_size;
200 size_t maxCpuSlabSize = std::min(system_parameters.
max_cpu_slab_size, cpuBufferSize);
201 maxCpuSlabSize = (maxCpuSlabSize / page_size) * page_size;
202 LOG(
INFO) <<
"Min CPU Slab Size is " << (float)minCpuSlabSize / (1024 * 1024) <<
"MB";
203 LOG(
INFO) <<
"Max CPU Slab Size is " << (float)maxCpuSlabSize / (1024 * 1024) <<
"MB";
204 LOG(
INFO) <<
"Max memory pool size for CPU is " << (float)cpuBufferSize / (1024 * 1024)
208 <<
"MB includes render buffer allocation";
// GPU branch: size and create one buffer pool per detected device.
218 int numGpus =
cudaMgr_->getDeviceCount();
219 for (
int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
// Per-GPU pool size (initializer missing from this extract — presumably
// device memory minus reservedGpuMem_; TODO confirm).
220 size_t gpuMaxMemSize =
224 size_t minGpuSlabSize =
226 minGpuSlabSize = (minGpuSlabSize / page_size) * page_size;
227 size_t maxGpuSlabSize =
229 maxGpuSlabSize = (maxGpuSlabSize / page_size) * page_size;
230 LOG(
INFO) <<
"Min GPU Slab size for GPU " << gpuNum <<
" is "
231 << (float)minGpuSlabSize / (1024 * 1024) <<
"MB";
232 LOG(
INFO) <<
"Max GPU Slab size for GPU " << gpuNum <<
" is "
233 << (float)maxGpuSlabSize / (1024 * 1024) <<
"MB";
234 LOG(
INFO) <<
"Max memory pool size for GPU " << gpuNum <<
" is "
235 << (float)gpuMaxMemSize / (1024 * 1024) <<
"MB";
// convertDB fragment: validates the legacy mapd_data directory next to
// basePath, then performs a database conversion (the conversion loop itself
// is missing from this extract). Both validation failures are fatal.
261 std::string mapdDataPath(basePath +
"/../mapd_data/");
262 boost::filesystem::path path(mapdDataPath);
263 if (boost::filesystem::exists(path)) {
264 if (!boost::filesystem::is_directory(path)) {
265 LOG(
FATAL) <<
"Path to directory mapd_data to convert DB is not a directory.";
// else-branch of the exists() check: directory missing entirely.
268 LOG(
FATAL) <<
"Path to directory mapd_data to convert DB does not exist.";
// Conversion proper: driven through the global file manager's page size.
275 size_t defaultPageSize = gfm->getDefaultPageSize();
276 LOG(
INFO) <<
"Database conversion started.";
285 LOG(
INFO) <<
"Database conversion completed.";
// Fragment (presumably createTopLevelMetadata — TODO confirm): fetch the
// file manager for a chunk key from the global file manager and act only
// when it is a real File_Namespace::FileMgr (branch body missing here).
298 auto fm_top = gfm->getFileMgr(chunkKey);
299 if (dynamic_cast<File_Namespace::FileMgr*>(fm_top)) {
// getMemoryInfo fragment: collects per-buffer-pool MemoryInfo. The CPU pool
// is summarized first, then (when GPUs exist) each GPU pool; both walks
// flatten slab segments into MemoryData records.
307 std::vector<MemoryInfo> mem_info;
// CPU pool: iterate every segment of every slab.
321 for (
size_t slab_num = 0; slab_num < slab_segments.size(); ++slab_num) {
322 for (
auto segment : slab_segments[slab_num]) {
// Record when the segment was last touched and copy its chunk key.
327 md.
touch = segment.last_touched;
330 md.
chunk_key.end(), segment.chunk_key.begin(), segment.chunk_key.end());
334 mem_info.push_back(mi);
// GPU pools: same slab/segment walk per detected device.
336 int numGpus =
cudaMgr_->getDeviceCount();
337 for (
int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
350 for (
size_t slab_num = 0; slab_num < slab_segments.size(); ++slab_num) {
351 for (
auto segment : slab_segments[slab_num]) {
356 md.
touch = segment.last_touched;
358 md.
chunk_key.end(), segment.chunk_key.begin(), segment.chunk_key.end());
363 mem_info.push_back(mi);
// dumpLevel fragment: concatenates the printSlabs() report of every device's
// buffer manager at the requested memory level into one string stream
// (presumably returned as tss.str(); return is missing from this extract).
374 int numGpus =
cudaMgr_->getDeviceCount();
375 std::ostringstream tss;
376 for (
int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
377 tss <<
bufferMgrs_[memLevel][gpuNum]->printSlabs();
// clearMemory fragment (GPU branch): clears the slabs of each detected
// GPU's buffer manager, logging per device; with no GPUs it only warns —
// a best-effort no-op, not an error.
391 int numGpus =
cudaMgr_->getDeviceCount();
392 for (
int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
393 LOG(
INFO) <<
"clear slabs on gpu " << gpuNum;
// Fallback when cudaMgr_ reports no devices (or is absent).
397 LOG(
WARNING) <<
"Unable to clear GPU memory: No GPUs detected";
// isBufferOnDevice fragment: delegates the lookup to the buffer manager of
// the given level/device pair.
406 const int deviceId) {
408 return bufferMgrs_[memLevel][deviceId]->isBufferOnDevice(key);
// getChunkMetadataVecForKeyPrefix fragment: metadata always comes from the
// persistent level, device 0 (bufferMgrs_[0][0]).
413 bufferMgrs_[0][0]->getChunkMetadataVecForKeyPrefix(chunkMetadataVec, keyPrefix);
// createChunkBuffer fragment: resolve the memory level to an index and
// create the buffer on the requested device with the given page size.
419 const size_t page_size) {
421 int level =
static_cast<int>(memoryLevel);
422 return bufferMgrs_[level][deviceId]->createBuffer(key, page_size);
// getChunkBuffer fragment: fetch (possibly faulting in) numBytes of the
// chunk from the buffer manager at the requested level/device.
428 const size_t numBytes) {
430 const auto level =
static_cast<size_t>(memoryLevel);
433 return bufferMgrs_[level][deviceId]->getBuffer(key, numBytes);
// deleteChunksWithPrefix fragment: deletes matching buffers on every device
// of every level, walking levels from highest index down to 0.
440 for (
int level = numLevels - 1; level >= 0; --level) {
441 for (
int device = 0; device <
levelSizes_[level]; ++device) {
442 bufferMgrs_[level][device]->deleteBuffersWithPrefix(keyPrefix);
// deleteChunksWithPrefix (single-level overload) fragment: same prefix
// delete, restricted to every device of one memory level.
455 for (
int device = 0; device <
levelSizes_[memLevel]; ++device) {
456 bufferMgrs_[memLevel][device]->deleteBuffersWithPrefix(keyPrefix);
// alloc fragment: allocate an anonymous (non-chunk) buffer of numBytes from
// the buffer manager at the requested level/device.
462 const size_t numBytes) {
464 const auto level =
static_cast<int>(memoryLevel);
466 return bufferMgrs_[level][deviceId]->alloc(numBytes);
// free fragment: the owning level is recovered from the buffer itself via
// getType(), so the caller need not say where it was allocated.
471 int level =
static_cast<int>(buffer->
getType());
// checkpoint(db_id, tb_id) fragment: checkpoints one table on every device
// manager of each level (the level loop is missing from this extract).
493 for (
auto deviceIt = levelIt->begin(); deviceIt != levelIt->end(); ++deviceIt) {
494 (*deviceIt)->checkpoint(db_id, tb_id);
// checkpoint() fragment: global variant — checkpoints every device manager
// of each level (the level loop is missing from this extract).
503 for (
auto deviceIt = levelIt->begin(); deviceIt != levelIt->end(); ++deviceIt) {
504 (*deviceIt)->checkpoint();
// removeTableRelatedDS fragment: table data structures live at the
// persistent level, device 0.
511 bufferMgrs_[0][0]->removeTableRelatedDS(db_id, tb_id);
// setTableEpoch fragment: delegates to the global file manager.
518 gfm->setTableEpoch(db_id, tb_id, start_epoch);
// getTableEpoch fragment: delegates to the global file manager.
525 return gfm->getTableEpoch(db_id, tb_id);
// getGlobalFileMgr fragment: the global file manager is an invariant of a
// constructed DataMgr, hence the CHECK before returning the raw pointer
// (non-owning; callers must not delete it).
532 CHECK(global_file_mgr);
533 return global_file_mgr;
// operator<<(ostream&, SystemMemoryUsage) fragment: emits the CPU memory
// report as JSON-style key/value lines; all byte counters are scaled to MB
// with double division (1024. * 1024.).
539 os <<
" \"name\": \"CPU Memory Info\",";
540 os <<
" \"TotalMB\": " << mem_info.
total / (1024. * 1024.) <<
",";
541 os <<
" \"FreeMB\": " << mem_info.
free / (1024. * 1024.) <<
",";
542 os <<
" \"ProcessMB\": " << mem_info.
resident / (1024. * 1024.) <<
",";
543 os <<
" \"VirtualMB\": " << mem_info.
vtotal / (1024. * 1024.) <<
",";
544 os <<
" \"ProcessPlusSwapMB\": " << mem_info.
regular / (1024. * 1024.) <<
",";
545 os <<
" \"ProcessSharedMB\": " << mem_info.
shared / (1024. * 1024.) <<
",";
// Fragmentation score is the last field: no trailing comma.
546 os <<
" \"FragmentationPercent\": " << mem_info.
frag;
size_t getAllocated() override
std::mutex buffer_access_mutex_
std::vector< int > ChunkKey
std::vector< MemoryData > nodeMemoryData
Buffer_Namespace::MemStatus memStatus
size_t getMaxSize() override
std::vector< std::vector< AbstractBufferMgr * > > bufferMgrs_
std::vector< int > levelSizes_
std::ostream & operator<<(std::ostream &os, const DataMgr::SystemMemoryUsage &mem_info)
SystemMemoryUsage getSystemMemoryUsage() const
void populateMgrs(const SystemParameters &system_parameters, const size_t userSpecifiedNumReaderThreads, const DiskCacheConfig &cache_config)
DataMgr(const std::string &dataDir, const SystemParameters &system_parameters, std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr, const bool useGpus, const size_t reservedGpuMem=(1<< 27), const size_t numReaderThreads=0, const DiskCacheConfig cacheConfig=DiskCacheConfig())
size_t cpu_buffer_mem_bytes
PersistentStorageMgr * getPersistentStorageMgr() const
virtual int8_t * getMemoryPtr()=0
virtual MemoryLevel getType() const =0
void clearMemory(const MemoryLevel memLevel)
std::string dumpLevel(const MemoryLevel memLevel)
void convertDB(const std::string basePath)
static size_t getTotalSystemMemory()
size_t getTableEpoch(const int db_id, const int tb_id)
void createTopLevelMetadata() const
bool isAllocationCapped() override
std::unique_ptr< CudaMgr_Namespace::CudaMgr > cudaMgr_
void getChunkMetadataVecForKeyPrefix(ChunkMetadataVector &chunkMetadataVec, const ChunkKey &keyPrefix)
An AbstractBuffer is a unit of data management for a data manager.
virtual void write(int8_t *src, const size_t num_bytes, const size_t offset=0, const MemoryLevel src_buffer_type=CPU_LEVEL, const int src_device_id=-1)=0
std::vector< MemoryInfo > getMemoryInfo(const MemoryLevel memLevel)
File_Namespace::GlobalFileMgr * getGlobalFileMgr() const
Parse /proc/meminfo into key/value pairs.
void deleteChunksWithPrefix(const ChunkKey &keyPrefix)
static PersistentStorageMgr * createPersistentStorageMgr(const std::string &data_dir, const size_t num_reader_threads, const DiskCacheConfig &disk_cache_config)
const std::vector< BufferList > & getSlabSegments()
bool isBufferOnDevice(const ChunkKey &key, const MemoryLevel memLevel, const int deviceId)
AbstractBuffer * getChunkBuffer(const ChunkKey &key, const MemoryLevel memoryLevel, const int deviceId=0, const size_t numBytes=0)
void removeTableRelatedDS(const int db_id, const int tb_id)
void resetPersistentStorage(const DiskCacheConfig &cache_config, const size_t num_reader_threads, const SystemParameters &sys_params)
void copy(AbstractBuffer *destBuffer, AbstractBuffer *srcBuffer)
size_t gpu_buffer_mem_bytes
std::vector< int32_t > chunk_key
AbstractBuffer * createChunkBuffer(const ChunkKey &key, const MemoryLevel memoryLevel, const int deviceId=0, const size_t page_size=0)
void free(AbstractBuffer *buffer)
auto getFragmentationPercent()
Parse /proc/buddyinfo into a Fragmentation health score.
void setTableEpoch(const int db_id, const int tb_id, const int start_epoch)
AbstractBuffer * alloc(const MemoryLevel memoryLevel, const int deviceId, const size_t numBytes)