OmniSciDB  72c90bc290
GpuCudaBufferMgr.cpp
/*
 * Copyright 2022 HEAVY.AI, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "GpuCudaBufferMgr.h"

#include "CudaMgr/CudaMgr.h"
#include "GpuCudaBuffer.h"
#include "Logger/Logger.h"

namespace Buffer_Namespace {

GpuCudaBufferMgr::GpuCudaBufferMgr(const int device_id,
                                   const size_t max_buffer_pool_size,
                                   CudaMgr_Namespace::CudaMgr* cuda_mgr,
                                   const size_t min_slab_size,
                                   const size_t max_slab_size,
                                   const size_t page_size,
                                   AbstractBufferMgr* parent_mgr)
    : BufferMgr(device_id,
                max_buffer_pool_size,
                min_slab_size,
                max_slab_size,
                page_size,
                parent_mgr)
    , cuda_mgr_(cuda_mgr) {}

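// A minimal usage sketch, not part of the original file: how a caller might
// construct this manager for one GPU. The byte sizes and variable names below
// are assumptions chosen for illustration only; the real values are presumably
// wired up by the higher-level DataMgr from server configuration.
//
//   const size_t pool_bytes = 4UL * 1024 * 1024 * 1024;      // 4 GiB pool (assumed)
//   const size_t min_slab_bytes = 256UL * 1024 * 1024;       // 256 MiB (assumed)
//   const size_t max_slab_bytes = 2UL * 1024 * 1024 * 1024;  // 2 GiB (assumed)
//   const size_t page_bytes = 512;                           // (assumed)
//   auto gpu_mgr = std::make_unique<GpuCudaBufferMgr>(/*device_id=*/0,
//                                                     pool_bytes,
//                                                     cuda_mgr,
//                                                     min_slab_bytes,
//                                                     max_slab_bytes,
//                                                     page_bytes,
//                                                     parent_mgr);
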
GpuCudaBufferMgr::~GpuCudaBufferMgr() {
  try {
    cuda_mgr_->synchronizeDevices();
    freeAllMem();
#ifdef HAVE_CUDA
  } catch (const CudaMgr_Namespace::CudaErrorException& e) {
    if (e.getStatus() == CUDA_ERROR_DEINITIALIZED) {
      // TODO(adb / asuhan): Verify cuModuleUnload removes the context
      return;
    }
#endif
  } catch (const std::runtime_error& e) {
    LOG(ERROR) << "CUDA Error: " << e.what();
  }
}

void GpuCudaBufferMgr::addSlab(const size_t slab_size) {
  slabs_.resize(slabs_.size() + 1);
  try {
    slabs_.back() =
        cuda_mgr_->allocateDeviceMem(slab_size, device_id_, /* is_slab */ true);
  } catch (std::runtime_error& error) {
    slabs_.resize(slabs_.size() - 1);
    throw FailedToCreateSlab(slab_size);
  }
  slab_segments_.resize(slab_segments_.size() + 1);
  slab_segments_[slab_segments_.size() - 1].push_back(
      BufferSeg(0, slab_size / page_size_));
}
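
// Worked example, not part of the original file: the bookkeeping above turns one
// device allocation into a single free segment. With an assumed 512-byte page
// size and an assumed 2 GiB slab:
//
//   page_size_ = 512;                       // assumed
//   slab_size  = 2UL * 1024 * 1024 * 1024;  // 2 GiB, assumed
//   slab_size / page_size_ == 4194304;      // pages in the new slab
//   // so BufferSeg(0, 4194304) marks the whole slab as one unused segment,
//   // ready to be carved up by later buffer allocations.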

void GpuCudaBufferMgr::freeAllMem() {
  for (auto buf_it = slabs_.begin(); buf_it != slabs_.end(); ++buf_it) {
    cuda_mgr_->freeDeviceMem(*buf_it);
  }
}

void GpuCudaBufferMgr::allocateBuffer(BufferList::iterator seg_it,
                                      const size_t page_size,
                                      const size_t initial_size) {
  new GpuCudaBuffer(this,
                    seg_it,
                    device_id_,
                    cuda_mgr_,
                    page_size,
                    initial_size);  // admittedly a bit weird: the result of new is
                                    // not stored here because the GpuCudaBuffer
                                    // constructor records its own address in the
                                    // buffer member of the segment seg_it points to
}

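// A minimal sketch, not the actual GpuCudaBuffer/Buffer source, of the
// back-pointer pattern described in the comment above: the buffer constructor
// is presumed to register the freshly built object in the segment it was
// handed, so the manager reaches the buffer later through its segment list and
// the naked `new` in allocateBuffer() is not leaked. The member name `buffer`
// is taken from that comment; the exact signature is an assumption.
//
//   Buffer::Buffer(BufferMgr* bm, BufferList::iterator seg_it, ...) {
//     seg_it->buffer = this;  // segment now points back at the new buffer
//   }
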
}  // namespace Buffer_Namespace