TieredCpuBufferMgr.cpp
/*
 * Copyright 2022 HEAVY.AI, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "CudaMgr/CudaMgr.h"
#include "Shared/misc.h"

#include <iostream>

namespace {
std::string tier_to_string(CpuTier tier) {
  switch (tier) {
    case DRAM:
      return "DRAM";
    case PMEM:
      return "PMEM";
    default:
      return "<UNKNOWN>";
  }
}
}  // namespace

namespace Buffer_Namespace {

TieredCpuBufferMgr::TieredCpuBufferMgr(const int device_id,
                                       const size_t total_size,
                                       CudaMgr_Namespace::CudaMgr* cuda_mgr,
                                       const size_t min_slab_size,
                                       const size_t max_slab_size,
                                       const size_t page_size,
                                       const CpuTierSizeVector& cpu_tier_sizes,
                                       AbstractBufferMgr* parent_mgr)
    : CpuBufferMgr(device_id,
                   total_size,
                   cuda_mgr,
                   min_slab_size,
                   max_slab_size,
                   page_size,
                   parent_mgr) {
  CHECK(cpu_tier_sizes.size() == numCpuTiers);
  allocators_.emplace_back(
      std::make_unique<DramArena>(max_slab_size_ + kArenaBlockOverhead),
      cpu_tier_sizes[CpuTier::DRAM]);
  allocators_.emplace_back(
      std::make_unique<PMemArena>(max_slab_size_ + kArenaBlockOverhead),
      cpu_tier_sizes[CpuTier::PMEM]);
}
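
// Illustrative sketch: one way a caller might size the per-tier limits before
// constructing the manager. CpuTierSizeVector is indexed by CpuTier, and the
// constructor CHECKs that it contains exactly numCpuTiers entries. The byte
// values and the surrounding variables (total_size, cuda_mgr, ...) are
// hypothetical.
//
//   CpuTierSizeVector tier_sizes(numCpuTiers);
//   tier_sizes[CpuTier::DRAM] = 16UL << 30;  // 16 GiB DRAM cap
//   tier_sizes[CpuTier::PMEM] = 64UL << 30;  // 64 GiB PMEM cap
//   TieredCpuBufferMgr buffer_mgr(0,  // device_id
//                                 total_size,
//                                 cuda_mgr,
//                                 min_slab_size,
//                                 max_slab_size,
//                                 page_size,
//                                 tier_sizes,
//                                 parent_mgr);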

Arena* TieredCpuBufferMgr::getAllocatorForSlab(int32_t slab_num) const {
  return shared::get_from_map(slab_to_allocator_map_, slab_num);
}

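// addSlab() walks the CPU tiers in priority order (DRAM first, then PMEM) and
// serves the new slab from the first tier whose configured byte limit still has
// room for slab_size. For example, with a DRAM limit of 4 GiB of which 3.8 GiB
// is already used, a 512 MiB slab request would skip DRAM and be allocated from
// PMEM (numbers purely illustrative). If no tier has room, or the chosen arena
// throws std::bad_alloc, the speculative slabs_ entry is rolled back and
// FailedToCreateSlab is thrown.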
void TieredCpuBufferMgr::addSlab(const size_t slab_size) {
  CHECK(!allocators_.empty());
  CHECK(allocators_.begin()->first.get() != nullptr);
  slabs_.resize(slabs_.size() + 1);
  auto allocated_slab = false;
  CpuTier last_tier;
  for (auto allocator_type : {CpuTier::DRAM, CpuTier::PMEM}) {
    last_tier = allocator_type;
    auto& [allocator, allocator_limit] = allocators_.at(allocator_type);
    // If there is no space in the current allocator then move to the next one.
    if (allocator_limit >= allocator->bytesUsed() + slab_size) {
      try {
        slabs_.back() = reinterpret_cast<int8_t*>(allocator->allocate(slab_size));
      } catch (std::bad_alloc&) {
        // If anything goes wrong with an allocation, then throw an exception rather
        // than go to the next allocator.
        slabs_.resize(slabs_.size() - 1);
        throw FailedToCreateSlab(slab_size);
      }
      slab_to_allocator_map_[slabs_.size() - 1] = allocator.get();
      allocated_slab = true;
      break;
    }
  }
  if (allocated_slab) {
    // We allocated a new slab, so add segments for it.
    slab_segments_.resize(slab_segments_.size() + 1);
    slab_segments_[slab_segments_.size() - 1].push_back(
        BufferSeg(0, slab_size / page_size_));
    LOG(INFO) << "Allocated slab using " << tier_to_string(last_tier) << ".";
  } else {
    // None of the allocators allocated a slab, so revert to original size and throw.
    slabs_.resize(slabs_.size() - 1);
    throw FailedToCreateSlab(slab_size);
  }
}

void TieredCpuBufferMgr::freeAllMem() {
  CHECK(!allocators_.empty());
  CHECK(allocators_.begin()->first.get() != nullptr);
  initializeMem();
}

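// initializeMem() drops the existing DRAM and PMEM arenas by replacing them with
// freshly constructed ones (releasing all memory the old arenas held) and clears
// the slab-to-allocator bookkeeping; freeAllMem() above relies on this to return
// the manager to its initial state.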
void TieredCpuBufferMgr::initializeMem() {
  allocators_[CpuTier::DRAM].first =
      std::make_unique<DramArena>(max_slab_size_ + kArenaBlockOverhead);
  allocators_[CpuTier::PMEM].first =
      std::make_unique<PMemArena>(max_slab_size_ + kArenaBlockOverhead);
  slab_to_allocator_map_.clear();
}

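// dump() reports, for each tier's allocator, the configured byte limit and the
// bytes currently handed out by its arena. With hypothetical numbers for a
// 16 GiB DRAM tier and an untouched 64 GiB PMEM tier, the output looks roughly
// like:
//
//   TieredCpuBufferMgr:
//    allocator[0]
//    limit = 17179869184
//    used = 536870912
//    allocator[1]
//    limit = 68719476736
//    used = 0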
std::string TieredCpuBufferMgr::dump() const {
  size_t allocator_num = 0;
  std::stringstream ss;
  ss << "TieredCpuBufferMgr:\n";
  for (auto& [allocator, allocator_limit] : allocators_) {
    ss << " allocator[" << allocator_num++ << "]\n limit = " << allocator_limit
       << "\n used = " << allocator->bytesUsed() << "\n";
  }
  return ss.str();
}

}  // namespace Buffer_Namespace