OmniSciDB  72c90bc290
Buffer_Namespace::TieredCpuBufferMgr Class Reference

#include <TieredCpuBufferMgr.h>

Inheritance diagram for Buffer_Namespace::TieredCpuBufferMgr (diagram not shown).
Collaboration diagram for Buffer_Namespace::TieredCpuBufferMgr (diagram not shown).

Public Member Functions

 TieredCpuBufferMgr (const int device_id, const size_t total_size, CudaMgr_Namespace::CudaMgr *cuda_mgr, const size_t min_slab_size, const size_t max_slab_size, const size_t page_size, const CpuTierSizeVector &cpu_tier_sizes, AbstractBufferMgr *parent_mgr=nullptr)
 
 ~TieredCpuBufferMgr () override
 
std::vector< std::pair< std::unique_ptr< Arena >, size_t > > & getAllocators ()
 
MgrType getMgrType () override
 
std::string getStringMgrType () override
 
Arena * getAllocatorForSlab (int32_t slab_num) const
 
std::string dump () const
 
Public Member Functions inherited from Buffer_Namespace::CpuBufferMgr
 CpuBufferMgr (const int device_id, const size_t max_buffer_pool_size, CudaMgr_Namespace::CudaMgr *cuda_mgr, const size_t min_slab_size, const size_t max_slab_size, const size_t page_size, AbstractBufferMgr *parent_mgr=nullptr)
 
 ~CpuBufferMgr () override
 
MgrType getMgrType () override
 
std::string getStringMgrType () override
 
void setAllocator (std::unique_ptr< DramArena > allocator)
 
Public Member Functions inherited from Buffer_Namespace::BufferMgr
 BufferMgr (const int device_id, const size_t max_buffer_size, const size_t min_slab_size, const size_t max_slab_size, const size_t page_size, AbstractBufferMgr *parent_mgr=0)
 Constructs a BufferMgr object that allocates max_buffer_size bytes. More...
 
 ~BufferMgr () override
 Destructor. More...
 
std::string printSlab (size_t slab_num)
 
std::string printSlabs () override
 
void clearSlabs ()
 
std::string printMap ()
 
void printSegs ()
 
std::string printSeg (BufferList::iterator &seg_it)
 
size_t getInUseSize () override
 
size_t getMaxSize () override
 
size_t getAllocated () override
 
size_t getMaxBufferSize ()
 
size_t getMaxSlabSize ()
 
size_t getPageSize ()
 
bool isAllocationCapped () override
 
const std::vector< BufferList > & getSlabSegments ()
 
AbstractBuffer * createBuffer (const ChunkKey &key, const size_t page_size=0, const size_t initial_size=0) override
 Creates a chunk with the specified key and page size. More...
 
void deleteBuffer (const ChunkKey &key, const bool purge=true) override
 Deletes the chunk with the specified key. More...
 
void deleteBuffersWithPrefix (const ChunkKey &key_prefix, const bool purge=true) override
 
AbstractBuffer * getBuffer (const ChunkKey &key, const size_t num_bytes=0) override
 Returns a pointer to the chunk with the specified key. More...
 
bool isBufferOnDevice (const ChunkKey &key) override
 Returns true if the chunk with the specified key is present in this buffer pool. More...
 
void fetchBuffer (const ChunkKey &key, AbstractBuffer *dest_buffer, const size_t num_bytes=0) override
 
AbstractBuffer * putBuffer (const ChunkKey &key, AbstractBuffer *d, const size_t num_bytes=0) override
 Puts the contents of d into the Buffer with ChunkKey key. More...
 
void checkpoint () override
 
void checkpoint (const int db_id, const int tb_id) override
 
void removeTableRelatedDS (const int db_id, const int table_id) override
 
AbstractBuffer * alloc (const size_t num_bytes=0) override
 client is responsible for deleting memory allocated for b->mem_ More...
 
void free (AbstractBuffer *buffer) override
 
size_t size ()
 Returns the total number of bytes allocated. More...
 
size_t getNumChunks () override
 
BufferList::iterator reserveBuffer (BufferList::iterator &seg_it, const size_t num_bytes)
 
void getChunkMetadataVecForKeyPrefix (ChunkMetadataVector &chunk_metadata_vec, const ChunkKey &key_prefix) override
 

Private Member Functions

void addSlab (const size_t slab_size) override
 
void freeAllMem () override
 
void initializeMem () override
 

Private Attributes

std::vector< std::pair< std::unique_ptr< Arena >, size_t > > allocators_
 
std::map< int32_t, Arena * > slab_to_allocator_map_
 

Additional Inherited Members

Protected Member Functions inherited from Buffer_Namespace::CpuBufferMgr
void allocateBuffer (BufferList::iterator segment_iter, const size_t page_size, const size_t initial_size) override
 
Protected Attributes inherited from Buffer_Namespace::CpuBufferMgr
CudaMgr_Namespace::CudaMgr * cuda_mgr_
 
Protected Attributes inherited from Buffer_Namespace::BufferMgr
const size_t max_buffer_pool_size_
 max number of bytes allocated for the buffer pool More...
 
const size_t min_slab_size_
 
const size_t max_slab_size_
 
const size_t page_size_
 
std::vector< int8_t * > slabs_
 
std::vector< BufferList > slab_segments_
 

Detailed Description

A CPU buffer manager that allocates its slabs from multiple CPU memory tiers: the DRAM arena is tried first and the PMEM arena second, with a separate byte limit per tier supplied through cpu_tier_sizes at construction time.

Definition at line 33 of file TieredCpuBufferMgr.h.

Constructor & Destructor Documentation

Buffer_Namespace::TieredCpuBufferMgr::TieredCpuBufferMgr ( const int  device_id,
const size_t  total_size,
CudaMgr_Namespace::CudaMgr *  cuda_mgr,
const size_t  min_slab_size,
const size_t  max_slab_size,
const size_t  page_size,
const CpuTierSizeVector &  cpu_tier_sizes,
AbstractBufferMgr *  parent_mgr = nullptr 
)

Definition at line 41 of file TieredCpuBufferMgr.cpp.

References allocators_, CHECK, Data_Namespace::DRAM, kArenaBlockOverhead, Buffer_Namespace::BufferMgr::max_slab_size_, Data_Namespace::numCpuTiers, and Data_Namespace::PMEM.

    : CpuBufferMgr(device_id,
                   total_size,
                   cuda_mgr,
                   min_slab_size,
                   max_slab_size,
                   page_size,
                   parent_mgr) {
  CHECK(cpu_tier_sizes.size() == numCpuTiers);
  allocators_.emplace_back(
      std::make_unique<DramArena>(max_slab_size_ + kArenaBlockOverhead),
      cpu_tier_sizes[CpuTier::DRAM]);
  allocators_.emplace_back(
      std::make_unique<PMemArena>(max_slab_size_ + kArenaBlockOverhead),
      cpu_tier_sizes[CpuTier::PMEM]);
}
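
A minimal construction sketch (not from the source): it assumes CpuTierSizeVector behaves like a std::vector<size_t> indexed by Data_Namespace::CpuTier, that a null CudaMgr is acceptable for a CPU-only setup, and that the relevant headers (e.g. TieredCpuBufferMgr.h) are included. The device id and all sizes are illustrative placeholders.

// Hypothetical per-tier limits: 16 GB of DRAM, 64 GB of PMEM.
CpuTierSizeVector cpu_tier_sizes(Data_Namespace::numCpuTiers, 0);
cpu_tier_sizes[Data_Namespace::CpuTier::DRAM] = 16ULL << 30;
cpu_tier_sizes[Data_Namespace::CpuTier::PMEM] = 64ULL << 30;

auto tiered_mgr = std::make_unique<Buffer_Namespace::TieredCpuBufferMgr>(
    /*device_id=*/0,
    /*total_size=*/80ULL << 30,    // assumed to cover the sum of the tier limits
    /*cuda_mgr=*/nullptr,          // assumption: no GPU manager needed here
    /*min_slab_size=*/256ULL << 20,
    /*max_slab_size=*/4ULL << 30,
    /*page_size=*/512,
    cpu_tier_sizes);
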
Buffer_Namespace::TieredCpuBufferMgr::~TieredCpuBufferMgr ( )
inline override

Definition at line 44 of file TieredCpuBufferMgr.h.

{
  // The destruction of the allocators automatically frees all memory
}

Member Function Documentation

void Buffer_Namespace::TieredCpuBufferMgr::addSlab ( const size_t  slab_size)
override private virtual

Reimplemented from Buffer_Namespace::CpuBufferMgr.

Definition at line 69 of file TieredCpuBufferMgr.cpp.

References allocators_, CHECK, Data_Namespace::DRAM, logger::INFO, LOG, Buffer_Namespace::BufferMgr::page_size_, Data_Namespace::PMEM, Buffer_Namespace::BufferMgr::slab_segments_, slab_to_allocator_map_, Buffer_Namespace::BufferMgr::slabs_, and anonymous_namespace{TieredCpuBufferMgr.cpp}::tier_to_string().

{
  CHECK(!allocators_.empty());
  CHECK(allocators_.begin()->first.get() != nullptr);
  slabs_.resize(slabs_.size() + 1);
  auto allocated_slab = false;
  CpuTier last_tier;
  for (auto allocator_type : {CpuTier::DRAM, CpuTier::PMEM}) {
    last_tier = allocator_type;
    auto& [allocator, allocator_limit] = allocators_.at(allocator_type);
    // If there is no space in the current allocator then move to the next one.
    if (allocator_limit >= allocator->bytesUsed() + slab_size) {
      try {
        slabs_.back() = reinterpret_cast<int8_t*>(allocator->allocate(slab_size));
      } catch (std::bad_alloc&) {
        // If anything goes wrong with an allocation, then throw an exception rather than
        // go to the next allocator.
        slabs_.resize(slabs_.size() - 1);
        throw FailedToCreateSlab(slab_size);
      }
      slab_to_allocator_map_[slabs_.size() - 1] = allocator.get();
      allocated_slab = true;
      break;
    }
  }
  if (allocated_slab) {
    // We allocated a new slab, so add segments for it.
    slab_segments_.resize(slab_segments_.size() + 1);
    slab_segments_[slab_segments_.size() - 1].push_back(
        BufferSeg(0, slab_size / page_size_));
    LOG(INFO) << "Allocated slab using " << tier_to_string(last_tier) << ".";
  } else {
    // None of the allocators allocated a slab, so revert to original size and throw.
    slabs_.resize(slabs_.size() - 1);
    throw FailedToCreateSlab(slab_size);
  }
}
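
The slab comes from the first tier whose configured limit still has room: once the DRAM arena's bytesUsed() plus the requested slab_size exceeds the DRAM limit, new slabs are taken from the PMEM arena, and FailedToCreateSlab is thrown only when no tier has room. A hedged sketch of how this shows up indirectly (assumptions: createBuffer() triggers slab creation when no existing slab has space, the ChunkKey values are made up, and tiered_mgr is the instance from the constructor sketch above):

ChunkKey key{1 /*db*/, 1 /*table*/, 1 /*column*/, 0 /*fragment*/};
AbstractBuffer* buf = tiered_mgr->createBuffer(
    key, tiered_mgr->getPageSize(), /*initial_size=*/1ULL << 30);
// dump() reports the per-tier limit and bytes used, so a spill-over from the
// DRAM arena into the PMEM arena is visible in the log.
LOG(INFO) << tiered_mgr->dump();
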


std::string Buffer_Namespace::TieredCpuBufferMgr::dump ( ) const

Definition at line 120 of file TieredCpuBufferMgr.cpp.

References allocators_.

{
  size_t allocator_num = 0;
  std::stringstream ss;
  ss << "TieredCpuBufferMgr:\n";
  for (auto& [allocator, allocator_limit] : allocators_) {
    ss << " allocator[" << allocator_num++ << "]\n limit = " << allocator_limit
       << "\n used = " << allocator->bytesUsed() << "\n";
  }
  return ss.str();
}
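
Given the format string above, the returned string looks like the following (the byte counts are placeholders, not real output):

TieredCpuBufferMgr:
 allocator[0]
 limit = 17179869184
 used = 4294967296
 allocator[1]
 limit = 68719476736
 used = 0
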
void Buffer_Namespace::TieredCpuBufferMgr::freeAllMem ( )
override private virtual

Reimplemented from Buffer_Namespace::CpuBufferMgr.

Definition at line 106 of file TieredCpuBufferMgr.cpp.

References allocators_, CHECK, and initializeMem().

{
  CHECK(!allocators_.empty());
  CHECK(allocators_.begin()->first.get() != nullptr);
  initializeMem();
}


Arena * Buffer_Namespace::TieredCpuBufferMgr::getAllocatorForSlab ( int32_t  slab_num) const

Definition at line 65 of file TieredCpuBufferMgr.cpp.

References shared::get_from_map(), and slab_to_allocator_map_.

{
  return shared::get_from_map(slab_to_allocator_map_, slab_num);
}
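
A small usage sketch (assumption: slab 0 exists, i.e. at least one slab has already been added, since the lookup expects the key to be present in slab_to_allocator_map_; tiered_mgr is the instance from the constructor sketch above):

// Report how much the arena backing slab 0 has handed out so far.
Arena* arena = tiered_mgr->getAllocatorForSlab(0);
LOG(INFO) << "slab 0 arena has allocated " << arena->bytesUsed() << " bytes";
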


std::vector<std::pair<std::unique_ptr<Arena>, size_t> >& Buffer_Namespace::TieredCpuBufferMgr::getAllocators ( )
inline

Definition at line 49 of file TieredCpuBufferMgr.h.

References allocators_.

{
  return allocators_;
}
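
A sketch of aggregating usage across tiers with this accessor (tiered_mgr is again the instance from the constructor sketch; the structured binding names the pair members for readability):

// Sum the bytes handed out by every tier's arena.
size_t total_used = 0;
for (auto& [allocator, allocator_limit] : tiered_mgr->getAllocators()) {
  total_used += allocator->bytesUsed();
}
LOG(INFO) << "total bytes used across CPU tiers: " << total_used;
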
MgrType Buffer_Namespace::TieredCpuBufferMgr::getMgrType ( )
inline override

Definition at line 53 of file TieredCpuBufferMgr.h.

{ return TIERED_CPU_MGR; }
std::string Buffer_Namespace::TieredCpuBufferMgr::getStringMgrType ( )
inline override

Definition at line 54 of file TieredCpuBufferMgr.h.

{ return ToString(TIERED_CPU_MGR); }
void Buffer_Namespace::TieredCpuBufferMgr::initializeMem ( )
override private virtual

Reimplemented from Buffer_Namespace::CpuBufferMgr.

Definition at line 112 of file TieredCpuBufferMgr.cpp.

References allocators_, Data_Namespace::DRAM, kArenaBlockOverhead, Buffer_Namespace::BufferMgr::max_slab_size_, Data_Namespace::PMEM, and slab_to_allocator_map_.

Referenced by freeAllMem().

{
  allocators_[CpuTier::DRAM].first =
      std::make_unique<DramArena>(max_slab_size_ + kArenaBlockOverhead);
  allocators_[CpuTier::PMEM].first =
      std::make_unique<PMemArena>(max_slab_size_ + kArenaBlockOverhead);
  slab_to_allocator_map_.clear();
}


Member Data Documentation

std::vector<std::pair<std::unique_ptr<Arena>, size_t> > Buffer_Namespace::TieredCpuBufferMgr::allocators_
private

Referenced by TieredCpuBufferMgr(), addSlab(), dump(), freeAllMem(), getAllocators(), and initializeMem().

std::map<int32_t, Arena*> Buffer_Namespace::TieredCpuBufferMgr::slab_to_allocator_map_
private

Definition at line 69 of file TieredCpuBufferMgr.h.

Referenced by addSlab(), getAllocatorForSlab(), and initializeMem().


The documentation for this class was generated from the following files:

TieredCpuBufferMgr.h
TieredCpuBufferMgr.cpp