OmniSciDB  06b3bd477c
QueryMemoryInitializer.h
/*
 * Copyright 2019 OmniSci, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "GpuMemUtils.h"
#include "ResultSet.h"

#include <memory>

#ifdef HAVE_CUDA
#include <cuda.h>
#else
#include <Shared/nocuda.h>
#endif

class QueryMemoryInitializer {
 public:
  // Row-based execution constructor
  QueryMemoryInitializer(const RelAlgExecutionUnit& ra_exe_unit,
                         const QueryMemoryDescriptor& query_mem_desc,
                         const int device_id,
                         const ExecutorDeviceType device_type,
                         const ExecutorDispatchMode dispatch_mode,
                         const bool output_columnar,
                         const bool sort_on_gpu,
                         const int64_t num_rows,
                         const std::vector<std::vector<const int8_t*>>& col_buffers,
                         const std::vector<std::vector<uint64_t>>& frag_offsets,
                         RenderAllocatorMap* render_allocator_map,
                         RenderInfo* render_info,
                         std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner,
                         DeviceAllocator* gpu_allocator,
                         const Executor* executor);

  // Table functions execution constructor
  QueryMemoryInitializer(const TableFunctionExecutionUnit& exe_unit,
                         const QueryMemoryDescriptor& query_mem_desc,
                         const int device_id,
                         const ExecutorDeviceType device_type,
                         const int64_t num_rows,
                         const std::vector<std::vector<const int8_t*>>& col_buffers,
                         const std::vector<std::vector<uint64_t>>& frag_offsets,
                         std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner,
                         DeviceAllocator* device_allocator,
                         const Executor* executor);

  const auto getCountDistinctBitmapPtr() const { return count_distinct_bitmap_mem_; }

  const auto getCountDistinctHostPtr() const { return count_distinct_bitmap_host_mem_; }

  const auto getCountDistinctBitmapBytes() const {
    return count_distinct_bitmap_mem_bytes_;
  }

  ResultSet* getResultSet(const size_t index) const {
    CHECK_LT(index, result_sets_.size());
    return result_sets_[index].get();
  }

  std::unique_ptr<ResultSet> getResultSetOwned(const size_t index) {
    CHECK_LT(index, result_sets_.size());
    return std::move(result_sets_[index]);
  }

  void resetResultSet(const size_t index) {
    CHECK_LT(index, result_sets_.size());
    result_sets_[index].reset();
  }

  int64_t getAggInitValForIndex(const size_t index) const {
    CHECK_LT(index, init_agg_vals_.size());
    return init_agg_vals_[index];
  }

  const auto getGroupByBuffersPtr() {
    return reinterpret_cast<int64_t**>(group_by_buffers_.data());
  }

  const auto getGroupByBuffersSize() const { return group_by_buffers_.size(); }

  const auto getNumBuffers() const {
    CHECK_EQ(num_buffers_, group_by_buffers_.size());
    return num_buffers_;
  }

#ifdef HAVE_CUDA
  GpuGroupByBuffers setupTableFunctionGpuBuffers(
      const QueryMemoryDescriptor& query_mem_desc,
      const int device_id,
      const unsigned block_size_x,
      const unsigned grid_size_x);
#endif

  void copyGroupByBuffersFromGpu(Data_Namespace::DataMgr* data_mgr,
                                 const QueryMemoryDescriptor& query_mem_desc,
                                 const size_t entry_count,
                                 const GpuGroupByBuffers& gpu_group_by_buffers,
                                 const RelAlgExecutionUnit* ra_exe_unit,
                                 const unsigned block_size_x,
                                 const unsigned grid_size_x,
                                 const int device_id,
                                 const bool prepend_index_buffer) const;

 private:
  void initGroupByBuffer(int64_t* buffer,
                         const RelAlgExecutionUnit& ra_exe_unit,
                         const QueryMemoryDescriptor& query_mem_desc,
                         const ExecutorDeviceType device_type,
                         const bool output_columnar,
                         const Executor* executor);

  void initGroups(const QueryMemoryDescriptor& query_mem_desc,
                  int64_t* groups_buffer,
                  const std::vector<int64_t>& init_vals,
                  const int32_t groups_buffer_entry_count,
                  const size_t warp_size,
                  const Executor* executor);

  void initColumnarGroups(const QueryMemoryDescriptor& query_mem_desc,
                          int64_t* groups_buffer,
                          const std::vector<int64_t>& init_vals,
                          const Executor* executor);

  void initColumnPerRow(const QueryMemoryDescriptor& query_mem_desc,
                        int8_t* row_ptr,
                        const size_t bin,
                        const std::vector<int64_t>& init_vals,
                        const std::vector<ssize_t>& bitmap_sizes);

  void allocateCountDistinctGpuMem(const QueryMemoryDescriptor& query_mem_desc);

  std::vector<ssize_t> allocateCountDistinctBuffers(
      const QueryMemoryDescriptor& query_mem_desc,
      const bool deferred,
      const Executor* executor);

  int64_t allocateCountDistinctBitmap(const size_t bitmap_byte_sz);

  int64_t allocateCountDistinctSet();

#ifdef HAVE_CUDA
  GpuGroupByBuffers prepareTopNHeapsDevBuffer(const QueryMemoryDescriptor& query_mem_desc,
                                              const CUdeviceptr init_agg_vals_dev_ptr,
                                              const size_t n,
                                              const int device_id,
                                              const unsigned block_size_x,
                                              const unsigned grid_size_x);

  GpuGroupByBuffers createAndInitializeGroupByBufferGpu(
      const RelAlgExecutionUnit& ra_exe_unit,
      const QueryMemoryDescriptor& query_mem_desc,
      const CUdeviceptr init_agg_vals_dev_ptr,
      const int device_id,
      const ExecutorDispatchMode dispatch_mode,
      const unsigned block_size_x,
      const unsigned grid_size_x,
      const int8_t warp_size,
      const bool can_sort_on_gpu,
      const bool output_columnar,
      RenderAllocator* render_allocator);
#endif

  size_t computeNumberOfBuffers(const QueryMemoryDescriptor& query_mem_desc,
                                const ExecutorDeviceType device_type,
                                const Executor* executor) const;

  void compactProjectionBuffersCpu(const QueryMemoryDescriptor& query_mem_desc,
                                   const size_t projection_count);
  void compactProjectionBuffersGpu(const QueryMemoryDescriptor& query_mem_desc,
                                   Data_Namespace::DataMgr* data_mgr,
                                   const GpuGroupByBuffers& gpu_group_by_buffers,
                                   const size_t projection_count,
                                   const int device_id);

  void applyStreamingTopNOffsetCpu(const QueryMemoryDescriptor& query_mem_desc,
                                   const RelAlgExecutionUnit& ra_exe_unit);

  void applyStreamingTopNOffsetGpu(Data_Namespace::DataMgr* data_mgr,
                                   const QueryMemoryDescriptor& query_mem_desc,
                                   const GpuGroupByBuffers& gpu_group_by_buffers,
                                   const RelAlgExecutionUnit& ra_exe_unit,
                                   const unsigned total_thread_count,
                                   const int device_id);

  const int64_t num_rows_;

  std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner_;
  std::vector<std::unique_ptr<ResultSet>> result_sets_;

  std::vector<int64_t> init_agg_vals_;

  const size_t num_buffers_;
  std::vector<int64_t*> group_by_buffers_;

  CUdeviceptr count_distinct_bitmap_mem_;
  size_t count_distinct_bitmap_mem_bytes_;
  int8_t* count_distinct_bitmap_crt_ptr_;
  int8_t* count_distinct_bitmap_host_mem_;

  DeviceAllocator* device_allocator_;
  std::vector<Data_Namespace::AbstractBuffer*> temporary_buffers_;

  friend class Executor;  // Accesses result_sets_
  friend class QueryExecutionContext;
};
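
The getResultSet / getResultSetOwned / resetResultSet accessors above all follow one pattern: bounds-check the index with CHECK_LT, then either borrow the raw pointer, move the std::unique_ptr out to transfer ownership, or drop the owned object. Below is a minimal, self-contained sketch of that pattern; Result and ResultStore are illustrative stand-ins rather than OmniSci types, and assert stands in for CHECK_LT.

// Standalone sketch of the bounds-checked accessor / ownership-transfer
// pattern used by getResultSet(), getResultSetOwned() and resetResultSet().
// "Result" and "ResultStore" are illustrative stand-ins, not OmniSci types.
#include <cassert>
#include <cstddef>
#include <memory>
#include <utility>
#include <vector>

struct Result {
  int value;
};

class ResultStore {
 public:
  void add(int v) { results_.push_back(std::make_unique<Result>(Result{v})); }

  // Borrow: the caller gets a raw pointer, the store keeps ownership.
  Result* get(const std::size_t index) const {
    assert(index < results_.size());  // stands in for CHECK_LT
    return results_[index].get();
  }

  // Transfer: the caller takes ownership, the slot is left holding nullptr.
  std::unique_ptr<Result> take(const std::size_t index) {
    assert(index < results_.size());
    return std::move(results_[index]);
  }

  // Release: drop the owned object early, as resetResultSet() does.
  void reset(const std::size_t index) {
    assert(index < results_.size());
    results_[index].reset();
  }

 private:
  std::vector<std::unique_ptr<Result>> results_;
};

int main() {
  ResultStore store;
  store.add(42);
  Result* borrowed = store.get(0);  // store still owns the object
  auto owned = store.take(0);       // ownership moves to the caller
  store.reset(0);                   // no-op here: the slot is already empty
  return borrowed == owned.get() ? 0 : 1;
}

Note that moving a unique_ptr out leaves a null slot behind, which is why both the borrow and the transfer paths only check that the index is in range, not whether the slot is still populated.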
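The #ifdef HAVE_CUDA / #else / Shared/nocuda.h includes near the top of the header keep the class declaration compilable in CPU-only builds: when the CUDA toolkit is absent, nocuda.h supplies a stand-in CUdeviceptr typedef (an unsigned long long) so members and signatures that name CUdeviceptr or GpuGroupByBuffers still parse. The following is a minimal sketch of that fallback idiom, with the typedef inlined here instead of pulled from Shared/nocuda.h; DeviceBuffer is an illustrative type, not part of OmniSci.

// Minimal sketch of the CUDA-type fallback idiom used by this header.
// HAVE_CUDA is the same build flag the header tests; the typedef below
// mirrors the stand-in that Shared/nocuda.h provides.
#ifdef HAVE_CUDA
#include <cuda.h>  // real CUdeviceptr from the CUDA driver API
#else
typedef unsigned long long CUdeviceptr;  // CPU-only stand-in, as in nocuda.h
#endif

#include <cstddef>
#include <cstdio>

// A member or parameter of type CUdeviceptr stays well-formed either way;
// on CPU-only builds it simply holds an integer that is never dereferenced.
struct DeviceBuffer {
  CUdeviceptr ptr{0};
  std::size_t bytes{0};
};

int main() {
  DeviceBuffer buf;
  std::printf("sizeof(CUdeviceptr) = %zu, ptr = %llu\n",
              sizeof(CUdeviceptr),
              static_cast<unsigned long long>(buf.ptr));
  return 0;
}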