OmniSciDB  72c90bc290
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
GroupByAndAggregate.h
Go to the documentation of this file.
1 /*
2  * Copyright 2022 HEAVY.AI, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef QUERYENGINE_GROUPBYANDAGGREGATE_H
18 #define QUERYENGINE_GROUPBYANDAGGREGATE_H
19 
20 #include "BufferCompaction.h"
21 #include "ColumnarResults.h"
22 #include "CompilationOptions.h"
23 #include "GpuMemUtils.h"
24 #include "GpuSharedMemoryContext.h"
25 #include "InputMetadata.h"
26 #include "QueryExecutionContext.h"
27 #include "Rendering/RenderInfo.h"
28 #include "RuntimeFunctions.h"
29 
31 
32 #include "../Shared/sqltypes.h"
33 #include "Logger/Logger.h"
34 
35 #include <llvm/IR/Function.h>
36 #include <llvm/IR/Instructions.h>
37 #include <llvm/IR/Value.h>
38 #include <boost/algorithm/string/join.hpp>
39 #include <boost/make_unique.hpp>
40 
41 #include <stack>
42 #include <vector>
43 
44 extern bool g_enable_smem_group_by;
45 extern bool g_bigint_count;
46 
47 struct ColRangeInfo {
49  int64_t min;
50  int64_t max;
51  int64_t bucket;
52  bool has_nulls;
53  bool isEmpty() const;
54 };
55 
/**
 * Result of the keyless group-by analysis: whether the keyless hash
 * optimization applies and, if so, which aggregate target it keys on.
 */
struct KeylessInfo {
  const bool keyless;          // true when the keyless hash layout can be used
  const int32_t target_index;  // NOTE(review): presumably the index of the
                               // driving target expression -- confirm at call sites
};
60 
62  public:
63  GroupByAndAggregate(Executor* executor,
64  const ExecutorDeviceType device_type,
65  const RelAlgExecutionUnit& ra_exe_unit,
66  const std::vector<InputTableInfo>& query_infos,
67  std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner,
68  const std::optional<int64_t>& group_cardinality_estimation);
69 
70  // returns true iff checking the error code after every row
71  // is required -- slow path group by queries for now
72  bool codegen(llvm::Value* filter_result,
73  llvm::BasicBlock* sc_false,
75  const CompilationOptions& co,
76  const GpuSharedMemoryContext& gpu_smem_context);
77 
78  static size_t shard_count_for_top_groups(const RelAlgExecutionUnit& ra_exe_unit);
79 
80  private:
81  bool gpuCanHandleOrderEntries(const std::list<Analyzer::OrderEntry>& order_entries);
82 
83  std::unique_ptr<QueryMemoryDescriptor> initQueryMemoryDescriptor(
84  const bool allow_multifrag,
85  const size_t max_groups_buffer_entry_count,
86  const int8_t crt_min_byte_width,
87  RenderInfo* render_info,
88  const bool output_columnar_hint);
89 
90  std::unique_ptr<QueryMemoryDescriptor> initQueryMemoryDescriptorImpl(
91  const bool allow_multifrag,
92  const size_t max_groups_buffer_entry_count,
93  const int8_t crt_min_byte_width,
94  const bool sort_on_gpu_hint,
95  RenderInfo* render_info,
96  const bool must_use_baseline_sort,
97  const bool output_columnar_hint);
98 
99  int64_t getShardedTopBucket(const ColRangeInfo& col_range_info,
100  const size_t shard_count) const;
101 
102  llvm::Value* codegenOutputSlot(llvm::Value* groups_buffer,
103  const QueryMemoryDescriptor& query_mem_desc,
104  const CompilationOptions& co,
105  DiamondCodegen& diamond_codegen);
106 
107  std::tuple<llvm::Value*, llvm::Value*> codegenGroupBy(
108  const QueryMemoryDescriptor& query_mem_desc,
109  const CompilationOptions& co,
110  DiamondCodegen& codegen);
111 
112  llvm::Value* codegenVarlenOutputBuffer(const QueryMemoryDescriptor& query_mem_desc);
113 
114  std::tuple<llvm::Value*, llvm::Value*> codegenSingleColumnPerfectHash(
115  const QueryMemoryDescriptor& query_mem_desc,
116  const CompilationOptions& co,
117  llvm::Value* groups_buffer,
118  llvm::Value* group_expr_lv_translated,
119  llvm::Value* group_expr_lv_original,
120  const int32_t row_size_quad);
121 
122  std::tuple<llvm::Value*, llvm::Value*> codegenMultiColumnPerfectHash(
123  llvm::Value* groups_buffer,
124  llvm::Value* group_key,
125  llvm::Value* key_size_lv,
126  const QueryMemoryDescriptor& query_mem_desc,
127  const int32_t row_size_quad);
128  llvm::Function* codegenPerfectHashFunction();
129 
130  std::tuple<llvm::Value*, llvm::Value*> codegenMultiColumnBaselineHash(
131  const CompilationOptions& co,
132  llvm::Value* groups_buffer,
133  llvm::Value* group_key,
134  llvm::Value* key_size_lv,
135  const QueryMemoryDescriptor& query_mem_desc,
136  const size_t key_width,
137  const int32_t row_size_quad);
138 
140 
141  static int64_t getBucketedCardinality(const ColRangeInfo& col_range_info);
142 
143  llvm::Value* convertNullIfAny(const SQLTypeInfo& arg_type,
144  const TargetInfo& agg_info,
145  llvm::Value* target);
146 
147  bool codegenAggCalls(const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
148  llvm::Value* varlen_output_buffer,
149  const std::vector<llvm::Value*>& agg_out_vec,
150  QueryMemoryDescriptor& query_mem_desc,
151  const CompilationOptions& co,
152  const GpuSharedMemoryContext& gpu_smem_context,
153  DiamondCodegen& diamond_codegen);
154 
155  llvm::Value* codegenWindowRowPointer(const Analyzer::WindowFunction* window_func,
156  const QueryMemoryDescriptor& query_mem_desc,
157  const CompilationOptions& co,
158  DiamondCodegen& diamond_codegen);
159 
160  llvm::Value* codegenAggColumnPtr(
161  llvm::Value* output_buffer_byte_stream,
162  llvm::Value* out_row_idx,
163  const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
164  const QueryMemoryDescriptor& query_mem_desc,
165  const size_t chosen_bytes,
166  const size_t agg_out_off,
167  const size_t target_idx);
168 
169  void codegenEstimator(std::stack<llvm::BasicBlock*>& array_loops,
170  DiamondCodegen& diamond_codegen,
171  const QueryMemoryDescriptor& query_mem_desc,
172  const CompilationOptions&);
173 
174  void codegenCountDistinct(const size_t target_idx,
175  const Analyzer::Expr* target_expr,
176  std::vector<llvm::Value*>& agg_args,
177  const QueryMemoryDescriptor&,
178  const ExecutorDeviceType);
179 
180  void codegenApproxQuantile(const size_t target_idx,
181  const Analyzer::Expr* target_expr,
182  std::vector<llvm::Value*>& agg_args,
183  const QueryMemoryDescriptor& query_mem_desc,
184  const ExecutorDeviceType device_type);
185 
186  void codegenMode(const size_t target_idx,
187  const Analyzer::Expr* target_expr,
188  std::vector<llvm::Value*>& agg_args,
189  const QueryMemoryDescriptor& query_mem_desc,
190  const ExecutorDeviceType device_type);
191 
192  llvm::Value* getAdditionalLiteral(const int32_t off);
193 
194  std::vector<llvm::Value*> codegenAggArg(const Analyzer::Expr* target_expr,
195  const CompilationOptions& co);
196 
197  llvm::Value* emitCall(const std::string& fname, const std::vector<llvm::Value*>& args);
198 
199  void checkErrorCode(llvm::Value* retCode);
200 
201  bool needsUnnestDoublePatch(llvm::Value const* val_ptr,
202  const std::string& agg_base_name,
203  const bool threads_share_memory,
204  const CompilationOptions& co) const;
205 
206  void prependForceSync();
207 
210  const std::vector<InputTableInfo>& query_infos_;
211  std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner_;
214 
215  const std::optional<int64_t> group_cardinality_estimation_;
216 
217  friend class Executor;
218  friend class QueryMemoryDescriptor;
219  friend class CodeGenerator;
220  friend class ExecutionKernel;
221  friend struct TargetExprCodegen;
223 };
224 
225 inline size_t get_count_distinct_sub_bitmap_count(const size_t bitmap_sz_bits,
227  const ExecutorDeviceType device_type) {
228  // For count distinct on a column with a very small number of distinct values
229  // contention can be very high, especially for non-grouped queries. We'll split
230  // the bitmap into multiple sub-bitmaps which are unified to get the full result.
231  // The threshold value for bitmap_sz_bits works well on Kepler.
232  return bitmap_sz_bits < 50000 && ra_exe_unit.groupby_exprs.empty() &&
233  (device_type == ExecutorDeviceType::GPU || g_cluster)
234  ? 64 // NB: must be a power of 2 to keep runtime offset computations cheap
235  : 1;
236 }
237 
238 #endif // QUERYENGINE_GROUPBYANDAGGREGATE_H
const RelAlgExecutionUnit & ra_exe_unit
bool g_enable_smem_group_by
bool gpuCanHandleOrderEntries(const std::list< Analyzer::OrderEntry > &order_entries)
static int64_t getBucketedCardinality(const ColRangeInfo &col_range_info)
llvm::Value * getAdditionalLiteral(const int32_t off)
llvm::Value * codegenAggColumnPtr(llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const QueryMemoryDescriptor &query_mem_desc, const size_t chosen_bytes, const size_t agg_out_off, const size_t target_idx)
: returns the pointer to where the aggregation should be stored.
bool codegen(llvm::Value *filter_result, llvm::BasicBlock *sc_false, QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const GpuSharedMemoryContext &gpu_smem_context)
const bool keyless
std::unique_ptr< QueryMemoryDescriptor > initQueryMemoryDescriptorImpl(const bool allow_multifrag, const size_t max_groups_buffer_entry_count, const int8_t crt_min_byte_width, const bool sort_on_gpu_hint, RenderInfo *render_info, const bool must_use_baseline_sort, const bool output_columnar_hint)
void codegenMode(const size_t target_idx, const Analyzer::Expr *target_expr, std::vector< llvm::Value * > &agg_args, const QueryMemoryDescriptor &query_mem_desc, const ExecutorDeviceType device_type)
ColRangeInfo getColRangeInfo()
QueryDescriptionType hash_type_
llvm::Value * emitCall(const std::string &fname, const std::vector< llvm::Value * > &args)
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
Macros and functions for groupby buffer compaction.
llvm::Value * codegenVarlenOutputBuffer(const QueryMemoryDescriptor &query_mem_desc)
void codegenApproxQuantile(const size_t target_idx, const Analyzer::Expr *target_expr, std::vector< llvm::Value * > &agg_args, const QueryMemoryDescriptor &query_mem_desc, const ExecutorDeviceType device_type)
void checkErrorCode(llvm::Value *retCode)
const std::list< std::shared_ptr< Analyzer::Expr > > groupby_exprs
bool needsUnnestDoublePatch(llvm::Value const *val_ptr, const std::string &agg_base_name, const bool threads_share_memory, const CompilationOptions &co) const
std::tuple< llvm::Value *, llvm::Value * > codegenMultiColumnBaselineHash(const CompilationOptions &co, llvm::Value *groups_buffer, llvm::Value *group_key, llvm::Value *key_size_lv, const QueryMemoryDescriptor &query_mem_desc, const size_t key_width, const int32_t row_size_quad)
ExecutorDeviceType
size_t get_count_distinct_sub_bitmap_count(const size_t bitmap_sz_bits, const RelAlgExecutionUnit &ra_exe_unit, const ExecutorDeviceType device_type)
bool isEmpty() const
GroupByAndAggregate(Executor *executor, const ExecutorDeviceType device_type, const RelAlgExecutionUnit &ra_exe_unit, const std::vector< InputTableInfo > &query_infos, std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, const std::optional< int64_t > &group_cardinality_estimation)
llvm::Value * convertNullIfAny(const SQLTypeInfo &arg_type, const TargetInfo &agg_info, llvm::Value *target)
std::tuple< llvm::Value *, llvm::Value * > codegenSingleColumnPerfectHash(const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, llvm::Value *groups_buffer, llvm::Value *group_expr_lv_translated, llvm::Value *group_expr_lv_original, const int32_t row_size_quad)
bool codegenAggCalls(const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, llvm::Value *varlen_output_buffer, const std::vector< llvm::Value * > &agg_out_vec, QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, const GpuSharedMemoryContext &gpu_smem_context, DiamondCodegen &diamond_codegen)
std::tuple< llvm::Value *, llvm::Value * > codegenGroupBy(const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, DiamondCodegen &codegen)
bool g_bigint_count
void codegenCountDistinct(const size_t target_idx, const Analyzer::Expr *target_expr, std::vector< llvm::Value * > &agg_args, const QueryMemoryDescriptor &, const ExecutorDeviceType)
std::tuple< llvm::Value *, llvm::Value * > codegenMultiColumnPerfectHash(llvm::Value *groups_buffer, llvm::Value *group_key, llvm::Value *key_size_lv, const QueryMemoryDescriptor &query_mem_desc, const int32_t row_size_quad)
int64_t getShardedTopBucket(const ColRangeInfo &col_range_info, const size_t shard_count) const
const int32_t target_index
const std::vector< InputTableInfo > & query_infos_
std::unique_ptr< QueryMemoryDescriptor > initQueryMemoryDescriptor(const bool allow_multifrag, const size_t max_groups_buffer_entry_count, const int8_t crt_min_byte_width, RenderInfo *render_info, const bool output_columnar_hint)
const ExecutorDeviceType device_type_
void codegenEstimator(std::stack< llvm::BasicBlock * > &array_loops, DiamondCodegen &diamond_codegen, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &)
static size_t shard_count_for_top_groups(const RelAlgExecutionUnit &ra_exe_unit)
std::vector< llvm::Value * > codegenAggArg(const Analyzer::Expr *target_expr, const CompilationOptions &co)
llvm::Function * codegenPerfectHashFunction()
llvm::Value * codegenWindowRowPointer(const Analyzer::WindowFunction *window_func, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, DiamondCodegen &diamond_codegen)
const std::optional< int64_t > group_cardinality_estimation_
llvm::Value * codegenOutputSlot(llvm::Value *groups_buffer, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, DiamondCodegen &diamond_codegen)
QueryDescriptionType
Definition: Types.h:29
bool g_cluster
const RelAlgExecutionUnit & ra_exe_unit_