GroupByAndAggregate.h

/*
 * Copyright 2017 MapD Technologies, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef QUERYENGINE_GROUPBYANDAGGREGATE_H
#define QUERYENGINE_GROUPBYANDAGGREGATE_H

#include "BufferCompaction.h"
#include "ColumnarResults.h"
#include "CompilationOptions.h"
#include "GpuMemUtils.h"
#include "InputMetadata.h"
#include "QueryExecutionContext.h"
#include "Rendering/RenderInfo.h"
#include "RuntimeFunctions.h"

#include "../Shared/sqltypes.h"
#include "Shared/Logger.h"

#include <llvm/IR/Function.h>
#include <llvm/IR/Instructions.h>
#include <llvm/IR/Value.h>
#include <boost/algorithm/string/join.hpp>
#include <boost/make_unique.hpp>

#include <stack>
#include <vector>

extern bool g_enable_smem_group_by;
extern bool g_bigint_count;

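// Signals that a group-by reduction ran out of output slots; callers can catch
// this to retry with a larger group-by buffer.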
class ReductionRanOutOfSlots : public std::runtime_error {
 public:
  ReductionRanOutOfSlots() : std::runtime_error("ReductionRanOutOfSlots") {}
};

inline std::string nullable_str_to_string(const NullableString& str) {
  auto nptr = boost::get<void*>(&str);
  if (nptr) {
    CHECK(!*nptr);
    return "NULL";
  }
  auto sptr = boost::get<std::string>(&str);
  CHECK(sptr);
  return *sptr;
}

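// Renders a TargetValue as a string, recursing into array elements and mapping
// each type's null sentinel to "NULL". Illustrative example: an INT slot whose
// value equals inline_int_null_val(ti) prints as "NULL", and an integer array
// holding {1, <null>, 3} prints as "{1, NULL, 3}" when delim is ", ".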
inline std::string datum_to_string(const TargetValue& tv,
                                   const SQLTypeInfo& ti,
                                   const std::string& delim) {
  if (ti.is_array()) {
    const auto array_tv = boost::get<ArrayTargetValue>(&tv);
    CHECK(array_tv);
    if (array_tv->is_initialized()) {
      const auto& vec = array_tv->get();
      std::vector<std::string> elem_strs;
      elem_strs.reserve(vec.size());
      const auto& elem_ti = ti.get_elem_type();
      for (const auto& elem_tv : vec) {
        elem_strs.push_back(datum_to_string(elem_tv, elem_ti, delim));
      }
      return "{" + boost::algorithm::join(elem_strs, delim) + "}";
    }
    return "NULL";
  }
  const auto scalar_tv = boost::get<ScalarTargetValue>(&tv);
  if (ti.is_time()) {
    Datum datum;
    datum.bigintval = *boost::get<int64_t>(scalar_tv);
    if (datum.bigintval == NULL_BIGINT) {
      return "NULL";
    }
    return DatumToString(datum, ti);
  }
  if (ti.is_boolean()) {
    const auto bool_val = *boost::get<int64_t>(scalar_tv);
    return bool_val == NULL_BOOLEAN ? "NULL" : (bool_val ? "true" : "false");
  }
  auto iptr = boost::get<int64_t>(scalar_tv);
  if (iptr) {
    return *iptr == inline_int_null_val(ti) ? "NULL" : std::to_string(*iptr);
  }
  auto fptr = boost::get<float>(scalar_tv);
  if (fptr) {
    return *fptr == inline_fp_null_val(ti) ? "NULL" : std::to_string(*fptr);
  }
  auto dptr = boost::get<double>(scalar_tv);
  if (dptr) {
    return *dptr == inline_fp_null_val(ti.is_decimal() ? SQLTypeInfo(kDOUBLE, false) : ti)
               ? "NULL"
               : std::to_string(*dptr);
  }
  auto sptr = boost::get<NullableString>(scalar_tv);
  CHECK(sptr);
  return nullable_str_to_string(*sptr);
}

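// Value range of a group-by expression: the chosen query description (hash)
// type plus the observed min/max, bucket size, and null presence that feed the
// perfect-hash vs. baseline layout decision. min == 0 && max == -1 is the
// sentinel for an empty range.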
struct ColRangeInfo {
  QueryDescriptionType hash_type_;
  int64_t min;
  int64_t max;
  int64_t bucket;
  bool has_nulls;
  bool isEmpty() { return min == 0 && max == -1; }
};

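// Outcome of the keyless-hash check: with `keyless` set, the group-by buffer
// can omit the key columns, and `target_index` identifies the target
// expression that makes the keyless layout possible.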
struct KeylessInfo {
  const bool keyless;
  const int32_t target_index;
  const bool shared_mem_support;  // TODO(Saman) remove, all aggregate operations
                                  // should eventually be potentially done with
                                  // shared memory. The decision will be made when
                                  // the query memory descriptor is created, not
                                  // here. This member just indicates the
                                  // possibility.
};

class GroupByAndAggregate {
 public:
  GroupByAndAggregate(Executor* executor,
                      const ExecutorDeviceType device_type,
                      const RelAlgExecutionUnit& ra_exe_unit,
                      const std::vector<InputTableInfo>& query_infos,
                      std::shared_ptr<RowSetMemoryOwner>);

  // returns true iff checking the error code after every row
  // is required -- slow path group by queries for now
  bool codegen(llvm::Value* filter_result,
               llvm::BasicBlock* sc_false,
               const QueryMemoryDescriptor& query_mem_desc,
               const CompilationOptions& co);

  static void addTransientStringLiterals(
      const RelAlgExecutionUnit& ra_exe_unit,
      Executor* executor,
      std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner);

  static size_t shard_count_for_top_groups(const RelAlgExecutionUnit& ra_exe_unit,
                                           const Catalog_Namespace::Catalog& catalog);

 private:
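  // Helper that emits the canonical if/then "diamond" control-flow shape around
  // a condition. A sketch of the CFG it manages, inferred from the member names
  // and constructor arguments below:
  //
  //            cond
  //           /    \
  //     cond_true_  |
  //           \    /
  //         cond_false_
  //
  // The destructor emits the closing branch; setChainToNext() makes the true
  // block fall through to the next diamond, and setFalseTarget() redirects the
  // false edge (which share_false_edge_with_parent can share with parent_).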
  struct DiamondCodegen {
    DiamondCodegen(llvm::Value* cond,
                   Executor* executor,
                   const bool chain_to_next,
                   const std::string& label_prefix,
                   DiamondCodegen* parent,
                   const bool share_false_edge_with_parent);
    void setChainToNext();
    void setFalseTarget(llvm::BasicBlock* cond_false);
    ~DiamondCodegen();

    Executor* executor_;
    llvm::BasicBlock* cond_true_;
    llvm::BasicBlock* cond_false_;
    llvm::BasicBlock* orig_cond_false_;
    bool chain_to_next_;
    DiamondCodegen* parent_;
  };

  bool supportedTypeForGpuSharedMemUsage(const SQLTypeInfo& target_type_info) const;

  static bool supportedExprForGpuSharedMemUsage(Analyzer::Expr* expr);

  bool gpuCanHandleOrderEntries(const std::list<Analyzer::OrderEntry>& order_entries);

  std::unique_ptr<QueryMemoryDescriptor> initQueryMemoryDescriptor(
      const bool allow_multifrag,
      const size_t max_groups_buffer_entry_count,
      const int8_t crt_min_byte_width,
      RenderInfo* render_info,
      const bool output_columnar_hint);

  std::unique_ptr<QueryMemoryDescriptor> initQueryMemoryDescriptorImpl(
      const bool allow_multifrag,
      const size_t max_groups_buffer_entry_count,
      const int8_t crt_min_byte_width,
      const bool sort_on_gpu_hint,
      RenderInfo* render_info,
      const bool must_use_baseline_sort,
      const bool output_columnar_hint);

  int64_t getShardedTopBucket(const ColRangeInfo& col_range_info,
                              const size_t shard_count) const;

  void addTransientStringLiterals();

  CountDistinctDescriptors initCountDistinctDescriptors();

  llvm::Value* codegenOutputSlot(llvm::Value* groups_buffer,
                                 const QueryMemoryDescriptor& query_mem_desc,
                                 const CompilationOptions& co,
                                 DiamondCodegen& diamond_codegen);

  std::tuple<llvm::Value*, llvm::Value*> codegenGroupBy(
      const QueryMemoryDescriptor& query_mem_desc,
      const CompilationOptions& co,
      DiamondCodegen& codegen);

  std::tuple<llvm::Value*, llvm::Value*> codegenSingleColumnPerfectHash(
      const QueryMemoryDescriptor& query_mem_desc,
      const CompilationOptions& co,
      llvm::Value* groups_buffer,
      llvm::Value* group_expr_lv_translated,
      llvm::Value* group_expr_lv_original,
      const int32_t row_size_quad);

  std::tuple<llvm::Value*, llvm::Value*> codegenMultiColumnPerfectHash(
      llvm::Value* groups_buffer,
      llvm::Value* group_key,
      llvm::Value* key_size_lv,
      const QueryMemoryDescriptor& query_mem_desc,
      const int32_t row_size_quad);
  llvm::Function* codegenPerfectHashFunction();

  std::tuple<llvm::Value*, llvm::Value*> codegenMultiColumnBaselineHash(
      const CompilationOptions& co,
      llvm::Value* groups_buffer,
      llvm::Value* group_key,
      llvm::Value* key_size_lv,
      const QueryMemoryDescriptor& query_mem_desc,
      const size_t key_width,
      const int32_t row_size_quad);

  ColRangeInfo getColRangeInfo();

  ColRangeInfo getExprRangeInfo(const Analyzer::Expr* expr) const;

  static int64_t getBucketedCardinality(const ColRangeInfo& col_range_info);

  KeylessInfo getKeylessInfo(const std::vector<Analyzer::Expr*>& target_expr_list,
                             const bool is_group_by) const;

  llvm::Value* convertNullIfAny(const SQLTypeInfo& arg_type,
                                const TargetInfo& agg_info,
                                llvm::Value* target);

  bool codegenAggCalls(const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
                       const std::vector<llvm::Value*>& agg_out_vec,
                       const QueryMemoryDescriptor& query_mem_desc,
                       const CompilationOptions& co,
                       DiamondCodegen& diamond_codegen);

  llvm::Value* codegenWindowRowPointer(const Analyzer::WindowFunction* window_func,
                                       const QueryMemoryDescriptor& query_mem_desc,
                                       const CompilationOptions& co,
                                       DiamondCodegen& diamond_codegen);

  llvm::Value* codegenAggColumnPtr(
      llvm::Value* output_buffer_byte_stream,
      llvm::Value* out_row_idx,
      const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
      const QueryMemoryDescriptor& query_mem_desc,
      const size_t chosen_bytes,
      const size_t agg_out_off,
      const size_t target_idx);

  void codegenEstimator(std::stack<llvm::BasicBlock*>& array_loops,
                        GroupByAndAggregate::DiamondCodegen& diamond_codegen,
                        const QueryMemoryDescriptor& query_mem_desc,
                        const CompilationOptions&);

  void codegenCountDistinct(const size_t target_idx,
                            const Analyzer::Expr* target_expr,
                            std::vector<llvm::Value*>& agg_args,
                            const QueryMemoryDescriptor&,
                            const ExecutorDeviceType);

  llvm::Value* getAdditionalLiteral(const int32_t off);

  std::vector<llvm::Value*> codegenAggArg(const Analyzer::Expr* target_expr,
                                          const CompilationOptions& co);

  llvm::Value* emitCall(const std::string& fname, const std::vector<llvm::Value*>& args);

  void checkErrorCode(llvm::Value* retCode);

  bool needsUnnestDoublePatch(llvm::Value* val_ptr,
                              const std::string& agg_base_name,
                              const bool threads_share_memory,
                              const CompilationOptions& co) const;

  void prependForceSync();

  Executor* executor_;
  const RelAlgExecutionUnit& ra_exe_unit_;
  const std::vector<InputTableInfo>& query_infos_;
  std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner_;
  bool output_columnar_;
  const ExecutorDeviceType device_type_;

  friend class Executor;
  friend class QueryMemoryDescriptor;
  friend struct TargetExprCodegen;
  friend struct TargetExprCodegenBuilder;
};

inline int64_t extract_from_datum(const Datum datum, const SQLTypeInfo& ti) {
  const auto type = ti.is_decimal() ? decimal_to_int_type(ti) : ti.get_type();
  switch (type) {
    case kBOOLEAN:
      return datum.tinyintval;
    case kTINYINT:
      return datum.tinyintval;
    case kSMALLINT:
      return datum.smallintval;
    case kCHAR:
    case kVARCHAR:
    case kTEXT:
      CHECK_EQ(kENCODING_DICT, ti.get_compression());
    case kINT:
      return datum.intval;
    case kBIGINT:
      return datum.bigintval;
    case kTIME:
    case kTIMESTAMP:
    case kDATE:
      return datum.bigintval;
    default:
      abort();
  }
}

inline int64_t extract_min_stat(const ChunkStats& stats, const SQLTypeInfo& ti) {
  return extract_from_datum(stats.min, ti);
}

inline int64_t extract_max_stat(const ChunkStats& stats, const SQLTypeInfo& ti) {
  return extract_from_datum(stats.max, ti);
}

inline size_t get_count_distinct_sub_bitmap_count(const size_t bitmap_sz_bits,
                                                  const RelAlgExecutionUnit& ra_exe_unit,
                                                  const ExecutorDeviceType device_type) {
  // For count distinct on a column with a very small number of distinct values
  // contention can be very high, especially for non-grouped queries. We'll split
  // the bitmap into multiple sub-bitmaps which are unified to get the full result.
  // The threshold value for bitmap_sz_bits works well on Kepler.
  return bitmap_sz_bits < 50000 && ra_exe_unit.groupby_exprs.empty() &&
                 (device_type == ExecutorDeviceType::GPU || g_cluster)
             ? 64  // NB: must be a power of 2 to keep runtime offset computations cheap
             : 1;
}
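
// Worked example of the rule above (illustrative values): a non-grouped
// COUNT(DISTINCT) whose bitmap needs 8192 bits, running on GPU, satisfies all
// three conditions (8192 < 50000, no group-by exprs, GPU), so 64 sub-bitmaps
// spread the contention; the same query with a 100000-bit bitmap gets a single
// bitmap.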

template <class T>
inline std::vector<int8_t> get_col_byte_widths(
    const T& col_expr_list,
    const std::vector<ssize_t>& col_exprs_to_not_project) {
  // Note that non-projected col exprs could be projected cols that we can lazy fetch
  // or grouped cols with keyless hash
  if (!col_exprs_to_not_project.empty()) {
    CHECK_EQ(col_expr_list.size(), col_exprs_to_not_project.size());
  }
  std::vector<int8_t> col_widths;
  size_t col_expr_idx = 0;
  for (const auto col_expr : col_expr_list) {
    if (!col_exprs_to_not_project.empty() &&
        col_exprs_to_not_project[col_expr_idx] != -1) {
      col_widths.push_back(0);
      ++col_expr_idx;
      continue;
    }
    if (!col_expr) {
      // row index
      col_widths.push_back(sizeof(int64_t));
    } else {
      const auto agg_info = get_target_info(col_expr, g_bigint_count);
      const auto chosen_type = get_compact_type(agg_info);
      if ((chosen_type.is_string() && chosen_type.get_compression() == kENCODING_NONE) ||
          chosen_type.is_array()) {
        col_widths.push_back(sizeof(int64_t));
        col_widths.push_back(sizeof(int64_t));
        ++col_expr_idx;
        continue;
      }
      if (chosen_type.is_geometry()) {
        for (auto i = 0; i < chosen_type.get_physical_coord_cols(); ++i) {
          col_widths.push_back(sizeof(int64_t));
          col_widths.push_back(sizeof(int64_t));
        }
        ++col_expr_idx;
        continue;
      }
      const auto col_expr_bitwidth = get_bit_width(chosen_type);
      CHECK_EQ(size_t(0), col_expr_bitwidth % 8);
      col_widths.push_back(static_cast<int8_t>(col_expr_bitwidth >> 3));
      // for average, we'll need to keep the count as well
      if (agg_info.agg_kind == kAVG) {
        CHECK(agg_info.is_agg);
        col_widths.push_back(sizeof(int64_t));
      }
    }
    ++col_expr_idx;
  }
  return col_widths;
}
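
// Illustrative example of the widths produced above: with g_bigint_count set,
// the target list (COUNT(*), AVG(x) on a DOUBLE column, s TEXT ENCODING NONE)
// with nothing suppressed yields {8, 8, 8, 8, 8} -- one 8-byte slot for the
// count, two slots for AVG (sum plus count), and two 8-byte slots for the
// none-encoded string.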

inline int8_t get_min_byte_width() {
  return MAX_BYTE_WIDTH_SUPPORTED;
}

struct RelAlgExecutionUnit;

#endif  // QUERYENGINE_GROUPBYANDAGGREGATE_H