OmniSciDB  1dac507f6e
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
GroupByAndAggregate.h
Go to the documentation of this file.
1 /*
2  * Copyright 2017 MapD Technologies, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef QUERYENGINE_GROUPBYANDAGGREGATE_H
18 #define QUERYENGINE_GROUPBYANDAGGREGATE_H
19 
20 #include "BufferCompaction.h"
21 #include "ColumnarResults.h"
22 #include "CompilationOptions.h"
23 #include "GpuMemUtils.h"
24 #include "InputMetadata.h"
25 #include "QueryExecutionContext.h"
26 #include "Rendering/RenderInfo.h"
27 #include "RuntimeFunctions.h"
28 
29 #include "../Planner/Planner.h"
30 #include "../Shared/sqltypes.h"
31 #include "Shared/Logger.h"
32 
33 #include <llvm/IR/Function.h>
34 #include <llvm/IR/Instructions.h>
35 #include <llvm/IR/Value.h>
36 #include <boost/algorithm/string/join.hpp>
37 #include <boost/make_unique.hpp>
38 
39 #include <stack>
40 #include <vector>
41 
42 extern bool g_enable_smem_group_by;
43 extern bool g_bigint_count;
44 
/**
 * Exception thrown when a result-set reduction runs out of output slots;
 * callers catch it to retry the reduction with a larger output buffer.
 * (Cleaned of Doxygen line-number extraction artifacts.)
 */
class ReductionRanOutOfSlots : public std::runtime_error {
 public:
  ReductionRanOutOfSlots() : std::runtime_error("ReductionRanOutOfSlots") {}
};
49 
50 inline std::string nullable_str_to_string(const NullableString& str) {
51  auto nptr = boost::get<void*>(&str);
52  if (nptr) {
53  CHECK(!*nptr);
54  return "NULL";
55  }
56  auto sptr = boost::get<std::string>(&str);
57  CHECK(sptr);
58  return *sptr;
59 }
60 
61 inline std::string datum_to_string(const TargetValue& tv,
62  const SQLTypeInfo& ti,
63  const std::string& delim) {
64  if (ti.is_array()) {
65  const auto array_tv = boost::get<ArrayTargetValue>(&tv);
66  CHECK(array_tv);
67  if (array_tv->is_initialized()) {
68  const auto& vec = array_tv->get();
69  std::vector<std::string> elem_strs;
70  elem_strs.reserve(vec.size());
71  const auto& elem_ti = ti.get_elem_type();
72  for (const auto& elem_tv : vec) {
73  elem_strs.push_back(datum_to_string(elem_tv, elem_ti, delim));
74  }
75  return "{" + boost::algorithm::join(elem_strs, delim) + "}";
76  }
77  return "NULL";
78  }
79  const auto scalar_tv = boost::get<ScalarTargetValue>(&tv);
80  if (ti.is_time()) {
81  Datum datum;
82  datum.bigintval = *boost::get<int64_t>(scalar_tv);
83  if (datum.bigintval == NULL_BIGINT) {
84  return "NULL";
85  }
86  return DatumToString(datum, ti);
87  }
88  if (ti.is_boolean()) {
89  const auto bool_val = *boost::get<int64_t>(scalar_tv);
90  return bool_val == NULL_BOOLEAN ? "NULL" : (bool_val ? "true" : "false");
91  }
92  auto iptr = boost::get<int64_t>(scalar_tv);
93  if (iptr) {
94  return *iptr == inline_int_null_val(ti) ? "NULL" : std::to_string(*iptr);
95  }
96  auto fptr = boost::get<float>(scalar_tv);
97  if (fptr) {
98  return *fptr == inline_fp_null_val(ti) ? "NULL" : std::to_string(*fptr);
99  }
100  auto dptr = boost::get<double>(scalar_tv);
101  if (dptr) {
102  return *dptr == inline_fp_null_val(ti.is_decimal() ? SQLTypeInfo(kDOUBLE, false) : ti)
103  ? "NULL"
104  : std::to_string(*dptr);
105  }
106  auto sptr = boost::get<NullableString>(scalar_tv);
107  CHECK(sptr);
108  return nullable_str_to_string(*sptr);
109 }
110 
111 struct ColRangeInfo {
113  int64_t min;
114  int64_t max;
115  int64_t bucket;
116  bool has_nulls;
117  bool isEmpty() { return min == 0 && max == -1; }
118 };
119 
// Result of the keyless-hash analysis for a group-by query.
// (Cleaned of Doxygen line-number artifacts.)
struct KeylessInfo {
  const bool keyless;          // true when the group-by layout can omit key storage
  const int32_t target_index;  // index of the target expression driving the layout
  const bool shared_mem_support;  // TODO(Saman) remove, all aggregate operations should
                                  // eventually be potentially done with shared memory.
                                  // The decision will be made when the query memory
                                  // descriptor is created, not here. This member just
                                  // indicates the possibility.
};
129 
// NOTE(review): This block is Doxygen-extraction residue — every line carries
// the original header's line number and several lines were lost entirely.
// Doc-lines 129-130 (the "class GroupByAndAggregate {" opener) are absent, so
// the access specifiers and members below belong to that class (see the
// cross-reference entries at the bottom of the capture). Gaps are flagged
// inline; recover the authoritative text from the upstream repository rather
// than guessing here. The class drives LLVM IR generation for the group-by
// and aggregation phase of a query, per the member signatures visible below.
131  public:
// Constructs the codegen helper for one relational-algebra execution unit.
132  GroupByAndAggregate(Executor* executor,
133  const ExecutorDeviceType device_type,
134  const RelAlgExecutionUnit& ra_exe_unit,
135  const std::vector<InputTableInfo>& query_infos,
136  std::shared_ptr<RowSetMemoryOwner>);
137 
138  // returns true iff checking the error code after every row
139  // is required -- slow path group by queries for now
140  bool codegen(llvm::Value* filter_result,
141  llvm::BasicBlock* sc_false,
// NOTE(review): doc-line 142 missing; the cross-reference gives the full
// signature with a `const QueryMemoryDescriptor& query_mem_desc` parameter
// here — confirm upstream.
143  const CompilationOptions& co);
144 
145  static void addTransientStringLiterals(
146  const RelAlgExecutionUnit& ra_exe_unit,
147  Executor* executor,
148  std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner);
149 
150  static size_t shard_count_for_top_groups(const RelAlgExecutionUnit& ra_exe_unit,
151  const Catalog_Namespace::Catalog& catalog);
152 
153  private:
// Helper that emits an if/true/false "diamond" of basic blocks in the
// generated IR; setFalseTarget/setChainToNext adjust the edges, destructor
// presumably finalizes them — confirm against upstream implementation.
154  struct DiamondCodegen {
155  DiamondCodegen(llvm::Value* cond,
156  Executor* executor,
157  const bool chain_to_next,
158  const std::string& label_prefix,
159  DiamondCodegen* parent,
160  const bool share_false_edge_with_parent);
161  void setChainToNext();
162  void setFalseTarget(llvm::BasicBlock* cond_false);
163  ~DiamondCodegen();
164 
// NOTE(review): doc-line 165 missing (presumably a data member such as
// `Executor* executor_;`) — confirm upstream.
166  llvm::BasicBlock* cond_true_;
167  llvm::BasicBlock* cond_false_;
168  llvm::BasicBlock* orig_cond_false_;
// NOTE(review): doc-lines 169-170 missing (additional data members) — confirm
// upstream.
171  };
172 
173  bool supportedTypeForGpuSharedMemUsage(const SQLTypeInfo& target_type_info) const;
174 
// NOTE(review): doc-line 175 missing; the cross-reference lists
// `static bool supportedExprForGpuSharedMemUsage(Analyzer::Expr* expr)`,
// which likely belongs here — confirm upstream.
176 
177  bool gpuCanHandleOrderEntries(const std::list<Analyzer::OrderEntry>& order_entries);
178 
179  std::unique_ptr<QueryMemoryDescriptor> initQueryMemoryDescriptor(
180  const bool allow_multifrag,
181  const size_t max_groups_buffer_entry_count,
182  const int8_t crt_min_byte_width,
183  RenderInfo* render_info,
184  const bool output_columnar_hint);
185 
186  std::unique_ptr<QueryMemoryDescriptor> initQueryMemoryDescriptorImpl(
187  const bool allow_multifrag,
188  const size_t max_groups_buffer_entry_count,
189  const int8_t crt_min_byte_width,
190  const bool sort_on_gpu_hint,
191  RenderInfo* render_info,
192  const bool must_use_baseline_sort,
193  const bool output_columnar_hint);
194 
195  int64_t getShardedTopBucket(const ColRangeInfo& col_range_info,
196  const size_t shard_count) const;
197 
// NOTE(review): doc-line 198 missing; likely `ColRangeInfo getColRangeInfo();`
// (present in the cross-reference) — confirm upstream.
199 
// NOTE(review): doc-line 200 missing; likely
// `CountDistinctDescriptors initCountDistinctDescriptors();` (present in the
// cross-reference) — confirm upstream.
201 
202  llvm::Value* codegenOutputSlot(llvm::Value* groups_buffer,
203  const QueryMemoryDescriptor& query_mem_desc,
204  const CompilationOptions& co,
205  DiamondCodegen& diamond_codegen);
206 
207  std::tuple<llvm::Value*, llvm::Value*> codegenGroupBy(
208  const QueryMemoryDescriptor& query_mem_desc,
209  const CompilationOptions& co,
210  DiamondCodegen& codegen);
211 
212  std::tuple<llvm::Value*, llvm::Value*> codegenSingleColumnPerfectHash(
213  const QueryMemoryDescriptor& query_mem_desc,
214  const CompilationOptions& co,
215  llvm::Value* groups_buffer,
216  llvm::Value* group_expr_lv_translated,
217  llvm::Value* group_expr_lv_original,
218  const int32_t row_size_quad);
219 
220  std::tuple<llvm::Value*, llvm::Value*> codegenMultiColumnPerfectHash(
221  llvm::Value* groups_buffer,
222  llvm::Value* group_key,
223  llvm::Value* key_size_lv,
224  const QueryMemoryDescriptor& query_mem_desc,
225  const int32_t row_size_quad);
226  llvm::Function* codegenPerfectHashFunction();
227 
228  std::tuple<llvm::Value*, llvm::Value*> codegenMultiColumnBaselineHash(
229  const CompilationOptions& co,
230  llvm::Value* groups_buffer,
231  llvm::Value* group_key,
232  llvm::Value* key_size_lv,
233  const QueryMemoryDescriptor& query_mem_desc,
234  const size_t key_width,
235  const int32_t row_size_quad);
236 
// NOTE(review): doc-line 237 missing — a declaration was lost here and the
// cross-reference does not identify it unambiguously; confirm upstream.
238 
239  ColRangeInfo getExprRangeInfo(const Analyzer::Expr* expr) const;
240 
241  static int64_t getBucketedCardinality(const ColRangeInfo& col_range_info);
242 
243  KeylessInfo getKeylessInfo(const std::vector<Analyzer::Expr*>& target_expr_list,
244  const bool is_group_by) const;
245 
246  llvm::Value* convertNullIfAny(const SQLTypeInfo& arg_type,
247  const TargetInfo& agg_info,
248  llvm::Value* target);
249 
250  bool codegenAggCalls(const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
251  const std::vector<llvm::Value*>& agg_out_vec,
252  const QueryMemoryDescriptor& query_mem_desc,
253  const CompilationOptions& co,
254  DiamondCodegen& diamond_codegen);
255 
256  llvm::Value* codegenWindowRowPointer(const Analyzer::WindowFunction* window_func,
257  const QueryMemoryDescriptor& query_mem_desc,
258  const CompilationOptions& co,
259  DiamondCodegen& diamond_codegen);
260 
// Per the cross-reference: returns the pointer to where the aggregation
// should be stored.
261  llvm::Value* codegenAggColumnPtr(
262  llvm::Value* output_buffer_byte_stream,
263  llvm::Value* out_row_idx,
264  const std::tuple<llvm::Value*, llvm::Value*>& agg_out_ptr_w_idx,
265  const QueryMemoryDescriptor& query_mem_desc,
266  const size_t chosen_bytes,
267  const size_t agg_out_off,
268  const size_t target_idx);
269 
270  void codegenEstimator(std::stack<llvm::BasicBlock*>& array_loops,
271  GroupByAndAggregate::DiamondCodegen& diamond_codegen,
272  const QueryMemoryDescriptor& query_mem_desc,
273  const CompilationOptions&);
274 
275  void codegenCountDistinct(const size_t target_idx,
276  const Analyzer::Expr* target_expr,
277  std::vector<llvm::Value*>& agg_args,
278  const QueryMemoryDescriptor&,
279  const ExecutorDeviceType);
280 
281  llvm::Value* getAdditionalLiteral(const int32_t off);
282 
283  std::vector<llvm::Value*> codegenAggArg(const Analyzer::Expr* target_expr,
284  const CompilationOptions& co);
285 
286  llvm::Value* emitCall(const std::string& fname, const std::vector<llvm::Value*>& args);
287 
288  bool needsUnnestDoublePatch(llvm::Value* val_ptr,
289  const std::string& agg_base_name,
290  const bool threads_share_memory,
291  const CompilationOptions& co) const;
292 
293  void prependForceSync();
294 
// NOTE(review): doc-lines 295-296 missing; the cross-reference lists members
// `const RelAlgExecutionUnit& ra_exe_unit_` (and presumably an `Executor*`)
// that belong in this data-member section — confirm upstream.
297  const std::vector<InputTableInfo>& query_infos_;
298  std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner_;
// NOTE(review): doc-lines 299-300 missing; the cross-reference lists
// `const ExecutorDeviceType device_type_` among the members — confirm upstream.
301 
302  friend class Executor;
303  friend class QueryMemoryDescriptor;
304  friend struct TargetExprCodegen;
// NOTE(review): doc-line 305 missing (possibly another friend declaration) —
// confirm upstream.
306 };
307 
308 inline int64_t extract_from_datum(const Datum datum, const SQLTypeInfo& ti) {
309  const auto type = ti.is_decimal() ? decimal_to_int_type(ti) : ti.get_type();
310  switch (type) {
311  case kBOOLEAN:
312  return datum.tinyintval;
313  case kTINYINT:
314  return datum.tinyintval;
315  case kSMALLINT:
316  return datum.smallintval;
317  case kCHAR:
318  case kVARCHAR:
319  case kTEXT:
321  case kINT:
322  return datum.intval;
323  case kBIGINT:
324  return datum.bigintval;
325  case kTIME:
326  case kTIMESTAMP:
327  case kDATE:
328  return datum.bigintval;
329  default:
330  abort();
331  }
332 }
333 
334 inline int64_t extract_min_stat(const ChunkStats& stats, const SQLTypeInfo& ti) {
335  return extract_from_datum(stats.min, ti);
336 }
337 
338 inline int64_t extract_max_stat(const ChunkStats& stats, const SQLTypeInfo& ti) {
339  return extract_from_datum(stats.max, ti);
340 }
341 
342 inline size_t get_count_distinct_sub_bitmap_count(const size_t bitmap_sz_bits,
344  const ExecutorDeviceType device_type) {
345  // For count distinct on a column with a very small number of distinct values
346  // contention can be very high, especially for non-grouped queries. We'll split
347  // the bitmap into multiple sub-bitmaps which are unified to get the full result.
348  // The threshold value for bitmap_sz_bits works well on Kepler.
349  return bitmap_sz_bits < 50000 && ra_exe_unit.groupby_exprs.empty() &&
350  (device_type == ExecutorDeviceType::GPU || g_cluster)
351  ? 64 // NB: must be a power of 2 to keep runtime offset computations cheap
352  : 1;
353 }
354 
355 template <class T>
356 inline std::vector<int8_t> get_col_byte_widths(
357  const T& col_expr_list,
358  const std::vector<ssize_t>& col_exprs_to_not_project) {
359  // Note that non-projected col exprs could be projected cols that we can lazy fetch or
360  // grouped cols with keyless hash
361  if (!col_exprs_to_not_project.empty()) {
362  CHECK_EQ(col_expr_list.size(), col_exprs_to_not_project.size());
363  }
364  std::vector<int8_t> col_widths;
365  size_t col_expr_idx = 0;
366  for (const auto col_expr : col_expr_list) {
367  if (!col_exprs_to_not_project.empty() &&
368  col_exprs_to_not_project[col_expr_idx] != -1) {
369  col_widths.push_back(0);
370  ++col_expr_idx;
371  continue;
372  }
373  if (!col_expr) {
374  // row index
375  col_widths.push_back(sizeof(int64_t));
376  } else {
377  const auto agg_info = get_target_info(col_expr, g_bigint_count);
378  const auto chosen_type = get_compact_type(agg_info);
379  if ((chosen_type.is_string() && chosen_type.get_compression() == kENCODING_NONE) ||
380  chosen_type.is_array()) {
381  col_widths.push_back(sizeof(int64_t));
382  col_widths.push_back(sizeof(int64_t));
383  ++col_expr_idx;
384  continue;
385  }
386  if (chosen_type.is_geometry()) {
387  for (auto i = 0; i < chosen_type.get_physical_coord_cols(); ++i) {
388  col_widths.push_back(sizeof(int64_t));
389  col_widths.push_back(sizeof(int64_t));
390  }
391  ++col_expr_idx;
392  continue;
393  }
394  const auto col_expr_bitwidth = get_bit_width(chosen_type);
395  CHECK_EQ(size_t(0), col_expr_bitwidth % 8);
396  col_widths.push_back(static_cast<int8_t>(col_expr_bitwidth >> 3));
397  // for average, we'll need to keep the count as well
398  if (agg_info.agg_kind == kAVG) {
399  CHECK(agg_info.is_agg);
400  col_widths.push_back(sizeof(int64_t));
401  }
402  }
403  ++col_expr_idx;
404  }
405  return col_widths;
406 }
407 
408 inline int8_t get_min_byte_width() {
410 }
411 
412 struct RelAlgExecutionUnit;
413 
414 #endif // QUERYENGINE_GROUPBYANDAGGREGATE_H
int8_t tinyintval
Definition: sqltypes.h:126
void codegenEstimator(std::stack< llvm::BasicBlock * > &array_loops, GroupByAndAggregate::DiamondCodegen &diamond_codegen, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &)
const RelAlgExecutionUnit & ra_exe_unit
bool is_boolean() const
Definition: sqltypes.h:484
QueryDescriptionType
Definition: Types.h:26
#define CHECK_EQ(x, y)
Definition: Logger.h:198
bool g_enable_smem_group_by
bool gpuCanHandleOrderEntries(const std::list< Analyzer::OrderEntry > &order_entries)
static int64_t getBucketedCardinality(const ColRangeInfo &col_range_info)
const int32_t groups_buffer_size return groups_buffer
llvm::Value * getAdditionalLiteral(const int32_t off)
llvm::Value * codegenAggColumnPtr(llvm::Value *output_buffer_byte_stream, llvm::Value *out_row_idx, const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const QueryMemoryDescriptor &query_mem_desc, const size_t chosen_bytes, const size_t agg_out_off, const size_t target_idx)
: returns the pointer to where the aggregation should be stored.
const bool shared_mem_support
std::string DatumToString(Datum d, const SQLTypeInfo &ti)
Definition: Datum.cpp:193
class for a per-database catalog. also includes metadata for the current database and the current use...
Definition: Catalog.h:81
Definition: sqltypes.h:52
HOST DEVICE EncodingType get_compression() const
Definition: sqltypes.h:334
bool g_cluster
const bool keyless
std::vector< int8_t > get_col_byte_widths(const T &col_expr_list, const std::vector< ssize_t > &col_exprs_to_not_project)
GroupByAndAggregate(Executor *executor, const ExecutorDeviceType device_type, const RelAlgExecutionUnit &ra_exe_unit, const std::vector< InputTableInfo > &query_infos, std::shared_ptr< RowSetMemoryOwner >)
ExecutorDeviceType
std::unique_ptr< QueryMemoryDescriptor > initQueryMemoryDescriptorImpl(const bool allow_multifrag, const size_t max_groups_buffer_entry_count, const int8_t crt_min_byte_width, const bool sort_on_gpu_hint, RenderInfo *render_info, const bool must_use_baseline_sort, const bool output_columnar_hint)
#define NULL_BIGINT
Definition: sqltypes.h:177
TargetInfo get_target_info(const PointerType target_expr, const bool bigint_count)
Definition: TargetInfo.h:65
std::string datum_to_string(const TargetValue &tv, const SQLTypeInfo &ti, const std::string &delim)
ColRangeInfo getColRangeInfo()
std::string nullable_str_to_string(const NullableString &str)
QueryDescriptionType hash_type_
std::string join(T const &container, std::string const &delim)
static bool supportedExprForGpuSharedMemUsage(Analyzer::Expr *expr)
llvm::Value * emitCall(const std::string &fname, const std::vector< llvm::Value * > &args)
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
bool needsUnnestDoublePatch(llvm::Value *val_ptr, const std::string &agg_base_name, const bool threads_share_memory, const CompilationOptions &co) const
const std::list< std::shared_ptr< Analyzer::Expr > > groupby_exprs
bool supportedTypeForGpuSharedMemUsage(const SQLTypeInfo &target_type_info) const
std::tuple< llvm::Value *, llvm::Value * > codegenMultiColumnBaselineHash(const CompilationOptions &co, llvm::Value *groups_buffer, llvm::Value *group_key, llvm::Value *key_size_lv, const QueryMemoryDescriptor &query_mem_desc, const size_t key_width, const int32_t row_size_quad)
int64_t extract_from_datum(const Datum datum, const SQLTypeInfo &ti)
double inline_fp_null_val(const SQL_TYPE_INFO &ti)
int32_t intval
Definition: sqltypes.h:128
std::string to_string(char const *&&v)
int8_t get_min_byte_width()
size_t get_count_distinct_sub_bitmap_count(const size_t bitmap_sz_bits, const RelAlgExecutionUnit &ra_exe_unit, const ExecutorDeviceType device_type)
const SQLTypeInfo get_compact_type(const TargetInfo &target)
CountDistinctDescriptors initCountDistinctDescriptors()
size_t get_bit_width(const SQLTypeInfo &ti)
CHECK(cgen_state)
bool codegen(llvm::Value *filter_result, llvm::BasicBlock *sc_false, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co)
bool is_array() const
Definition: sqltypes.h:485
bool is_time() const
Definition: sqltypes.h:483
std::vector< CountDistinctDescriptor > CountDistinctDescriptors
Definition: CountDistinct.h:35
HOST DEVICE SQLTypes get_type() const
Definition: sqltypes.h:326
llvm::Value * convertNullIfAny(const SQLTypeInfo &arg_type, const TargetInfo &agg_info, llvm::Value *target)
int64_t bigintval
Definition: sqltypes.h:129
void setFalseTarget(llvm::BasicBlock *cond_false)
std::tuple< llvm::Value *, llvm::Value * > codegenSingleColumnPerfectHash(const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, llvm::Value *groups_buffer, llvm::Value *group_expr_lv_translated, llvm::Value *group_expr_lv_original, const int32_t row_size_quad)
std::tuple< llvm::Value *, llvm::Value * > codegenGroupBy(const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, DiamondCodegen &codegen)
bool g_bigint_count
int16_t smallintval
Definition: sqltypes.h:127
KeylessInfo getKeylessInfo(const std::vector< Analyzer::Expr * > &target_expr_list, const bool is_group_by) const
void codegenCountDistinct(const size_t target_idx, const Analyzer::Expr *target_expr, std::vector< llvm::Value * > &agg_args, const QueryMemoryDescriptor &, const ExecutorDeviceType)
DiamondCodegen(llvm::Value *cond, Executor *executor, const bool chain_to_next, const std::string &label_prefix, DiamondCodegen *parent, const bool share_false_edge_with_parent)
bool codegenAggCalls(const std::tuple< llvm::Value *, llvm::Value * > &agg_out_ptr_w_idx, const std::vector< llvm::Value * > &agg_out_vec, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, DiamondCodegen &diamond_codegen)
std::tuple< llvm::Value *, llvm::Value * > codegenMultiColumnPerfectHash(llvm::Value *groups_buffer, llvm::Value *group_key, llvm::Value *key_size_lv, const QueryMemoryDescriptor &query_mem_desc, const int32_t row_size_quad)
SQLTypeInfoCore< ArrayContextTypeSizer, ExecutorTypePackaging, DateTimeFacilities > SQLTypeInfo
Definition: sqltypes.h:852
int64_t getShardedTopBucket(const ColRangeInfo &col_range_info, const size_t shard_count) const
SQLTypes decimal_to_int_type(const SQLTypeInfo &ti)
Definition: Datum.cpp:268
const int32_t target_index
Definition: sqltypes.h:55
Definition: sqltypes.h:56
const std::vector< InputTableInfo > & query_infos_
std::unique_ptr< QueryMemoryDescriptor > initQueryMemoryDescriptor(const bool allow_multifrag, const size_t max_groups_buffer_entry_count, const int8_t crt_min_byte_width, RenderInfo *render_info, const bool output_columnar_hint)
const ExecutorDeviceType device_type_
std::vector< llvm::Value * > codegenAggArg(const Analyzer::Expr *target_expr, const CompilationOptions &co)
llvm::Function * codegenPerfectHashFunction()
llvm::Value * codegenWindowRowPointer(const Analyzer::WindowFunction *window_func, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, DiamondCodegen &diamond_codegen)
Definition: sqltypes.h:44
boost::variant< std::string, void * > NullableString
Definition: TargetValue.h:155
llvm::Value * codegenOutputSlot(llvm::Value *groups_buffer, const QueryMemoryDescriptor &query_mem_desc, const CompilationOptions &co, DiamondCodegen &diamond_codegen)
int64_t extract_min_stat(const ChunkStats &stats, const SQLTypeInfo &ti)
int64_t inline_int_null_val(const SQL_TYPE_INFO &ti)
ColRangeInfo getExprRangeInfo(const Analyzer::Expr *expr) const
constexpr int8_t MAX_BYTE_WIDTH_SUPPORTED
boost::variant< ScalarTargetValue, ArrayTargetValue, GeoTargetValue, GeoTargetValuePtr > TargetValue
Definition: TargetValue.h:167
Definition: sqltypes.h:48
SQLTypeInfoCore get_elem_type() const
Definition: sqltypes.h:659
const RelAlgExecutionUnit & ra_exe_unit_
bool is_decimal() const
Definition: sqltypes.h:480
Definition: sqldefs.h:71
#define NULL_BOOLEAN
Definition: sqltypes.h:173
int64_t extract_max_stat(const ChunkStats &stats, const SQLTypeInfo &ti)
static size_t shard_count_for_top_groups(const RelAlgExecutionUnit &ra_exe_unit, const Catalog_Namespace::Catalog &catalog)