WindowContext.cpp
/*
 * Copyright 2022 HEAVY.AI, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "QueryEngine/WindowContext.h"

#include <numeric>

#include "QueryEngine/Execute.h"
#include "Shared/Intervals.h"
#include "Shared/checked_alloc.h"
#include "Shared/funcannotations.h"
#include "Shared/sqltypes.h"
#include "Shared/threading.h"

#ifdef HAVE_TBB
//#include <tbb/parallel_for.h>
#include <tbb/parallel_sort.h>
#else
#include <thrust/sort.h>
#endif
bool g_enable_parallel_window_partition_compute{true};
size_t g_parallel_window_partition_compute_threshold{1 << 12};  // 4096

bool g_enable_parallel_window_partition_sort{true};
size_t g_parallel_window_partition_sort_threshold{1 << 10};  // 1024

size_t g_window_function_aggregation_tree_fanout{8};
// Non-partitioned version (no hash table provided)
WindowFunctionContext::WindowFunctionContext(
    const Analyzer::WindowFunction* window_func,
    const size_t elem_count,
    const ExecutorDeviceType device_type,
    std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner)
    : window_func_(window_func)
    , partition_cache_key_(EMPTY_HASHED_PLAN_DAG_KEY)
    , sorted_partition_cache_key_(EMPTY_HASHED_PLAN_DAG_KEY)
    , partitions_(nullptr)
    , elem_count_(elem_count)
    , output_(nullptr)
    , sorted_partition_buf_(nullptr)
    , aggregate_trees_fan_out_(g_window_function_aggregation_tree_fanout)
    , aggregate_trees_depth_(nullptr)
    , ordered_partition_null_start_pos_(nullptr)
    , ordered_partition_null_end_pos_(nullptr)
    , partition_start_offset_(nullptr)
    , partition_start_(nullptr)
    , partition_end_(nullptr)
    , device_type_(device_type)
    , row_set_mem_owner_(row_set_mem_owner)
    , dummy_count_(elem_count)
    , dummy_offset_(0)
    , dummy_payload_(nullptr) {
  CHECK_LE(elem_count_, static_cast<size_t>(std::numeric_limits<int32_t>::max()));
  dummy_payload_ =
      reinterpret_cast<int32_t*>(checked_malloc(elem_count_ * sizeof(int32_t)));
  std::iota(dummy_payload_, dummy_payload_ + elem_count_, int32_t(0));
  if (window_func_->hasFraming() ||
      window_func_->isMissingValueFillingFunction()) {
    // in this case, we consider all rows of the table to belong to the same and only
    // existing partition
    partition_start_offset_ =
        reinterpret_cast<int64_t*>(checked_calloc(2, sizeof(int64_t)));
    partition_start_offset_[1] = elem_count_;
    aggregate_trees_depth_ = reinterpret_cast<size_t*>(checked_calloc(1, sizeof(size_t)));
    ordered_partition_null_start_pos_ =
        reinterpret_cast<int64_t*>(checked_calloc(1, sizeof(int64_t)));
    ordered_partition_null_end_pos_ =
        reinterpret_cast<int64_t*>(checked_calloc(1, sizeof(int64_t)));
  }
}

// Partitioned version
WindowFunctionContext::WindowFunctionContext(
    const Analyzer::WindowFunction* window_func,
    QueryPlanHash partition_cache_key,
    const std::shared_ptr<HashJoin>& partitions,
    const size_t elem_count,
    const ExecutorDeviceType device_type,
    std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner,
    size_t aggregation_tree_fan_out)
    : window_func_(window_func)
    , partition_cache_key_(partition_cache_key)
    , sorted_partition_cache_key_(EMPTY_HASHED_PLAN_DAG_KEY)
    , partitions_(partitions)
    , elem_count_(elem_count)
    , output_(nullptr)
    , sorted_partition_buf_(nullptr)
    , aggregate_trees_fan_out_(aggregation_tree_fan_out)
    , aggregate_trees_depth_(nullptr)
    , ordered_partition_null_start_pos_(nullptr)
    , ordered_partition_null_end_pos_(nullptr)
    , partition_start_offset_(nullptr)
    , partition_start_(nullptr)
    , partition_end_(nullptr)
    , device_type_(device_type)
    , row_set_mem_owner_(row_set_mem_owner)
    , dummy_count_(elem_count)
    , dummy_offset_(0)
    , dummy_payload_(nullptr) {
  CHECK(partitions_);  // this version should have a hash table
  size_t partition_count = partitionCount();
  partition_start_offset_ =
      reinterpret_cast<int64_t*>(checked_calloc(partition_count + 1, sizeof(int64_t)));
  if (window_func_->hasFraming()) {
    aggregate_trees_depth_ =
        reinterpret_cast<size_t*>(checked_calloc(partition_count, sizeof(size_t)));
    ordered_partition_null_start_pos_ =
        reinterpret_cast<int64_t*>(checked_calloc(partition_count, sizeof(int64_t)));
    ordered_partition_null_end_pos_ =
        reinterpret_cast<int64_t*>(checked_calloc(partition_count, sizeof(int64_t)));
  }
  // the first partition starts at the zero position
  std::partial_sum(counts(), counts() + partition_count, partition_start_offset_ + 1);
}

WindowFunctionContext::~WindowFunctionContext() {
  free(partition_start_);
  free(partition_end_);
  if (dummy_payload_) {
    free(dummy_payload_);
  }
  if (partition_start_offset_) {
    free(partition_start_offset_);
  }
  if (aggregate_trees_depth_) {
    free(aggregate_trees_depth_);
  }
  if (ordered_partition_null_start_pos_) {
    free(ordered_partition_null_start_pos_);
  }
  if (ordered_partition_null_end_pos_) {
    free(ordered_partition_null_end_pos_);
  }
}

void WindowFunctionContext::addOrderColumn(
    const int8_t* column,
    const SQLTypeInfo& ti,
    const std::vector<std::shared_ptr<Chunk_NS::Chunk>>& chunks_owner) {
  order_columns_owner_.push_back(chunks_owner);
  order_columns_.push_back(column);
  order_columns_ti_.push_back(ti);
}

void WindowFunctionContext::addColumnBufferForWindowFunctionExpression(
    const int8_t* column,
    const std::vector<std::shared_ptr<Chunk_NS::Chunk>>& chunks_owner) {
  window_func_expr_columns_owner_.push_back(chunks_owner);
  window_func_expr_columns_.push_back(column);
}

const std::vector<const int8_t*>&
WindowFunctionContext::getColumnBufferForWindowFunctionExpressions() const {
  return window_func_expr_columns_;
}

const std::vector<const int8_t*>& WindowFunctionContext::getOrderKeyColumnBuffers()
    const {
  return order_columns_;
}

const std::vector<SQLTypeInfo>& WindowFunctionContext::getOrderKeyColumnBufferTypes()
    const {
  return order_columns_ti_;
}

void WindowFunctionContext::setSortedPartitionCacheKey(QueryPlanHash cache_key) {
  sorted_partition_cache_key_ = cache_key;
}

namespace {

// Converts the sorted indices to a mapping from row position to row number.
std::vector<int64_t> index_to_row_number(const int64_t* index, const size_t index_size) {
  std::vector<int64_t> row_numbers(index_size);
  for (size_t i = 0; i < index_size; ++i) {
    row_numbers[index[i]] = i + 1;
  }
  return row_numbers;
}

// Returns true iff the current element is greater than the previous, according to the
// comparator. This is needed because peer rows have to have the same rank.
bool advance_current_rank(
    const std::function<bool(const int64_t lhs, const int64_t rhs)>& comparator,
    const int64_t* index,
    const size_t i) {
  if (i == 0) {
    return false;
  }
  return comparator(index[i - 1], index[i]);
}

// Computes the mapping from row position to rank.
std::vector<int64_t> index_to_rank(
    const int64_t* index,
    const size_t index_size,
    const std::function<bool(const int64_t lhs, const int64_t rhs)>& comparator) {
  std::vector<int64_t> rank(index_size);
  size_t crt_rank = 1;
  for (size_t i = 0; i < index_size; ++i) {
    if (advance_current_rank(comparator, index, i)) {
      crt_rank = i + 1;
    }
    rank[index[i]] = crt_rank;
  }
  return rank;
}

// Computes the mapping from row position to dense rank.
std::vector<int64_t> index_to_dense_rank(
    const int64_t* index,
    const size_t index_size,
    const std::function<bool(const int64_t lhs, const int64_t rhs)>& comparator) {
  std::vector<int64_t> dense_rank(index_size);
  size_t crt_rank = 1;
  for (size_t i = 0; i < index_size; ++i) {
    if (advance_current_rank(comparator, index, i)) {
      ++crt_rank;
    }
    dense_rank[index[i]] = crt_rank;
  }
  return dense_rank;
}
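
// Illustrative example (not part of the original source): for a partition whose
// sorted order-key values are {10, 20, 20, 30}, rows 2 and 3 are peers, so:
//   index_to_row_number -> {1, 2, 3, 4}
//   index_to_rank       -> {1, 2, 2, 4}   (RANK leaves a gap after a peer group)
//   index_to_dense_rank -> {1, 2, 2, 3}   (DENSE_RANK does not leave gaps)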

// Computes the mapping from row position to percent rank.
std::vector<double> index_to_percent_rank(
    const int64_t* index,
    const size_t index_size,
    const std::function<bool(const int64_t lhs, const int64_t rhs)>& comparator) {
  std::vector<double> percent_rank(index_size);
  size_t crt_rank = 1;
  for (size_t i = 0; i < index_size; ++i) {
    if (advance_current_rank(comparator, index, i)) {
      crt_rank = i + 1;
    }
    percent_rank[index[i]] =
        index_size == 1 ? 0 : static_cast<double>(crt_rank - 1) / (index_size - 1);
  }
  return percent_rank;
}

// Computes the mapping from row position to cumulative distribution.
std::vector<double> index_to_cume_dist(
    const int64_t* index,
    const size_t index_size,
    const std::function<bool(const int64_t lhs, const int64_t rhs)>& comparator) {
  std::vector<double> cume_dist(index_size);
  size_t start_peer_group = 0;
  while (start_peer_group < index_size) {
    size_t end_peer_group = start_peer_group + 1;
    while (end_peer_group < index_size &&
           !advance_current_rank(comparator, index, end_peer_group)) {
      ++end_peer_group;
    }
    for (size_t i = start_peer_group; i < end_peer_group; ++i) {
      cume_dist[index[i]] = static_cast<double>(end_peer_group) / index_size;
    }
    start_peer_group = end_peer_group;
  }
  return cume_dist;
}

// Computes the mapping from row position to the n-tile statistic.
std::vector<int64_t> index_to_ntile(const int64_t* index,
                                    const size_t index_size,
                                    const size_t n) {
  std::vector<int64_t> row_numbers(index_size);
  if (!n) {
    throw std::runtime_error("NTILE argument cannot be zero");
  }
  const size_t tile_size = (index_size + n - 1) / n;
  for (size_t i = 0; i < index_size; ++i) {
    row_numbers[index[i]] = i / tile_size + 1;
  }
  return row_numbers;
}
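
// Illustrative example (not part of the original source): with index_size = 10 and
// n = 4, tile_size = (10 + 4 - 1) / 4 = 3, so the sorted positions map to the tiles
// {1,1,1, 2,2,2, 3,3,3, 4}; only the last tile may come up short.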

// The element size in the result buffer for the given window function kind. Currently
// it's always 8.
size_t window_function_buffer_element_size(const SqlWindowFunctionKind /*kind*/) {
  return 8;
}

// Extracts the integer constant from a constant expression.
int64_t get_int_constant_from_expr(const Analyzer::Expr* expr) {
  const auto lag_constant = dynamic_cast<const Analyzer::Constant*>(expr);
  if (!lag_constant) {
    throw std::runtime_error("LAG with non-constant lag argument not supported yet");
  }
  const auto& lag_ti = lag_constant->get_type_info();
  switch (lag_ti.get_type()) {
    case kSMALLINT: {
      return lag_constant->get_constval().smallintval;
    }
    case kINT: {
      return lag_constant->get_constval().intval;
    }
    case kBIGINT: {
      return lag_constant->get_constval().bigintval;
    }
    default: {
      LOG(FATAL) << "Invalid type for the lag argument";
    }
  }
  return 0;
}

// Gets the lag or lead argument canonicalized as lag (lag = -lead).
int64_t get_lag_or_lead_argument(const Analyzer::WindowFunction* window_func) {
  CHECK(window_func->getKind() == SqlWindowFunctionKind::LAG ||
        window_func->getKind() == SqlWindowFunctionKind::LEAD);
  const auto& args = window_func->getArgs();
  if (args.size() == 3) {
    throw std::runtime_error("LAG with default not supported yet");
  }
  if (args.size() == 2) {
    const int64_t lag_or_lead =
        static_cast<int64_t>(get_int_constant_from_expr(args[1].get()));
    return window_func->getKind() == SqlWindowFunctionKind::LAG ? lag_or_lead
                                                                : -lag_or_lead;
  }
  CHECK_EQ(args.size(), size_t(1));
  return window_func->getKind() == SqlWindowFunctionKind::LAG ? 1 : -1;
}
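
// Illustrative example (not part of the original source): canonicalizing LEAD as a
// negative lag lets LAG and LEAD share apply_lag_to_partition() below:
//   LAG(x, 2)  -> +2 (read the value from two rows earlier in the ordering)
//   LEAD(x, 2) -> -2 (read the value from two rows later in the ordering)
//   LAG(x) / LEAD(x) without an explicit offset default to +1 / -1.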

// Computes the target index within the partition for FIRST_VALUE and LAST_VALUE.
size_t get_target_idx_for_first_or_last_value_func(
    const Analyzer::WindowFunction* window_func,
    const size_t partition_size) {
  CHECK(window_func->getKind() == SqlWindowFunctionKind::FIRST_VALUE ||
        window_func->getKind() == SqlWindowFunctionKind::LAST_VALUE);
  return window_func->getKind() == SqlWindowFunctionKind::FIRST_VALUE
             ? 0
             : partition_size - 1;
}

// Redistributes the original_indices according to the permutation given by
// output_for_partition_buff, reusing it as an output buffer.
void apply_permutation_to_partition(int64_t* output_for_partition_buff,
                                    const int32_t* original_indices,
                                    const size_t partition_size) {
  std::vector<int64_t> new_output_for_partition_buff(partition_size);
  for (size_t i = 0; i < partition_size; ++i) {
    new_output_for_partition_buff[i] = original_indices[output_for_partition_buff[i]];
  }
  std::copy(new_output_for_partition_buff.begin(),
            new_output_for_partition_buff.end(),
            output_for_partition_buff);
}

// Applies a lag to the given sorted_indices, reusing it as an output buffer.
void apply_lag_to_partition(const int64_t lag,
                            const int32_t* original_indices,
                            int64_t* sorted_indices,
                            const size_t partition_size) {
  std::vector<int64_t> lag_sorted_indices(partition_size, -1);
  for (int64_t idx = 0; idx < static_cast<int64_t>(partition_size); ++idx) {
    int64_t lag_idx = idx - lag;
    if (lag_idx < 0 || lag_idx >= static_cast<int64_t>(partition_size)) {
      continue;
    }
    lag_sorted_indices[idx] = sorted_indices[lag_idx];
  }
  std::vector<int64_t> lag_original_indices(partition_size);
  for (size_t k = 0; k < partition_size; ++k) {
    const auto lag_index = lag_sorted_indices[k];
    lag_original_indices[sorted_indices[k]] =
        lag_index != -1 ? original_indices[lag_index] : -1;
  }
  std::copy(lag_original_indices.begin(), lag_original_indices.end(), sorted_indices);
}
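
// Illustrative example (not part of the original source): for a 4-row partition with
// original_indices = {7, 8, 9, 10}, sorted_indices = {0, 1, 2, 3}, and lag = 1, the
// buffer becomes {-1, 7, 8, 9}: each row now points at the original row id one
// position above it, and the first row gets -1 since it has no predecessor.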

void apply_nth_value_to_partition(const int32_t* original_indices,
                                  int64_t* output_for_partition_buff,
                                  const size_t partition_size,
                                  const size_t target_pos) {
  CHECK_LT(target_pos, partition_size);
  const auto target_idx = original_indices[output_for_partition_buff[target_pos]];
  std::fill(
      output_for_partition_buff, output_for_partition_buff + partition_size, target_idx);
}

void apply_original_index_to_partition(const int32_t* original_indices,
                                       int64_t* output_for_partition_buff,
                                       const size_t partition_size) {
  for (size_t i = 0; i < partition_size; i++) {
    const auto target_idx = original_indices[output_for_partition_buff[i]];
    output_for_partition_buff[i] = target_idx;
  }
}

// Sets a bit at the last row of each peer group in the partition on the given bitset.
void index_to_partition_end(
    const int8_t* partition_end,
    const size_t off,
    const int64_t* index,
    const size_t index_size,
    const std::function<bool(const int64_t lhs, const int64_t rhs)>& comparator) {
  int64_t partition_end_handle = reinterpret_cast<int64_t>(partition_end);
  for (size_t i = 0; i < index_size; ++i) {
    if (advance_current_rank(comparator, index, i)) {
      agg_count_distinct_bitmap(&partition_end_handle, off + i - 1, 0);
    }
  }
  CHECK(index_size);
  agg_count_distinct_bitmap(&partition_end_handle, off + index_size - 1, 0);
}

bool pos_is_set(const int64_t bitset, const int64_t pos) {
  return (reinterpret_cast<const int8_t*>(bitset))[pos >> 3] & (1 << (pos & 7));
}
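
// Illustrative example (not part of the original source): the bitset packs one bit
// per row position, little-endian within each byte, so pos 11 is tested as
// byte[11 >> 3] = byte[1] with mask 1 << (11 & 7) = 1 << 3. fillPartitionStart() and
// fillPartitionEnd() below build such bitsets at partition boundaries.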

// Write value to pending integer outputs collected for all the peer rows. The end of
// the peer group is represented by the bitset.
template <class T>
void apply_window_pending_outputs_int(const int64_t handle,
                                      const int64_t value,
                                      const int64_t bitset,
                                      const int64_t pos) {
  if (!pos_is_set(bitset, pos)) {
    return;
  }
  auto& pending_output_slots = *reinterpret_cast<std::vector<void*>*>(handle);
  for (auto pending_output_slot : pending_output_slots) {
    *reinterpret_cast<T*>(pending_output_slot) = value;
  }
  pending_output_slots.clear();
}

}  // namespace

extern "C" RUNTIME_EXPORT void apply_window_pending_outputs_int64(const int64_t handle,
                                                                  const int64_t value,
                                                                  const int64_t bitset,
                                                                  const int64_t pos) {
  apply_window_pending_outputs_int<int64_t>(handle, value, bitset, pos);
}

extern "C" RUNTIME_EXPORT void apply_window_pending_outputs_int32(const int64_t handle,
                                                                  const int64_t value,
                                                                  const int64_t bitset,
                                                                  const int64_t pos) {
  apply_window_pending_outputs_int<int32_t>(handle, value, bitset, pos);
}

extern "C" RUNTIME_EXPORT void apply_window_pending_outputs_int16(const int64_t handle,
                                                                  const int64_t value,
                                                                  const int64_t bitset,
                                                                  const int64_t pos) {
  apply_window_pending_outputs_int<int16_t>(handle, value, bitset, pos);
}

extern "C" RUNTIME_EXPORT void apply_window_pending_outputs_int8(const int64_t handle,
                                                                 const int64_t value,
                                                                 const int64_t bitset,
                                                                 const int64_t pos) {
  apply_window_pending_outputs_int<int8_t>(handle, value, bitset, pos);
}

extern "C" RUNTIME_EXPORT void apply_window_pending_outputs_double(const int64_t handle,
                                                                   const double value,
                                                                   const int64_t bitset,
                                                                   const int64_t pos) {
  if (!pos_is_set(bitset, pos)) {
    return;
  }
  auto& pending_output_slots = *reinterpret_cast<std::vector<void*>*>(handle);
  for (auto pending_output_slot : pending_output_slots) {
    *reinterpret_cast<double*>(pending_output_slot) = value;
  }
  pending_output_slots.clear();
}

extern "C" RUNTIME_EXPORT void apply_window_pending_outputs_float(const int64_t handle,
                                                                  const float value,
                                                                  const int64_t bitset,
                                                                  const int64_t pos) {
  if (!pos_is_set(bitset, pos)) {
    return;
  }
  auto& pending_output_slots = *reinterpret_cast<std::vector<void*>*>(handle);
  for (auto pending_output_slot : pending_output_slots) {
    // note: the float value is intentionally stored through a double pointer here;
    // the row-wise output slot is 8 bytes wide (the columnar variant stores a float)
    *reinterpret_cast<double*>(pending_output_slot) = value;
  }
  pending_output_slots.clear();
}

extern "C" RUNTIME_EXPORT void apply_window_pending_outputs_float_columnar(
    const int64_t handle,
    const float value,
    const int64_t bitset,
    const int64_t pos) {
  if (!pos_is_set(bitset, pos)) {
    return;
  }
  auto& pending_output_slots = *reinterpret_cast<std::vector<void*>*>(handle);
  for (auto pending_output_slot : pending_output_slots) {
    *reinterpret_cast<float*>(pending_output_slot) = value;
  }
  pending_output_slots.clear();
}

// Add a pending output slot to be written back at the end of a peer row group.
extern "C" RUNTIME_EXPORT void add_window_pending_output(void* pending_output,
                                                         const int64_t handle) {
  reinterpret_cast<std::vector<void*>*>(handle)->push_back(pending_output);
}

// Returns true iff the aggregate window function requires special multiplicity handling
// to ensure that peer rows have the same value for the window function.
bool window_function_requires_peer_handling(const Analyzer::WindowFunction* window_func) {
  if (!window_function_is_aggregate(window_func->getKind())) {
    return false;
  }
  if (window_func->getOrderKeys().empty()) {
    return true;
  }
  switch (window_func->getKind()) {
    case SqlWindowFunctionKind::MIN:
    case SqlWindowFunctionKind::MAX: {
      return false;
    }
    default: {
      return true;
    }
  }
}
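
// Illustrative example (not part of the original source): for SUM(x) OVER
// (PARTITION BY k ORDER BY t), all rows that tie on t (peer rows) must see the same
// cumulative sum, so outputs stay pending until the whole peer group has been
// aggregated; per the switch above, MIN and MAX are exempt while the remaining
// aggregates (e.g. SUM, AVG, COUNT) need this handling whenever order keys exist.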

void WindowFunctionContext::compute(
    std::unordered_map<QueryPlanHash, size_t>& sorted_partition_key_ref_count_map,
    std::unordered_map<QueryPlanHash, std::shared_ptr<std::vector<int64_t>>>&
        sorted_partition_cache,
    std::unordered_map<size_t, AggregateTreeForWindowFraming>& aggregate_tree_map) {
  auto timer = DEBUG_TIMER(__func__);
  CHECK(!output_);
  if (elem_count_ == 0) {
    return;
  }
  size_t output_buf_sz =
      elem_count_ * window_function_buffer_element_size(window_func_->getKind());
  output_ = static_cast<int8_t*>(row_set_mem_owner_->allocate(output_buf_sz,
                                                              /*thread_idx=*/0));
  const bool is_window_function_aggregate_or_has_framing =
      window_function_is_aggregate(window_func_->getKind()) ||
      window_func_->hasFraming();
  if (is_window_function_aggregate_or_has_framing) {
    fillPartitionStart();
    if (window_function_requires_peer_handling(window_func_)) {
      fillPartitionEnd();
    }
  }
  std::unique_ptr<int64_t[]> scratchpad;
  int64_t* intermediate_output_buffer;
  if (is_window_function_aggregate_or_has_framing) {
    intermediate_output_buffer = reinterpret_cast<int64_t*>(output_);
  } else {
    output_buf_sz = sizeof(int64_t) * elem_count_;
    scratchpad.reset(new int64_t[elem_count_]);
    intermediate_output_buffer = scratchpad.get();
  }
  const bool should_parallelize{g_enable_parallel_window_partition_compute &&
                                elem_count_ >=
                                    g_parallel_window_partition_compute_threshold};

  auto cached_sorted_partition_it =
      sorted_partition_cache.find(sorted_partition_cache_key_);
  if (cached_sorted_partition_it != sorted_partition_cache.end()) {
    auto& sorted_partition = cached_sorted_partition_it->second;
    VLOG(1) << "Reuse cached sorted partition to compute window function context (key: "
            << sorted_partition_cache_key_
            << ", ordering condition: " << ::toString(window_func_->getOrderKeys())
            << ")";
    DEBUG_TIMER("Window Function Cached Sorted Partition Copy");
    std::memcpy(intermediate_output_buffer, sorted_partition->data(), output_buf_sz);
    if (window_func_->hasFraming()) {
      sorted_partition_buf_ = sorted_partition;
    }
  } else {
    // ordering partitions if necessary
    const auto sort_partitions = [&](const size_t start, const size_t end) {
      for (size_t partition_idx = start; partition_idx < end; ++partition_idx) {
        sortPartition(partition_idx,
                      intermediate_output_buffer + offsets()[partition_idx],
                      should_parallelize);
      }
    };

    if (should_parallelize) {
      auto sorted_partition_copy_timer =
          DEBUG_TIMER("Window Function Partition Sorting Parallelized");
      tbb::parallel_for(tbb::blocked_range<int64_t>(0, partitionCount()),
                        [&, parent_thread_local_ids = logger::thread_local_ids()](
                            const tbb::blocked_range<int64_t>& r) {
                          logger::LocalIdsScopeGuard lisg =
                              parent_thread_local_ids.setNewThreadId();
                          sort_partitions(r.begin(), r.end());
                        });
    } else {
      auto sorted_partition_copy_timer =
          DEBUG_TIMER("Window Function Partition Sorting Non-Parallelized");
      sort_partitions(0, partitionCount());
    }
    auto sorted_partition_ref_cnt_it =
        sorted_partition_key_ref_count_map.find(sorted_partition_cache_key_);
    bool can_access_sorted_partition =
        sorted_partition_ref_cnt_it != sorted_partition_key_ref_count_map.end() &&
        sorted_partition_ref_cnt_it->second > 1;
    if (can_access_sorted_partition || window_func_->hasFraming()) {
      // keep the sorted partition only if it will be reused by another window function
      // context of this query
      sorted_partition_buf_ = std::make_shared<std::vector<int64_t>>(elem_count_);
      DEBUG_TIMER("Window Function Sorted Partition Copy For Caching");
      std::memcpy(
          sorted_partition_buf_->data(), intermediate_output_buffer, output_buf_sz);
      auto it = sorted_partition_cache.emplace(sorted_partition_cache_key_,
                                               sorted_partition_buf_);
      if (it.second) {
        VLOG(1) << "Put sorted partition to cache (key: " << sorted_partition_cache_key_
                << ", ordering condition: " << ::toString(window_func_->getOrderKeys())
                << ")";
      }
    }
  }

  if (window_func_->hasFraming()) {
    const auto compute_ordered_partition_null_range = [=](const size_t start,
                                                          const size_t end) {
      for (size_t partition_idx = start; partition_idx < end; ++partition_idx) {
        computeNullRangeOfSortedPartition(
            window_func_->getOrderKeys().front()->get_type_info(),
            partition_idx,
            payload() + offsets()[partition_idx],
            intermediate_output_buffer + offsets()[partition_idx]);
      }
    };
    auto partition_count = partitionCount();

    if (should_parallelize) {
      auto partition_computation_timer =
          DEBUG_TIMER("Window Function Ordered-Partition Null-Range Compute");
      tbb::parallel_for(tbb::blocked_range<int64_t>(0, partitionCount()),
                        [&, parent_thread_local_ids = logger::thread_local_ids()](
                            const tbb::blocked_range<int64_t>& r) {
                          logger::LocalIdsScopeGuard lisg =
                              parent_thread_local_ids.setNewThreadId();
                          compute_ordered_partition_null_range(r.begin(), r.end());
                        });
    } else {
      auto partition_computation_timer = DEBUG_TIMER(
          "Window Function Non-Parallelized Ordered-Partition Null-Range Compute");
      compute_ordered_partition_null_range(0, partitionCount());
    }
    auto const cache_key = computeAggregateTreeCacheKey();
    auto const c_it = aggregate_tree_map.find(cache_key);
    if (c_it != aggregate_tree_map.cend()) {
      VLOG(1) << "Reuse aggregate tree for window function framing";
      resizeStorageForWindowFraming(/*for_reuse=*/true);
      aggregate_trees_ = c_it->second;
      memcpy(aggregate_trees_depth_,
             aggregate_trees_.aggregate_trees_depth_,
             sizeof(size_t) * partition_count);
    } else {
      resizeStorageForWindowFraming();
      const auto build_aggregation_tree_for_partitions = [=](const size_t start,
                                                             const size_t end) {
        for (size_t partition_idx = start; partition_idx < end; ++partition_idx) {
          // build a segment tree for the partition
          // todo (yoonmin) : support a generic window function expression,
          // i.e., when window_func_expr_columns_.size() > 1
          SQLTypeInfo const input_col_ti =
              window_func_->getArgs().front()->get_type_info();
          const auto partition_size = counts()[partition_idx];
          buildAggregationTreeForPartition(window_func_->getKind(),
                                           partition_idx,
                                           partition_size,
                                           payload() + offsets()[partition_idx],
                                           intermediate_output_buffer,
                                           input_col_ti);
        }
      };
      if (should_parallelize) {
        auto partition_computation_timer = DEBUG_TIMER(
            "Window Function Parallelized Segment Tree Construction for Partitions");
        tbb::parallel_for(tbb::blocked_range<int64_t>(0, partitionCount()),
                          [=, parent_thread_local_ids = logger::thread_local_ids()](
                              const tbb::blocked_range<int64_t>& r) {
                            logger::LocalIdsScopeGuard lisg =
                                parent_thread_local_ids.setNewThreadId();
                            build_aggregation_tree_for_partitions(r.begin(), r.end());
                          });
      } else {
        auto partition_computation_timer = DEBUG_TIMER(
            "Window Function Non-Parallelized Segment Tree Construction for "
            "Partitions");
        build_aggregation_tree_for_partitions(0, partition_count);
      }
      CHECK(aggregate_tree_map.emplace(cache_key, aggregate_trees_).second);
      VLOG(2) << "Put aggregate tree for the window framing";
    }
  }

  const auto compute_partitions = [=](const size_t start, const size_t end) {
    for (size_t partition_idx = start; partition_idx < end; ++partition_idx) {
      computePartitionBuffer(partition_idx,
                             intermediate_output_buffer + offsets()[partition_idx],
                             window_func_);
    }
  };

  if (should_parallelize) {
    auto partition_computation_timer = DEBUG_TIMER("Window Function Partition Compute");
    tbb::parallel_for(tbb::blocked_range<int64_t>(0, partitionCount()),
                      [&, parent_thread_local_ids = logger::thread_local_ids()](
                          const tbb::blocked_range<int64_t>& r) {
                        logger::LocalIdsScopeGuard lisg =
                            parent_thread_local_ids.setNewThreadId();
                        compute_partitions(r.begin(), r.end());
                      });
  } else {
    auto partition_computation_timer =
        DEBUG_TIMER("Window Function Non-Parallelized Partition Compute");
    compute_partitions(0, partitionCount());
  }

  if (is_window_function_aggregate_or_has_framing) {
    // If the window function is an aggregate, we were able to write to the final
    // output buffer directly in computePartitionBuffer, and we are done.
    return;
  }

  auto output_i64 = reinterpret_cast<int64_t*>(output_);
  const auto payload_copy = [=](const size_t start, const size_t end) {
    for (size_t i = start; i < end; ++i) {
      output_i64[payload()[i]] = intermediate_output_buffer[i];
    }
  };
  if (should_parallelize) {
    auto payload_copy_timer =
        DEBUG_TIMER("Window Function Non-Aggregate Payload Copy Parallelized");
    tbb::parallel_for(tbb::blocked_range<int64_t>(0, elem_count_),
                      [&, parent_thread_local_ids = logger::thread_local_ids()](
                          const tbb::blocked_range<int64_t>& r) {
                        logger::LocalIdsScopeGuard lisg =
                            parent_thread_local_ids.setNewThreadId();
                        payload_copy(r.begin(), r.end());
                      });
  } else {
    auto payload_copy_timer =
        DEBUG_TIMER("Window Function Non-Aggregate Payload Copy Non-Parallelized");
    payload_copy(0, elem_count_);
  }
}
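
// Illustrative note (not part of the original source): for non-aggregate window
// functions the intermediate buffer above holds one value per *sorted* row position,
// so payload_copy scatters value i to output slot payload()[i], restoring the
// original (pre-sort) row order of the input table.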

namespace {
struct FindNullRange {
  int32_t const* original_col_idx_buf;
  int64_t const* ordered_col_idx_buf;
  int32_t const partition_size;
  int64_t null_bit_pattern = -1;

  template <typename T>
  IndexPair find_null_range_int(int8_t const* order_col_buf) const {
    IndexPair null_range{std::numeric_limits<int64_t>::max(),
                         std::numeric_limits<int64_t>::min()};
    auto const null_val = inline_int_null_value<T>();
    auto const casted_order_col_buf = reinterpret_cast<T const*>(order_col_buf);
    if (casted_order_col_buf[original_col_idx_buf[ordered_col_idx_buf[0]]] == null_val) {
      int64_t null_range_max = 1;
      while (null_range_max < partition_size &&
             casted_order_col_buf
                     [original_col_idx_buf[ordered_col_idx_buf[null_range_max]]] ==
                 null_val) {
        null_range_max++;
      }
      null_range.first = 0;
      null_range.second = null_range_max - 1;
    } else if (casted_order_col_buf
                   [original_col_idx_buf[ordered_col_idx_buf[partition_size - 1]]] ==
               null_val) {
      int64_t null_range_min = partition_size - 2;
      while (null_range_min >= 0 &&
             casted_order_col_buf
                     [original_col_idx_buf[ordered_col_idx_buf[null_range_min]]] ==
                 null_val) {
        null_range_min--;
      }
      null_range.first = null_range_min + 1;
      null_range.second = partition_size - 1;
    }
    return null_range;
  }

  template <typename COL_TYPE,
            typename NULL_TYPE =
                std::conditional_t<sizeof(COL_TYPE) == sizeof(int32_t), int32_t, int64_t>>
  IndexPair find_null_range_fp(int8_t const* order_col_buf) const {
    IndexPair null_range{std::numeric_limits<int64_t>::max(),
                         std::numeric_limits<int64_t>::min()};
    auto const casted_order_col_buf = reinterpret_cast<COL_TYPE const*>(order_col_buf);
    auto check_null_val = [&casted_order_col_buf, this](size_t idx) {
      return *reinterpret_cast<NULL_TYPE const*>(
                 may_alias_ptr(&casted_order_col_buf
                                   [original_col_idx_buf[ordered_col_idx_buf[idx]]])) ==
             null_bit_pattern;
    };
    if (check_null_val(0)) {
      int64_t null_range_max = 1;
      while (null_range_max < partition_size && check_null_val(null_range_max)) {
        null_range_max++;
      }
      null_range.first = 0;
      null_range.second = null_range_max - 1;
    } else if (check_null_val(partition_size - 1)) {
      int64_t null_range_min = partition_size - 2;
      while (null_range_min >= 0 && check_null_val(null_range_min)) {
        null_range_min--;
      }
      null_range.first = null_range_min + 1;
      null_range.second = partition_size - 1;
    }
    return null_range;
  }
};
}  // namespace
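
// Illustrative example (not part of the original source): a sorted partition places
// nulls either at the front or at the back, so the null range is always a contiguous
// prefix or suffix of the ordering. For sorted order-key values {NULL, NULL, 1, 5}
// the range is [0, 1]; for {1, 5, NULL} it is [2, 2]; with no nulls the sentinel
// pair {INT64_MAX, INT64_MIN} is returned unchanged.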

void WindowFunctionContext::computeNullRangeOfSortedPartition(
    const SQLTypeInfo& order_col_ti,
    size_t partition_idx,
    const int32_t* original_col_idx_buf,
    const int64_t* ordered_col_idx_buf) {
  IndexPair null_range;
  const auto partition_size = counts()[partition_idx];
  if (partition_size > 0) {
    if (order_col_ti.is_integer() || order_col_ti.is_decimal() ||
        order_col_ti.is_time_or_date() || order_col_ti.is_boolean()) {
      FindNullRange const null_range_info{
          original_col_idx_buf, ordered_col_idx_buf, partition_size};
      switch (order_col_ti.get_size()) {
        case 8:
          null_range =
              null_range_info.find_null_range_int<int64_t>(order_columns_.front());
          break;
        case 4:
          null_range =
              null_range_info.find_null_range_int<int32_t>(order_columns_.front());
          break;
        case 2:
          null_range =
              null_range_info.find_null_range_int<int16_t>(order_columns_.front());
          break;
        case 1:
          null_range =
              null_range_info.find_null_range_int<int8_t>(order_columns_.front());
          break;
        default:
          LOG(FATAL) << "Invalid type size: " << order_col_ti.get_size();
      }
    } else if (order_col_ti.is_fp()) {
      const auto null_bit_pattern =
          null_val_bit_pattern(order_col_ti, order_col_ti.get_type() == kFLOAT);
      FindNullRange const null_range_info{
          original_col_idx_buf, ordered_col_idx_buf, partition_size, null_bit_pattern};
      switch (order_col_ti.get_type()) {
        case kFLOAT:
          null_range = null_range_info.find_null_range_fp<float>(order_columns_.front());
          break;
        case kDOUBLE:
          null_range =
              null_range_info.find_null_range_fp<double>(order_columns_.front());
          break;
        default:
          LOG(FATAL) << "Invalid float type";
      }
    } else {
      LOG(FATAL) << "Invalid column type for window aggregation over the frame";
    }
  }
  ordered_partition_null_start_pos_[partition_idx] = null_range.first;
  ordered_partition_null_end_pos_[partition_idx] = null_range.second + 1;
}

std::vector<WindowFunctionContext::Comparator> WindowFunctionContext::createComparator(
    size_t partition_idx) {
  // create tuple comparator
  std::vector<WindowFunctionContext::Comparator> partition_comparator;
  const auto& order_keys = window_func_->getOrderKeys();
  const auto& collation = window_func_->getCollation();
  CHECK_EQ(order_keys.size(), collation.size());
  for (size_t order_column_idx = 0; order_column_idx < order_columns_.size();
       ++order_column_idx) {
    auto order_column_buffer = order_columns_[order_column_idx];
    const auto order_col =
        dynamic_cast<const Analyzer::ColumnVar*>(order_keys[order_column_idx].get());
    CHECK(order_col);
    const auto& order_col_collation = collation[order_column_idx];
    auto comparator = makeComparator(order_col,
                                     order_column_buffer,
                                     payload() + offsets()[partition_idx],
                                     !order_col_collation.is_desc,
                                     order_col_collation.nulls_first);
    if (order_col_collation.is_desc) {
      comparator = [comparator](const int64_t lhs, const int64_t rhs) {
        return comparator(rhs, lhs);
      };
    }
    partition_comparator.push_back(comparator);
  }
  return partition_comparator;
}

void WindowFunctionContext::sortPartition(const size_t partition_idx,
                                          int64_t* output_for_partition_buff,
                                          bool should_parallelize) {
  const size_t partition_size{static_cast<size_t>(counts()[partition_idx])};
  if (partition_size == 0) {
    return;
  }
  std::iota(
      output_for_partition_buff, output_for_partition_buff + partition_size, int64_t(0));
  auto partition_comparator = createComparator(partition_idx);
  if (!partition_comparator.empty()) {
    const auto col_tuple_comparator = [&partition_comparator](const int64_t lhs,
                                                              const int64_t rhs) {
      for (const auto& comparator : partition_comparator) {
        const auto comparator_result = comparator(lhs, rhs);
        switch (comparator_result) {
          case WindowFunctionContext::WindowComparatorResult::LT:
            return true;
          case WindowFunctionContext::WindowComparatorResult::GT:
            return false;
          default:
            // WindowComparatorResult::EQ: continue to the next comparator
            continue;
        }
      }
      // If we reach here, WindowFunctionContext::WindowComparatorResult::EQ holds
      // for all keys; return false since the sort algorithm must enforce weak ordering
      return false;
    };
    if (should_parallelize) {
#ifdef HAVE_TBB
      tbb::parallel_sort(output_for_partition_buff,
                         output_for_partition_buff + partition_size,
                         col_tuple_comparator);
#else
      thrust::sort(output_for_partition_buff,
                   output_for_partition_buff + partition_size,
                   col_tuple_comparator);
#endif
    } else {
      std::sort(output_for_partition_buff,
                output_for_partition_buff + partition_size,
                col_tuple_comparator);
    }
  }
}
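
// Illustrative example (not part of the original source): for ORDER BY a ASC, b DESC
// the comparator chain above holds two entries; a pair of rows that compares LT or GT
// on `a` is decided immediately, and only ties on `a` fall through to the `b`
// comparator, yielding the usual lexicographic (multi-key) ordering.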

const Analyzer::WindowFunction* WindowFunctionContext::getWindowFunction() const {
  return window_func_;
}

const int8_t* WindowFunctionContext::output() const {
  return output_;
}

const int64_t* WindowFunctionContext::sortedPartition() const {
  CHECK(sorted_partition_buf_);
  return sorted_partition_buf_->data();
}

const int64_t* WindowFunctionContext::aggregateState() const {
  CHECK(window_function_is_aggregate(window_func_->getKind()));
  return &aggregate_state_.val;
}

const int64_t* WindowFunctionContext::aggregateStateCount() const {
  CHECK(window_function_is_aggregate(window_func_->getKind()));
  return &aggregate_state_.count;
}

const int64_t* WindowFunctionContext::partitionStartOffset() const {
  CHECK(partition_start_offset_);
  return partition_start_offset_;
}

const int64_t* WindowFunctionContext::partitionNumCountBuf() const {
  CHECK(partition_start_offset_);
  return partition_start_offset_ + 1;
}

int64_t WindowFunctionContext::aggregateStatePendingOutputs() const {
  CHECK(window_function_is_aggregate(window_func_->getKind()));
  return reinterpret_cast<int64_t>(&aggregate_state_.outputs);
}

const int8_t* WindowFunctionContext::partitionStart() const {
  return partition_start_;
}

const int8_t* WindowFunctionContext::partitionEnd() const {
  return partition_end_;
}

size_t WindowFunctionContext::elementCount() const {
  return elem_count_;
}

namespace {

template <class T>
WindowFunctionContext::WindowComparatorResult integer_comparator_asc(
    const int8_t* order_column_buffer,
    const SQLTypeInfo& ti,
    const int32_t* partition_indices,
    const int64_t lhs,
    const int64_t rhs,
    const bool asc_ordering,
    const bool nulls_first) {
  const auto values = reinterpret_cast<const T*>(order_column_buffer);
  const auto lhs_val = values[partition_indices[lhs]];
  const auto rhs_val = values[partition_indices[rhs]];
  const auto null_val = inline_fixed_encoding_null_val(ti);
  if (lhs_val == null_val && rhs_val == null_val) {
    return WindowFunctionContext::WindowComparatorResult::EQ;
  }
  if (lhs_val == null_val && rhs_val != null_val) {
    return nulls_first ? WindowFunctionContext::WindowComparatorResult::LT
                       : WindowFunctionContext::WindowComparatorResult::GT;
  }
  if (rhs_val == null_val && lhs_val != null_val) {
    return nulls_first ? WindowFunctionContext::WindowComparatorResult::GT
                       : WindowFunctionContext::WindowComparatorResult::LT;
  }
  if (lhs_val < rhs_val) {
    return WindowFunctionContext::WindowComparatorResult::LT;
  }
  if (lhs_val > rhs_val) {
    return WindowFunctionContext::WindowComparatorResult::GT;
  }
  return WindowFunctionContext::WindowComparatorResult::EQ;
}

template <class T>
WindowFunctionContext::WindowComparatorResult integer_comparator_desc(
    const int8_t* order_column_buffer,
    const SQLTypeInfo& ti,
    const int32_t* partition_indices,
    const int64_t lhs,
    const int64_t rhs,
    const bool asc_ordering,
    const bool nulls_first) {
  const auto values = reinterpret_cast<const T*>(order_column_buffer);
  const auto lhs_val = values[partition_indices[lhs]];
  const auto rhs_val = values[partition_indices[rhs]];
  const auto null_val = inline_fixed_encoding_null_val(ti);
  if (lhs_val == null_val && rhs_val == null_val) {
    return WindowFunctionContext::WindowComparatorResult::EQ;
  }
  if (lhs_val == null_val && rhs_val != null_val) {
    return !nulls_first ? WindowFunctionContext::WindowComparatorResult::LT
                        : WindowFunctionContext::WindowComparatorResult::GT;
  }
  if (rhs_val == null_val && lhs_val != null_val) {
    return !nulls_first ? WindowFunctionContext::WindowComparatorResult::GT
                        : WindowFunctionContext::WindowComparatorResult::LT;
  }
  if (lhs_val < rhs_val) {
    return WindowFunctionContext::WindowComparatorResult::LT;
  }
  if (lhs_val > rhs_val) {
    return WindowFunctionContext::WindowComparatorResult::GT;
  }
  return WindowFunctionContext::WindowComparatorResult::EQ;
}

template <class T, class NullPatternType>
WindowFunctionContext::WindowComparatorResult fp_comparator_asc(
    const int8_t* order_column_buffer,
    const SQLTypeInfo& ti,
    const int32_t* partition_indices,
    const int64_t lhs,
    const int64_t rhs,
    const bool asc_ordering,
    const bool nulls_first) {
  const auto values = reinterpret_cast<const T*>(order_column_buffer);
  const auto lhs_val = values[partition_indices[lhs]];
  const auto rhs_val = values[partition_indices[rhs]];
  const auto null_bit_pattern = null_val_bit_pattern(ti, ti.get_type() == kFLOAT);
  const auto lhs_bit_pattern =
      *reinterpret_cast<const NullPatternType*>(may_alias_ptr(&lhs_val));
  const auto rhs_bit_pattern =
      *reinterpret_cast<const NullPatternType*>(may_alias_ptr(&rhs_val));
  if (lhs_bit_pattern == null_bit_pattern && rhs_bit_pattern == null_bit_pattern) {
    return WindowFunctionContext::WindowComparatorResult::EQ;
  }
  if (lhs_bit_pattern == null_bit_pattern && rhs_bit_pattern != null_bit_pattern) {
    return nulls_first ? WindowFunctionContext::WindowComparatorResult::LT
                       : WindowFunctionContext::WindowComparatorResult::GT;
  }
  if (rhs_bit_pattern == null_bit_pattern && lhs_bit_pattern != null_bit_pattern) {
    return nulls_first ? WindowFunctionContext::WindowComparatorResult::GT
                       : WindowFunctionContext::WindowComparatorResult::LT;
  }
  if (lhs_val < rhs_val) {
    return WindowFunctionContext::WindowComparatorResult::LT;
  }
  if (lhs_val > rhs_val) {
    return WindowFunctionContext::WindowComparatorResult::GT;
  }
  return WindowFunctionContext::WindowComparatorResult::EQ;
}

template <class T, class NullPatternType>
WindowFunctionContext::WindowComparatorResult fp_comparator_desc(
    const int8_t* order_column_buffer,
    const SQLTypeInfo& ti,
    const int32_t* partition_indices,
    const int64_t lhs,
    const int64_t rhs,
    const bool asc_ordering,
    const bool nulls_first) {
  const auto values = reinterpret_cast<const T*>(order_column_buffer);
  const auto lhs_val = values[partition_indices[lhs]];
  const auto rhs_val = values[partition_indices[rhs]];
  const auto null_bit_pattern = null_val_bit_pattern(ti, ti.get_type() == kFLOAT);
  const auto lhs_bit_pattern =
      *reinterpret_cast<const NullPatternType*>(may_alias_ptr(&lhs_val));
  const auto rhs_bit_pattern =
      *reinterpret_cast<const NullPatternType*>(may_alias_ptr(&rhs_val));
  if (lhs_bit_pattern == null_bit_pattern && rhs_bit_pattern == null_bit_pattern) {
    return WindowFunctionContext::WindowComparatorResult::EQ;
  }
  if (lhs_bit_pattern == null_bit_pattern && rhs_bit_pattern != null_bit_pattern) {
    return !nulls_first ? WindowFunctionContext::WindowComparatorResult::LT
                        : WindowFunctionContext::WindowComparatorResult::GT;
  }
  if (rhs_bit_pattern == null_bit_pattern && lhs_bit_pattern != null_bit_pattern) {
    return !nulls_first ? WindowFunctionContext::WindowComparatorResult::GT
                        : WindowFunctionContext::WindowComparatorResult::LT;
  }
  if (lhs_val < rhs_val) {
    return WindowFunctionContext::WindowComparatorResult::LT;
  }
  if (lhs_val > rhs_val) {
    return WindowFunctionContext::WindowComparatorResult::GT;
  }
  return WindowFunctionContext::WindowComparatorResult::EQ;
}

}  // namespace

WindowFunctionContext::Comparator WindowFunctionContext::makeComparator(
    const Analyzer::ColumnVar* col_var,
    const int8_t* order_column_buffer,
    const int32_t* partition_indices,
    const bool asc_ordering,
    const bool nulls_first) {
  const auto& ti = col_var->get_type_info();
  if (ti.is_integer() || ti.is_decimal() || ti.is_time() || ti.is_boolean()) {
    switch (ti.get_size()) {
      case 8: {
        return [order_column_buffer, nulls_first, partition_indices, asc_ordering, &ti](
                   const int64_t lhs, const int64_t rhs) {
          return asc_ordering ? integer_comparator_asc<int64_t>(order_column_buffer,
                                                                ti,
                                                                partition_indices,
                                                                lhs,
                                                                rhs,
                                                                asc_ordering,
                                                                nulls_first)
                              : integer_comparator_desc<int64_t>(order_column_buffer,
                                                                 ti,
                                                                 partition_indices,
                                                                 lhs,
                                                                 rhs,
                                                                 asc_ordering,
                                                                 nulls_first);
        };
      }
      case 4: {
        return [order_column_buffer, nulls_first, partition_indices, asc_ordering, &ti](
                   const int64_t lhs, const int64_t rhs) {
          return asc_ordering ? integer_comparator_asc<int32_t>(order_column_buffer,
                                                                ti,
                                                                partition_indices,
                                                                lhs,
                                                                rhs,
                                                                asc_ordering,
                                                                nulls_first)
                              : integer_comparator_desc<int32_t>(order_column_buffer,
                                                                 ti,
                                                                 partition_indices,
                                                                 lhs,
                                                                 rhs,
                                                                 asc_ordering,
                                                                 nulls_first);
        };
      }
      case 2: {
        return [order_column_buffer, nulls_first, partition_indices, asc_ordering, &ti](
                   const int64_t lhs, const int64_t rhs) {
          return asc_ordering ? integer_comparator_asc<int16_t>(order_column_buffer,
                                                                ti,
                                                                partition_indices,
                                                                lhs,
                                                                rhs,
                                                                asc_ordering,
                                                                nulls_first)
                              : integer_comparator_desc<int16_t>(order_column_buffer,
                                                                 ti,
                                                                 partition_indices,
                                                                 lhs,
                                                                 rhs,
                                                                 asc_ordering,
                                                                 nulls_first);
        };
      }
      case 1: {
        return [order_column_buffer, nulls_first, partition_indices, asc_ordering, &ti](
                   const int64_t lhs, const int64_t rhs) {
          return asc_ordering ? integer_comparator_asc<int8_t>(order_column_buffer,
                                                               ti,
                                                               partition_indices,
                                                               lhs,
                                                               rhs,
                                                               asc_ordering,
                                                               nulls_first)
                              : integer_comparator_desc<int8_t>(order_column_buffer,
                                                                ti,
                                                                partition_indices,
                                                                lhs,
                                                                rhs,
                                                                asc_ordering,
                                                                nulls_first);
        };
      }
      default: {
        LOG(FATAL) << "Invalid type size: " << ti.get_size();
      }
    }
  }
  if (ti.is_fp()) {
    switch (ti.get_type()) {
      case kFLOAT: {
        return [order_column_buffer, nulls_first, partition_indices, asc_ordering, &ti](
                   const int64_t lhs, const int64_t rhs) {
          return asc_ordering ? fp_comparator_asc<float, int32_t>(order_column_buffer,
                                                                  ti,
                                                                  partition_indices,
                                                                  lhs,
                                                                  rhs,
                                                                  asc_ordering,
                                                                  nulls_first)
                              : fp_comparator_desc<float, int32_t>(order_column_buffer,
                                                                   ti,
                                                                   partition_indices,
                                                                   lhs,
                                                                   rhs,
                                                                   asc_ordering,
                                                                   nulls_first);
        };
      }
      case kDOUBLE: {
        return [order_column_buffer, nulls_first, partition_indices, asc_ordering, &ti](
                   const int64_t lhs, const int64_t rhs) {
          return asc_ordering ? fp_comparator_asc<double, int64_t>(order_column_buffer,
                                                                   ti,
                                                                   partition_indices,
                                                                   lhs,
                                                                   rhs,
                                                                   asc_ordering,
                                                                   nulls_first)
                              : fp_comparator_desc<double, int64_t>(order_column_buffer,
                                                                    ti,
                                                                    partition_indices,
                                                                    lhs,
                                                                    rhs,
                                                                    asc_ordering,
                                                                    nulls_first);
        };
      }
      default: {
        LOG(FATAL) << "Invalid float type";
      }
    }
  }
  throw std::runtime_error("Type not supported yet");
}

void WindowFunctionContext::computePartitionBuffer(
    const size_t partition_idx,
    int64_t* output_for_partition_buff,
    const Analyzer::WindowFunction* window_func) {
  const size_t partition_size{static_cast<size_t>(counts()[partition_idx])};
  if (partition_size == 0) {
    return;
  }
  const auto offset = offsets()[partition_idx];
  auto partition_comparator = createComparator(partition_idx);
  const auto col_tuple_comparator = [&partition_comparator](const int64_t lhs,
                                                            const int64_t rhs) {
    for (const auto& comparator : partition_comparator) {
      const auto comparator_result = comparator(lhs, rhs);
      switch (comparator_result) {
        case WindowFunctionContext::WindowComparatorResult::LT:
          return true;
        case WindowFunctionContext::WindowComparatorResult::GT:
          return false;
        default:
          // WindowComparatorResult::EQ: continue to the next comparator
          continue;
      }
    }
    // If we reach here, WindowFunctionContext::WindowComparatorResult::EQ holds
    // for all keys; return false since the sort algorithm must enforce weak ordering
    return false;
  };
  switch (window_func->getKind()) {
    case SqlWindowFunctionKind::ROW_NUMBER: {
      const auto row_numbers =
          index_to_row_number(output_for_partition_buff, partition_size);
      std::copy(row_numbers.begin(), row_numbers.end(), output_for_partition_buff);
      break;
    }
    case SqlWindowFunctionKind::RANK: {
      const auto rank =
          index_to_rank(output_for_partition_buff, partition_size, col_tuple_comparator);
      std::copy(rank.begin(), rank.end(), output_for_partition_buff);
      break;
    }
    case SqlWindowFunctionKind::DENSE_RANK: {
      const auto dense_rank = index_to_dense_rank(
          output_for_partition_buff, partition_size, col_tuple_comparator);
      std::copy(dense_rank.begin(), dense_rank.end(), output_for_partition_buff);
      break;
    }
    case SqlWindowFunctionKind::PERCENT_RANK: {
      const auto percent_rank = index_to_percent_rank(
          output_for_partition_buff, partition_size, col_tuple_comparator);
      std::copy(percent_rank.begin(),
                percent_rank.end(),
                reinterpret_cast<double*>(may_alias_ptr(output_for_partition_buff)));
      break;
    }
    case SqlWindowFunctionKind::CUME_DIST: {
      const auto cume_dist = index_to_cume_dist(
          output_for_partition_buff, partition_size, col_tuple_comparator);
      std::copy(cume_dist.begin(),
                cume_dist.end(),
                reinterpret_cast<double*>(may_alias_ptr(output_for_partition_buff)));
      break;
    }
    case SqlWindowFunctionKind::NTILE: {
      const auto& args = window_func->getArgs();
      CHECK_EQ(args.size(), size_t(1));
      const auto n = get_int_constant_from_expr(args.front().get());
      const auto ntile = index_to_ntile(output_for_partition_buff, partition_size, n);
      std::copy(ntile.begin(), ntile.end(), output_for_partition_buff);
      break;
    }
    case SqlWindowFunctionKind::LAG:
    case SqlWindowFunctionKind::LEAD: {
      const auto lag_or_lead = get_lag_or_lead_argument(window_func);
      const auto partition_row_offsets = payload() + offset;
      apply_lag_to_partition(
          lag_or_lead, partition_row_offsets, output_for_partition_buff, partition_size);
      break;
    }
    case SqlWindowFunctionKind::FIRST_VALUE:
    case SqlWindowFunctionKind::LAST_VALUE: {
      const auto target_idx =
          get_target_idx_for_first_or_last_value_func(window_func, partition_size);
      const auto partition_row_offsets = payload() + offset;
      apply_nth_value_to_partition(
          partition_row_offsets, output_for_partition_buff, partition_size, target_idx);
      break;
    }
    case SqlWindowFunctionKind::NTH_VALUE: {
      auto const n_value_ptr =
          dynamic_cast<Analyzer::Constant*>(window_func_->getArgs()[1].get());
      CHECK(n_value_ptr);
      auto const n_value = static_cast<size_t>(n_value_ptr->get_constval().intval);
      const auto partition_row_offsets = payload() + offset;
      if (n_value < partition_size) {
        apply_nth_value_to_partition(
            partition_row_offsets, output_for_partition_buff, partition_size, n_value);
      } else {
        // when NTH_VALUE of the current row is NULL, we keep the NULL value in the
        // current row's output storage in the query output buffer, so we assign the
        // original index of the current row to the corresponding slot in
        // `output_for_partition_buff`
        apply_original_index_to_partition(
            partition_row_offsets, output_for_partition_buff, partition_size);
      }
      break;
    }
    case SqlWindowFunctionKind::AVG:
    case SqlWindowFunctionKind::MIN:
    case SqlWindowFunctionKind::MAX:
    case SqlWindowFunctionKind::SUM:
    case SqlWindowFunctionKind::COUNT: {
      const auto partition_row_offsets = payload() + offset;
      if (window_function_requires_peer_handling(window_func)) {
        index_to_partition_end(partitionEnd(),
                               offset,
                               output_for_partition_buff,
                               partition_size,
                               col_tuple_comparator);
      }
      apply_permutation_to_partition(
          output_for_partition_buff, partition_row_offsets, partition_size);
      break;
    }
    default: {
      throw std::runtime_error("Window function not supported yet: " +
                               ::toString(window_func->getKind()));
    }
  }
}

void WindowFunctionContext::resizeStorageForWindowFraming(bool for_reuse) {
  auto const partition_count = partitionCount();
  aggregate_trees_.resizeStorageForWindowFraming(partition_count);
  if (!for_reuse) {
    segment_trees_owned_.resize(partition_count);
  }
}

void WindowFunctionContext::buildAggregationTreeForPartition(
    SqlWindowFunctionKind agg_type,
    size_t partition_idx,
    size_t partition_size,
    const int32_t* original_rowid_buf,
    const int64_t* ordered_rowid_buf,
    const SQLTypeInfo& input_col_ti) {
  if (!(input_col_ti.is_number() || input_col_ti.is_boolean() ||
        input_col_ti.is_time_or_date())) {
    throw QueryNotSupported("Window aggregate function over frame on a column type " +
                            ::toString(input_col_ti.get_type()) + " is not supported.");
  }
  if (input_col_ti.is_time_or_date() && !(agg_type == SqlWindowFunctionKind::MIN ||
                                          agg_type == SqlWindowFunctionKind::MAX ||
                                          agg_type == SqlWindowFunctionKind::COUNT)) {
    throw QueryNotSupported(
        "Aggregation over a window frame for a column type " +
        ::toString(input_col_ti.get_type()) +
        " must use one of the following window aggregate functions: MIN / MAX / COUNT");
  }
  const auto type = input_col_ti.is_decimal()
                        ? decimal_to_int_type(input_col_ti)
                        : input_col_ti.is_time_or_date()
                              ? get_int_type_by_size(input_col_ti.get_size())
                              : input_col_ti.get_type();
  if (partition_size > 0) {
    IndexPair order_col_null_range{ordered_partition_null_start_pos_[partition_idx],
                                   ordered_partition_null_end_pos_[partition_idx]};
    const int64_t* ordered_rowid_buf_for_partition =
        ordered_rowid_buf + offsets()[partition_idx];
    switch (type) {
      case kBOOLEAN:
      case kTINYINT: {
        const auto segment_tree = std::make_shared<SegmentTree<int8_t, int64_t>>(
            window_func_expr_columns_,
            input_col_ti,
            original_rowid_buf,
            ordered_rowid_buf_for_partition,
            partition_size,
            agg_type,
            aggregate_trees_fan_out_);
        aggregate_trees_depth_[partition_idx] =
            segment_tree ? segment_tree->getLeafDepth() : 0;
        if (agg_type == SqlWindowFunctionKind::AVG) {
          aggregate_trees_.derived_aggregate_tree_for_integer_type_[partition_idx] =
              segment_tree ? segment_tree->getDerivedAggregatedValues() : nullptr;
        } else {
          aggregate_trees_.aggregate_tree_for_integer_type_[partition_idx] =
              segment_tree ? segment_tree->getAggregatedValues() : nullptr;
        }
        segment_trees_owned_[partition_idx] = std::move(segment_tree);
        break;
      }
      case kSMALLINT: {
        const auto segment_tree = std::make_shared<SegmentTree<int16_t, int64_t>>(
            window_func_expr_columns_,
            input_col_ti,
            original_rowid_buf,
            ordered_rowid_buf_for_partition,
            partition_size,
            agg_type,
            aggregate_trees_fan_out_);
        aggregate_trees_depth_[partition_idx] =
            segment_tree ? segment_tree->getLeafDepth() : 0;
        if (agg_type == SqlWindowFunctionKind::AVG) {
          aggregate_trees_.derived_aggregate_tree_for_integer_type_[partition_idx] =
              segment_tree ? segment_tree->getDerivedAggregatedValues() : nullptr;
        } else {
          aggregate_trees_.aggregate_tree_for_integer_type_[partition_idx] =
              segment_tree ? segment_tree->getAggregatedValues() : nullptr;
        }
        segment_trees_owned_[partition_idx] = std::move(segment_tree);
        break;
      }
      case kINT: {
        const auto segment_tree = std::make_shared<SegmentTree<int32_t, int64_t>>(
            window_func_expr_columns_,
            input_col_ti,
            original_rowid_buf,
            ordered_rowid_buf_for_partition,
            partition_size,
            agg_type,
            aggregate_trees_fan_out_);
        aggregate_trees_depth_[partition_idx] =
            segment_tree ? segment_tree->getLeafDepth() : 0;
        if (agg_type == SqlWindowFunctionKind::AVG) {
          aggregate_trees_.derived_aggregate_tree_for_integer_type_[partition_idx] =
              segment_tree ? segment_tree->getDerivedAggregatedValues() : nullptr;
        } else {
          aggregate_trees_.aggregate_tree_for_integer_type_[partition_idx] =
              segment_tree ? segment_tree->getAggregatedValues() : nullptr;
        }
        segment_trees_owned_[partition_idx] = std::move(segment_tree);
        break;
      }
      case kDECIMAL:
      case kNUMERIC:
      case kBIGINT: {
        const auto segment_tree = std::make_shared<SegmentTree<int64_t, int64_t>>(
            window_func_expr_columns_,
            input_col_ti,
            original_rowid_buf,
            ordered_rowid_buf_for_partition,
            partition_size,
            agg_type,
            aggregate_trees_fan_out_);
        aggregate_trees_depth_[partition_idx] =
            segment_tree ? segment_tree->getLeafDepth() : 0;
        if (agg_type == SqlWindowFunctionKind::AVG) {
          aggregate_trees_.derived_aggregate_tree_for_integer_type_[partition_idx] =
              segment_tree ? segment_tree->getDerivedAggregatedValues() : nullptr;
        } else {
          aggregate_trees_.aggregate_tree_for_integer_type_[partition_idx] =
              segment_tree ? segment_tree->getAggregatedValues() : nullptr;
        }
        segment_trees_owned_[partition_idx] = std::move(segment_tree);
        break;
      }
      case kFLOAT: {
        const auto segment_tree =
            std::make_shared<SegmentTree<float, double>>(window_func_expr_columns_,
                                                         input_col_ti,
                                                         original_rowid_buf,
                                                         ordered_rowid_buf_for_partition,
                                                         partition_size,
                                                         agg_type,
                                                         aggregate_trees_fan_out_);
        aggregate_trees_depth_[partition_idx] =
            segment_tree ? segment_tree->getLeafDepth() : 0;
        if (agg_type == SqlWindowFunctionKind::AVG) {
          aggregate_trees_.derived_aggregate_tree_for_double_type_[partition_idx] =
              segment_tree ? segment_tree->getDerivedAggregatedValues() : nullptr;
        } else {
          aggregate_trees_.aggregate_tree_for_double_type_[partition_idx] =
              segment_tree ? segment_tree->getAggregatedValues() : nullptr;
        }
        segment_trees_owned_[partition_idx] = std::move(segment_tree);
        break;
      }
      case kDOUBLE: {
        const auto segment_tree =
            std::make_shared<SegmentTree<double, double>>(window_func_expr_columns_,
                                                          input_col_ti,
                                                          original_rowid_buf,
                                                          ordered_rowid_buf_for_partition,
                                                          partition_size,
                                                          agg_type,
                                                          aggregate_trees_fan_out_);
        aggregate_trees_depth_[partition_idx] =
            segment_tree ? segment_tree->getLeafDepth() : 0;
        if (agg_type == SqlWindowFunctionKind::AVG) {
          aggregate_trees_.derived_aggregate_tree_for_double_type_[partition_idx] =
              segment_tree ? segment_tree->getDerivedAggregatedValues() : nullptr;
        } else {
          aggregate_trees_.aggregate_tree_for_double_type_[partition_idx] =
              segment_tree ? segment_tree->getAggregatedValues() : nullptr;
        }
        segment_trees_owned_[partition_idx] = std::move(segment_tree);
        break;
      }
      default:
        UNREACHABLE();
    }
  } else {
    // handling the case of an empty partition
    aggregate_trees_depth_[partition_idx] = 0;
    if (input_col_ti.is_integer() || input_col_ti.is_decimal() ||
        input_col_ti.is_boolean() || input_col_ti.is_time_or_date()) {
      if (agg_type == SqlWindowFunctionKind::AVG) {
        aggregate_trees_.derived_aggregate_tree_for_integer_type_[partition_idx] =
            nullptr;
      } else {
        aggregate_trees_.aggregate_tree_for_integer_type_[partition_idx] = nullptr;
      }
    } else {
      CHECK(input_col_ti.is_fp());
      if (agg_type == SqlWindowFunctionKind::AVG) {
        aggregate_trees_.derived_aggregate_tree_for_double_type_[partition_idx] = nullptr;
      } else {
        aggregate_trees_.aggregate_tree_for_double_type_[partition_idx] = nullptr;
      }
    }
  }
}

int64_t** WindowFunctionContext::getAggregationTreesForIntegerTypeWindowExpr() const {
  return const_cast<int64_t**>(aggregate_trees_.aggregate_tree_for_integer_type_.data());
}

double** WindowFunctionContext::getAggregationTreesForDoubleTypeWindowExpr() const {
  return const_cast<double**>(aggregate_trees_.aggregate_tree_for_double_type_.data());
}

SumAndCountPair<int64_t>**
WindowFunctionContext::getDerivedAggregationTreesForIntegerTypeWindowExpr() const {
  return const_cast<SumAndCountPair<int64_t>**>(
      aggregate_trees_.derived_aggregate_tree_for_integer_type_.data());
}

SumAndCountPair<double>**
WindowFunctionContext::getDerivedAggregationTreesForDoubleTypeWindowExpr() const {
  return const_cast<SumAndCountPair<double>**>(
      aggregate_trees_.derived_aggregate_tree_for_double_type_.data());
}

size_t* WindowFunctionContext::getAggregateTreeDepth() const {
  return aggregate_trees_depth_;
}

size_t WindowFunctionContext::getAggregateTreeFanout() const {
  return aggregate_trees_fan_out_;
}
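
// Illustrative example (not part of the original source, and assuming the fan-out of
// 8 defined at the top of this file): a partition of 1,000 rows yields a segment tree
// of depth ceil(log8(1000)) = 4, so a framed aggregate can be answered by combining
// at most fan_out * depth precomputed node values instead of scanning every row in
// the frame.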

int64_t* WindowFunctionContext::getNullValueStartPos() const {
  return ordered_partition_null_start_pos_;
}

int64_t* WindowFunctionContext::getNullValueEndPos() const {
  return ordered_partition_null_end_pos_;
}

void WindowFunctionContext::fillPartitionStart() {
  CountDistinctDescriptor partition_start_bitmap{CountDistinctImplType::Bitmap,
                                                 0,
                                                 static_cast<int64_t>(elem_count_),
                                                 false,
                                                 ExecutorDeviceType::CPU,
                                                 1};
  auto bitmap_sz = partition_start_bitmap.bitmapPaddedSizeBytes();
  if (partitions_) {
    bitmap_sz += partitions_->isBitwiseEq() ? 1 : 0;
  }
  partition_start_ = static_cast<int8_t*>(checked_calloc(bitmap_sz, 1));
  int64_t partition_count = partitionCount();
  auto partition_start_handle = reinterpret_cast<int64_t>(partition_start_);
  agg_count_distinct_bitmap(&partition_start_handle, 0, 0);
  if (partition_start_offset_) {
    // if we have `partition_start_offset_`, we can reuse it for this logic;
    // note that it has partition_count + 1 elements, where the first element is
    // always zero (the first partition's start offset) and the remaining elements
    // hold the values required here
    for (int64_t i = 0; i < partition_count - 1; ++i) {
      agg_count_distinct_bitmap(
          &partition_start_handle, partition_start_offset_[i + 1], 0);
    }
  } else {
    std::vector<size_t> partition_offsets(partition_count);
    std::partial_sum(counts(), counts() + partition_count, partition_offsets.begin());
    for (int64_t i = 0; i < partition_count - 1; ++i) {
      agg_count_distinct_bitmap(&partition_start_handle, partition_offsets[i], 0);
    }
  }
}

void WindowFunctionContext::fillPartitionEnd() {
  CountDistinctDescriptor partition_start_bitmap{CountDistinctImplType::Bitmap,
                                                 0,
                                                 static_cast<int64_t>(elem_count_),
                                                 false,
                                                 ExecutorDeviceType::CPU,
                                                 1};
  auto bitmap_sz = partition_start_bitmap.bitmapPaddedSizeBytes();
  if (partitions_) {
    bitmap_sz += partitions_->isBitwiseEq() ? 1 : 0;
  }
  partition_end_ = static_cast<int8_t*>(checked_calloc(bitmap_sz, 1));
  auto partition_end_handle = reinterpret_cast<int64_t>(partition_end_);
  int64_t partition_count = partitionCount();
  if (partition_start_offset_) {
    // if we have `partition_start_offset_`, we can reuse it for this logic;
    // note that it has partition_count + 1 elements, where the first element is
    // always zero (the first partition's start offset) and the remaining elements
    // hold the values required here
    for (int64_t i = 0; i < partition_count - 1; ++i) {
      if (partition_start_offset_[i + 1] == 0) {
        continue;
      }
      agg_count_distinct_bitmap(
          &partition_end_handle, partition_start_offset_[i + 1] - 1, 0);
    }
    if (elem_count_) {
      agg_count_distinct_bitmap(&partition_end_handle, elem_count_ - 1, 0);
    }
  } else {
    std::vector<size_t> partition_offsets(partition_count);
    std::partial_sum(counts(), counts() + partition_count, partition_offsets.begin());
    for (int64_t i = 0; i < partition_count - 1; ++i) {
      if (partition_offsets[i] == 0) {
        continue;
      }
      agg_count_distinct_bitmap(&partition_end_handle, partition_offsets[i] - 1, 0);
    }
    if (elem_count_) {
      agg_count_distinct_bitmap(&partition_end_handle, elem_count_ - 1, 0);
    }
  }
}

const int32_t* WindowFunctionContext::payload() const {
  if (partitions_) {
    return reinterpret_cast<const int32_t*>(
        partitions_->getJoinHashBuffer(device_type_, 0) +
        partitions_->payloadBufferOff());
  }
  return dummy_payload_;  // non-partitioned window function
}

const int32_t* WindowFunctionContext::offsets() const {
  if (partitions_) {
    return reinterpret_cast<const int32_t*>(
        partitions_->getJoinHashBuffer(device_type_, 0) + partitions_->offsetBufferOff());
  }
  return &dummy_offset_;
}

const int32_t* WindowFunctionContext::counts() const {
  if (partitions_) {
    return reinterpret_cast<const int32_t*>(
        partitions_->getJoinHashBuffer(device_type_, 0) + partitions_->countBufferOff());
  }
  return &dummy_count_;
}
1778 
1780  if (partitions_) {
1781  const auto partition_count = counts() - offsets();
1782  CHECK_GE(partition_count, 0);
1783  return partition_count;
1784  }
1785  return 1; // non-partitioned window function
1786 }
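// Illustrative, self-contained sketch (not part of this file; the sizes are
// made up): offsets(), counts(), and payload() are all views into one
// contiguous join-hash buffer laid out so that the counts sub-array directly
// follows the offsets sub-array, one int32 entry per partition. The element
// distance between the two therefore equals the partition count, which is
// what the pointer subtraction above relies on.
static bool partition_count_layout_demo() {
  constexpr std::ptrdiff_t kPartitions = 3;
  // one flat buffer: 3 start offsets, 3 per-partition counts, then row ids
  const std::vector<int32_t> buf{0, 2, 5, /*counts*/ 2, 3, 1,
                                 /*payload*/ 0, 1, 2, 3, 4, 5};
  const int32_t* offsets = buf.data();
  const int32_t* counts = buf.data() + kPartitions;
  return (counts - offsets) == kPartitions;  // pointer difference == #partitions
}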
1787 
1788 const bool WindowFunctionContext::needsToBuildAggregateTree() const {
1789  return window_func_->hasFraming() &&
1790  window_func_->hasAggregateTreeRequiredWindowFunc();
1791 }
1792 
1793 const QueryPlanHash WindowFunctionContext::computeAggregateTreeCacheKey() const {
1794  // an aggregate tree is constructed per window aggregate function kind,
1795  // input expression, partition key(s), and ordering key;
1796  // this means that when two window definitions agree on all of the above
1797  // but differ only in their frame bound declarations,
1798  // they can share the same aggregate tree
1799  auto cache_key = boost::hash_value(::toString(window_func_->getKind()));
1800  boost::hash_combine(cache_key, ::toString(window_func_->getArgs()));
1801  boost::hash_combine(cache_key, ::toString(window_func_->getPartitionKeys()));
1802  boost::hash_combine(cache_key, ::toString(window_func_->getOrderKeys()));
1803  for (auto& order_entry : window_func_->getCollation()) {
1804  boost::hash_combine(cache_key, order_entry.toString());
1805  }
1806  return cache_key;
1807 }
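// Minimal sketch of the hash-combining pattern used above (assumes
// <boost/functional/hash.hpp>; the string literals stand in for the
// stringified window-function components): start from one hashed field, then
// fold each further field into the running key, so window definitions that
// differ in any component -- kind, arguments, partition keys, ordering keys,
// or collation -- produce different cache keys.
static size_t aggregate_tree_cache_key_demo() {
  auto cache_key = boost::hash_value(std::string{"SUM"});  // function kind
  for (const char* component : {"args", "partition_keys", "order_keys"}) {
    boost::hash_combine(cache_key, std::string{component});
  }
  return cache_key;
}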
1808 
1809 void WindowProjectNodeContext::addWindowFunctionContext(
1810  std::unique_ptr<WindowFunctionContext> window_function_context,
1811  const size_t target_index) {
1812  const auto it_ok = window_contexts_.emplace(
1813  std::make_pair(target_index, std::move(window_function_context)));
1814  CHECK(it_ok.second);
1815 }
1816 
1817 const WindowFunctionContext* WindowProjectNodeContext::activateWindowFunctionContext(
1818  Executor* executor,
1819  const size_t target_index) const {
1820  const auto it = window_contexts_.find(target_index);
1821  CHECK(it != window_contexts_.end());
1822  executor->active_window_function_ = it->second.get();
1823  return executor->active_window_function_;
1824 }
1825 
1826 void WindowProjectNodeContext::resetWindowFunctionContext(Executor* executor) {
1827  executor->active_window_function_ = nullptr;
1828 }
1829 
1830 WindowFunctionContext* WindowProjectNodeContext::getActiveWindowFunctionContext(
1831  Executor* executor) {
1832  return executor->active_window_function_;
1833 }
1834 
1835 WindowProjectNodeContext* WindowProjectNodeContext::create(Executor* executor) {
1836  executor->window_project_node_context_owned_ =
1837  std::make_unique<WindowProjectNodeContext>();
1838  return executor->window_project_node_context_owned_.get();
1839 }
1840 
1841 const WindowProjectNodeContext* WindowProjectNodeContext::get(Executor* executor) {
1842  return executor->window_project_node_context_owned_.get();
1843 }
1844 
1845 void WindowProjectNodeContext::reset(Executor* executor) {
1846  executor->window_project_node_context_owned_ = nullptr;
1847  executor->active_window_function_ = nullptr;
1848 }
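// Hedged usage sketch (`executor` and `window_ctx` are assumed to exist; this
// is not code from this file): a projection node creates one
// WindowProjectNodeContext, registers one WindowFunctionContext per
// window-function target, activates the target currently being
// code-generated, and resets all window state once the node is done:
//
//   auto* ctx = WindowProjectNodeContext::create(executor);
//   ctx->addWindowFunctionContext(std::move(window_ctx), /*target_index=*/0);
//   ctx->activateWindowFunctionContext(executor, /*target_index=*/0);
//   // ... codegen reads getActiveWindowFunctionContext(executor) ...
//   WindowProjectNodeContext::reset(executor);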