OmniSciDB  c1a53651b2
WindowFunctionContext Class Reference

#include <WindowContext.h>


Classes

struct  AggregateState
 

Public Types

enum  WindowComparatorResult { WindowComparatorResult::LT, WindowComparatorResult::EQ, WindowComparatorResult::GT }
 
using Comparator = std::function< WindowFunctionContext::WindowComparatorResult(const int64_t lhs, const int64_t rhs)>
 

Public Member Functions

 WindowFunctionContext (const Analyzer::WindowFunction *window_func, const size_t elem_count, const ExecutorDeviceType device_type, std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner)
 
 WindowFunctionContext (const Analyzer::WindowFunction *window_func, QueryPlanHash cache_key, const std::shared_ptr< HashJoin > &partitions, const size_t elem_count, const ExecutorDeviceType device_type, std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner, size_t aggregation_tree_fan_out=g_window_function_aggregation_tree_fanout)
 
 WindowFunctionContext (const WindowFunctionContext &)=delete
 
WindowFunctionContext & operator= (const WindowFunctionContext &)=delete
 
 ~WindowFunctionContext ()
 
void addOrderColumn (const int8_t *column, const SQLTypeInfo &ti, const std::vector< std::shared_ptr< Chunk_NS::Chunk >> &chunks_owner)
 
void setSortedPartitionCacheKey (QueryPlanHash cache_key)
 
void addColumnBufferForWindowFunctionExpression (const int8_t *column, const std::vector< std::shared_ptr< Chunk_NS::Chunk >> &chunks_owner)
 
std::vector< Comparator > createComparator (size_t partition_idx)
 
void compute (std::unordered_map< QueryPlanHash, size_t > &sorted_partition_key_ref_count_map, std::unordered_map< QueryPlanHash, std::shared_ptr< std::vector< int64_t >>> &sorted_partition_cache, std::unordered_map< QueryPlanHash, AggregateTreeForWindowFraming > &aggregate_tree_map)
 
const Analyzer::WindowFunction * getWindowFunction () const
 
const int8_t * output () const
 
const int64_t * sortedPartition () const
 
const int64_t * aggregateState () const
 
const int64_t * aggregateStateCount () const
 
int64_t aggregateStatePendingOutputs () const
 
const int64_t * partitionStartOffset () const
 
const int64_t * partitionNumCountBuf () const
 
const std::vector< const int8_t * > & getColumnBufferForWindowFunctionExpressions () const
 
const std::vector< const int8_t * > & getOrderKeyColumnBuffers () const
 
const std::vector< SQLTypeInfo > & getOrderKeyColumnBufferTypes () const
 
int64_t ** getAggregationTreesForIntegerTypeWindowExpr () const
 
double ** getAggregationTreesForDoubleTypeWindowExpr () const
 
SumAndCountPair< int64_t > ** getDerivedAggregationTreesForIntegerTypeWindowExpr () const
 
SumAndCountPair< double > ** getDerivedAggregationTreesForDoubleTypeWindowExpr () const
 
size_t * getAggregateTreeDepth () const
 
size_t getAggregateTreeFanout () const
 
int64_t * getNullValueStartPos () const
 
int64_t * getNullValueEndPos () const
 
const int8_t * partitionStart () const
 
const int8_t * partitionEnd () const
 
size_t elementCount () const
 
const int32_t * payload () const
 
const int32_t * offsets () const
 
const int32_t * counts () const
 
size_t partitionCount () const
 
const bool needsToBuildAggregateTree () const
 

Private Member Functions

void computePartitionBuffer (const size_t partition_idx, int64_t *output_for_partition_buff, const Analyzer::WindowFunction *window_func)
 
void sortPartition (const size_t partition_idx, int64_t *output_for_partition_buff, bool should_parallelize)
 
void computeNullRangeOfSortedPartition (const SQLTypeInfo &order_col_ti, size_t partition_idx, const int32_t *original_col_idx_buf, const int64_t *ordered_col_idx_buf)
 
void buildAggregationTreeForPartition (SqlWindowFunctionKind agg_type, size_t partition_idx, size_t partition_size, const int32_t *original_rowid_buf, const int64_t *ordered_rowid_buf, const SQLTypeInfo &input_col_ti)
 
void fillPartitionStart ()
 
void fillPartitionEnd ()
 
void resizeStorageForWindowFraming (bool const for_reuse=false)
 
const QueryPlanHash computeAggregateTreeCacheKey () const
 

Static Private Member Functions

static Comparator makeComparator (const Analyzer::ColumnVar *col_var, const int8_t *partition_values, const int32_t *partition_indices, const bool asc_ordering, const bool nulls_first)
 

Private Attributes

const Analyzer::WindowFunction * window_func_
 
QueryPlanHash partition_cache_key_
 
QueryPlanHash sorted_partition_cache_key_
 
std::vector< std::vector< std::shared_ptr< Chunk_NS::Chunk > > > order_columns_owner_
 
std::vector< const int8_t * > order_columns_
 
std::vector< SQLTypeInfo > order_columns_ti_
 
std::shared_ptr< HashJoin > partitions_
 
size_t elem_count_
 
int8_t * output_
 
std::shared_ptr< std::vector< int64_t > > sorted_partition_buf_
 
std::vector< std::vector< std::shared_ptr< Chunk_NS::Chunk > > > window_func_expr_columns_owner_
 
std::vector< const int8_t * > window_func_expr_columns_
 
std::vector< std::shared_ptr< void > > segment_trees_owned_
 
AggregateTreeForWindowFraming aggregate_trees_
 
size_t aggregate_trees_fan_out_
 
size_t * aggregate_trees_depth_
 
int64_t * ordered_partition_null_start_pos_
 
int64_t * ordered_partition_null_end_pos_
 
int64_t * partition_start_offset_
 
int8_t * partition_start_
 
int8_t * partition_end_
 
AggregateState aggregate_state_
 
const ExecutorDeviceType device_type_
 
std::shared_ptr< RowSetMemoryOwner > row_set_mem_owner_
 
const int32_t dummy_count_
 
const int32_t dummy_offset_
 
int32_t * dummy_payload_
 

Detailed Description

Definition at line 117 of file WindowContext.h.
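WindowFunctionContext holds all per-window-function state for one query step: the partition layout (from an optional partition hash table), the ORDER BY column buffers, the computed per-row output buffer, and, for framed window functions, the per-partition aggregate (segment) trees. A condensed, hypothetical driver is sketched below; only the WindowFunctionContext calls are taken from this page, while the buffers, type info and cache maps are assumed to be prepared by the caller.

#include <WindowContext.h>

// Hypothetical helper: single-partition variant (no partition hash table is passed).
void compute_window_context(
    const Analyzer::WindowFunction* window_func,
    const size_t elem_count,
    std::shared_ptr<RowSetMemoryOwner> row_set_mem_owner,
    const int8_t* order_key_buffer,
    const SQLTypeInfo& order_key_type,
    const std::vector<std::shared_ptr<Chunk_NS::Chunk>>& chunks_owner,
    std::unordered_map<QueryPlanHash, size_t>& sorted_partition_key_ref_count_map,
    std::unordered_map<QueryPlanHash, std::shared_ptr<std::vector<int64_t>>>&
        sorted_partition_cache,
    std::unordered_map<QueryPlanHash, AggregateTreeForWindowFraming>&
        aggregate_tree_map) {
  WindowFunctionContext context(
      window_func, elem_count, ExecutorDeviceType::CPU, row_set_mem_owner);
  // Register each ORDER BY key buffer (and keep its chunks alive).
  context.addOrderColumn(order_key_buffer, order_key_type, chunks_owner);
  // Sort partitions, build aggregate trees if framing is used, and fill output().
  context.compute(
      sorted_partition_key_ref_count_map, sorted_partition_cache, aggregate_tree_map);
  const int8_t* per_row_results = context.output();
  (void)per_row_results;  // consumed by generated code in the real engine
}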

Member Typedef Documentation

using WindowFunctionContext::Comparator = std::function<WindowFunctionContext::WindowComparatorResult(const int64_t lhs, const int64_t rhs)>

Definition at line 155 of file WindowContext.h.

Member Enumeration Documentation

enum WindowFunctionContext::WindowComparatorResult
 
Enumerator
LT 
EQ 
GT 

Definition at line 152 of file WindowContext.h.
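The three-way result exists so that several ORDER BY keys can be chained: createComparator() returns one Comparator per order key and computePartitionBuffer() folds them into a single boolean less-than predicate (its listing appears further down this page). The folding pattern in isolation, with stand-in names rather than the library's types:

#include <cstdint>
#include <functional>
#include <utility>
#include <vector>

enum class CmpResult { LT, EQ, GT };  // mirrors WindowComparatorResult
using Cmp = std::function<CmpResult(int64_t, int64_t)>;

// Fold per-key three-way comparators into a strict weak ordering usable by std::sort.
std::function<bool(int64_t, int64_t)> make_tuple_less(std::vector<Cmp> cmps) {
  return [cmps = std::move(cmps)](const int64_t lhs, const int64_t rhs) {
    for (const auto& cmp : cmps) {
      switch (cmp(lhs, rhs)) {
        case CmpResult::LT:
          return true;
        case CmpResult::GT:
          return false;
        case CmpResult::EQ:
          continue;  // tie on this key, consult the next one
      }
    }
    return false;  // equal on every key: not "less than"
  };
}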

Constructor & Destructor Documentation

WindowFunctionContext::WindowFunctionContext ( const Analyzer::WindowFunction *  window_func,
const size_t  elem_count,
const ExecutorDeviceType  device_type,
std::shared_ptr< RowSetMemoryOwner >  row_set_mem_owner 
)

Definition at line 50 of file WindowContext.cpp.

References aggregate_trees_depth_, CHECK_LE, checked_calloc(), checked_malloc(), dummy_payload_, elem_count_, Analyzer::WindowFunction::getKind(), Analyzer::WindowFunction::hasFraming(), gpu_enabled::iota(), NTH_VALUE, ordered_partition_null_end_pos_, ordered_partition_null_start_pos_, partition_start_offset_, and window_func_.

    : window_func_(window_func)
    , partition_cache_key_(EMPTY_HASHED_PLAN_DAG_KEY)
    , sorted_partition_cache_key_(EMPTY_HASHED_PLAN_DAG_KEY)
    , partitions_(nullptr)
    , elem_count_(elem_count)
    , output_(nullptr)
    , sorted_partition_buf_(nullptr)
    , aggregate_trees_fan_out_(g_window_function_aggregation_tree_fanout)
    , aggregate_trees_depth_(nullptr)
    , ordered_partition_null_start_pos_(nullptr)
    , ordered_partition_null_end_pos_(nullptr)
    , partition_start_offset_(nullptr)
    , partition_start_(nullptr)
    , partition_end_(nullptr)
    , device_type_(device_type)
    , row_set_mem_owner_(row_set_mem_owner)
    , dummy_count_(elem_count)
    , dummy_offset_(0)
    , dummy_payload_(nullptr) {
  CHECK_LE(elem_count_, static_cast<size_t>(std::numeric_limits<int32_t>::max()));
  dummy_payload_ =
      reinterpret_cast<int32_t*>(checked_malloc(elem_count_ * sizeof(int32_t)));
  std::iota(dummy_payload_, dummy_payload_ + elem_count_, int32_t(0));
  if (window_func_->hasFraming() ||
      window_func_->getKind() == SqlWindowFunctionKind::NTH_VALUE) {
    // in this case, we consider all rows to belong to the same (and only) partition
    partition_start_offset_ =
        reinterpret_cast<int64_t*>(checked_calloc(2, sizeof(int64_t)));
    partition_start_offset_[1] = elem_count_;
    aggregate_trees_depth_ = reinterpret_cast<size_t*>(checked_calloc(1, sizeof(size_t)));
    ordered_partition_null_start_pos_ =
        reinterpret_cast<int64_t*>(checked_calloc(1, sizeof(int64_t)));
    ordered_partition_null_end_pos_ =
        reinterpret_cast<int64_t*>(checked_calloc(1, sizeof(int64_t)));
  }
}
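Without a partition hash table the whole input is treated as a single partition, so the constructor fabricates an identity row-id payload (see dummy_payload_) with std::iota. The idea in isolation:

#include <cstddef>
#include <cstdint>
#include <numeric>
#include <vector>

int main() {
  const size_t elem_count = 5;
  std::vector<int32_t> dummy_payload(elem_count);
  // Identity mapping: row i of the single implicit partition is input row i.
  std::iota(dummy_payload.begin(), dummy_payload.end(), 0);
  // dummy_payload now holds {0, 1, 2, 3, 4}
}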

WindowFunctionContext::WindowFunctionContext ( const Analyzer::WindowFunction *  window_func,
QueryPlanHash  cache_key,
const std::shared_ptr< HashJoin > &  partitions,
const size_t  elem_count,
const ExecutorDeviceType  device_type,
std::shared_ptr< RowSetMemoryOwner >  row_set_mem_owner,
size_t  aggregation_tree_fan_out = g_window_function_aggregation_tree_fanout 
)

Definition at line 94 of file WindowContext.cpp.

References aggregate_trees_depth_, CHECK, checked_calloc(), counts(), Analyzer::WindowFunction::hasFraming(), ordered_partition_null_end_pos_, ordered_partition_null_start_pos_, gpu_enabled::partial_sum(), partition_start_offset_, partitionCount(), partitions_, and window_func_.

    : window_func_(window_func)
    , partition_cache_key_(partition_cache_key)
    , sorted_partition_cache_key_(EMPTY_HASHED_PLAN_DAG_KEY)
    , partitions_(partitions)
    , elem_count_(elem_count)
    , output_(nullptr)
    , sorted_partition_buf_(nullptr)
    , aggregate_trees_fan_out_(aggregation_tree_fan_out)
    , aggregate_trees_depth_(nullptr)
    , ordered_partition_null_start_pos_(nullptr)
    , ordered_partition_null_end_pos_(nullptr)
    , partition_start_offset_(nullptr)
    , partition_start_(nullptr)
    , partition_end_(nullptr)
    , device_type_(device_type)
    , row_set_mem_owner_(row_set_mem_owner)
    , dummy_count_(elem_count)
    , dummy_offset_(0)
    , dummy_payload_(nullptr) {
  CHECK(partitions_);  // This version should have hash table
  size_t partition_count = partitionCount();
  partition_start_offset_ =
      reinterpret_cast<int64_t*>(checked_calloc(partition_count + 1, sizeof(int64_t)));
  if (window_func_->hasFraming()) {
    aggregate_trees_depth_ =
        reinterpret_cast<size_t*>(checked_calloc(partition_count, sizeof(size_t)));
    ordered_partition_null_start_pos_ =
        reinterpret_cast<int64_t*>(checked_calloc(partition_count, sizeof(int64_t)));
    ordered_partition_null_end_pos_ =
        reinterpret_cast<int64_t*>(checked_calloc(partition_count, sizeof(int64_t)));
  }
  // the first partition starts at zero position
  std::partial_sum(counts(), counts() + partition_count, partition_start_offset_ + 1);
}
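The final std::partial_sum call turns per-partition row counts into exclusive start offsets: element 0 stays zero and element i + 1 receives the cumulative count of partitions 0..i. In isolation:

#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
  const std::vector<int32_t> counts{3, 1, 4};  // rows per partition
  std::vector<int64_t> partition_start_offset(counts.size() + 1, 0);
  // partition_start_offset[0] stays 0; entry i + 1 = counts[0] + ... + counts[i]
  std::partial_sum(counts.begin(), counts.end(), partition_start_offset.begin() + 1);
  for (const auto offset : partition_start_offset) {
    std::cout << offset << ' ';  // prints: 0 3 4 8
  }
  std::cout << '\n';
}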

WindowFunctionContext::WindowFunctionContext ( const WindowFunctionContext & )
delete
WindowFunctionContext::~WindowFunctionContext ( )

Member Function Documentation

void WindowFunctionContext::addColumnBufferForWindowFunctionExpression ( const int8_t *  column,
const std::vector< std::shared_ptr< Chunk_NS::Chunk >> &  chunks_owner 
)

Definition at line 166 of file WindowContext.cpp.

References window_func_expr_columns_, and window_func_expr_columns_owner_.

{
  window_func_expr_columns_owner_.push_back(chunks_owner);
  window_func_expr_columns_.push_back(column);
}
void WindowFunctionContext::addOrderColumn ( const int8_t *  column,
const SQLTypeInfo &  ti,
const std::vector< std::shared_ptr< Chunk_NS::Chunk >> &  chunks_owner 
)

Definition at line 157 of file WindowContext.cpp.

References order_columns_, order_columns_owner_, and order_columns_ti_.

{
  order_columns_owner_.push_back(chunks_owner);
  order_columns_.push_back(column);
  order_columns_ti_.push_back(ti);
}
const int64_t * WindowFunctionContext::aggregateState ( ) const

Definition at line 992 of file WindowContext.cpp.

References aggregate_state_, CHECK, Analyzer::WindowFunction::getKind(), WindowFunctionContext::AggregateState::val, window_func_, and window_function_is_aggregate().

{
  CHECK(window_function_is_aggregate(window_func_->getKind()));
  return &aggregate_state_.val;
}

const int64_t * WindowFunctionContext::aggregateStateCount ( ) const

Definition at line 997 of file WindowContext.cpp.

References aggregate_state_, CHECK, WindowFunctionContext::AggregateState::count, Analyzer::WindowFunction::getKind(), window_func_, and window_function_is_aggregate().

{
  CHECK(window_function_is_aggregate(window_func_->getKind()));
  return &aggregate_state_.count;
}

int64_t WindowFunctionContext::aggregateStatePendingOutputs ( ) const

Definition at line 1012 of file WindowContext.cpp.

References aggregate_state_, CHECK, Analyzer::WindowFunction::getKind(), WindowFunctionContext::AggregateState::outputs, window_func_, and window_function_is_aggregate().

{
  CHECK(window_function_is_aggregate(window_func_->getKind()));
  return reinterpret_cast<int64_t>(&aggregate_state_.outputs);
}

void WindowFunctionContext::buildAggregationTreeForPartition ( SqlWindowFunctionKind  agg_type,
size_t  partition_idx,
size_t  partition_size,
const int32_t *  original_rowid_buf,
const int64_t *  ordered_rowid_buf,
const SQLTypeInfo &  input_col_ti 
)
private

Definition at line 1455 of file WindowContext.cpp.

References AggregateTreeForWindowFraming::aggregate_tree_for_double_type_, AggregateTreeForWindowFraming::aggregate_tree_for_integer_type_, aggregate_trees_, AggregateTreeForWindowFraming::aggregate_trees_depth_, aggregate_trees_depth_, aggregate_trees_fan_out_, AVG, CHECK, COUNT, decimal_to_int_type(), AggregateTreeForWindowFraming::derived_aggregate_tree_for_double_type_, AggregateTreeForWindowFraming::derived_aggregate_tree_for_integer_type_, get_int_type_by_size(), SQLTypeInfo::get_size(), SQLTypeInfo::get_type(), SQLTypeInfo::is_boolean(), SQLTypeInfo::is_decimal(), SQLTypeInfo::is_fp(), SQLTypeInfo::is_integer(), SQLTypeInfo::is_number(), SQLTypeInfo::is_time_or_date(), kBIGINT, kBOOLEAN, kDECIMAL, kDOUBLE, kFLOAT, kINT, kNUMERIC, kSMALLINT, kTINYINT, MAX, MIN, offsets(), ordered_partition_null_end_pos_, ordered_partition_null_start_pos_, segment_trees_owned_, toString(), run_benchmark_import::type, UNREACHABLE, and window_func_expr_columns_.

Referenced by compute().

{
  if (!(input_col_ti.is_number() || input_col_ti.is_boolean() ||
        input_col_ti.is_time_or_date())) {
    throw QueryNotSupported("Window aggregate function over frame on a column type " +
                            ::toString(input_col_ti.get_type()) + " is not supported.");
  }
  if (input_col_ti.is_time_or_date() && !(agg_type == SqlWindowFunctionKind::MIN ||
                                          agg_type == SqlWindowFunctionKind::MAX ||
                                          agg_type == SqlWindowFunctionKind::COUNT)) {
    throw QueryNotSupported(
        "Aggregation over a window frame for a column type " +
        ::toString(input_col_ti.get_type()) +
        " must use one of the following window aggregate function: MIN / MAX / COUNT");
  }
  const auto type = input_col_ti.is_decimal()
                        ? decimal_to_int_type(input_col_ti)
                        : input_col_ti.is_time_or_date()
                              ? get_int_type_by_size(input_col_ti.get_size())
                              : input_col_ti.get_type();
  if (partition_size > 0) {
    IndexPair order_col_null_range{ordered_partition_null_start_pos_[partition_idx],
                                   ordered_partition_null_end_pos_[partition_idx]};
    const int64_t* ordered_rowid_buf_for_partition =
        ordered_rowid_buf + offsets()[partition_idx];
    switch (type) {
      case kBOOLEAN:
      case kTINYINT: {
        const auto segment_tree = std::make_shared<SegmentTree<int8_t, int64_t>>(
            window_func_expr_columns_,
            input_col_ti,
            original_rowid_buf,
            ordered_rowid_buf_for_partition,
            partition_size,
            agg_type,
            aggregate_trees_fan_out_);
        aggregate_trees_depth_[partition_idx] =
            segment_tree ? segment_tree->getLeafDepth() : 0;
        if (agg_type == SqlWindowFunctionKind::AVG) {
          aggregate_trees_.derived_aggregate_tree_for_integer_type_[partition_idx] =
              segment_tree ? segment_tree->getDerivedAggregatedValues() : nullptr;
        } else {
          aggregate_trees_.aggregate_tree_for_integer_type_[partition_idx] =
              segment_tree ? segment_tree->getAggregatedValues() : nullptr;
        }
        segment_trees_owned_[partition_idx] = std::move(segment_tree);
        break;
      }
      case kSMALLINT: {
        const auto segment_tree = std::make_shared<SegmentTree<int16_t, int64_t>>(
            window_func_expr_columns_,
            input_col_ti,
            original_rowid_buf,
            ordered_rowid_buf_for_partition,
            partition_size,
            agg_type,
            aggregate_trees_fan_out_);
        aggregate_trees_depth_[partition_idx] =
            segment_tree ? segment_tree->getLeafDepth() : 0;
        if (agg_type == SqlWindowFunctionKind::AVG) {
          aggregate_trees_.derived_aggregate_tree_for_integer_type_[partition_idx] =
              segment_tree ? segment_tree->getDerivedAggregatedValues() : nullptr;
        } else {
          aggregate_trees_.aggregate_tree_for_integer_type_[partition_idx] =
              segment_tree ? segment_tree->getAggregatedValues() : nullptr;
        }
        segment_trees_owned_[partition_idx] = std::move(segment_tree);
        break;
      }
      case kINT: {
        const auto segment_tree = std::make_shared<SegmentTree<int32_t, int64_t>>(
            window_func_expr_columns_,
            input_col_ti,
            original_rowid_buf,
            ordered_rowid_buf_for_partition,
            partition_size,
            agg_type,
            aggregate_trees_fan_out_);
        aggregate_trees_depth_[partition_idx] =
            segment_tree ? segment_tree->getLeafDepth() : 0;
        if (agg_type == SqlWindowFunctionKind::AVG) {
          aggregate_trees_.derived_aggregate_tree_for_integer_type_[partition_idx] =
              segment_tree ? segment_tree->getDerivedAggregatedValues() : nullptr;
        } else {
          aggregate_trees_.aggregate_tree_for_integer_type_[partition_idx] =
              segment_tree ? segment_tree->getAggregatedValues() : nullptr;
        }
        segment_trees_owned_[partition_idx] = std::move(segment_tree);
        break;
      }
      case kDECIMAL:
      case kNUMERIC:
      case kBIGINT: {
        const auto segment_tree = std::make_shared<SegmentTree<int64_t, int64_t>>(
            window_func_expr_columns_,
            input_col_ti,
            original_rowid_buf,
            ordered_rowid_buf_for_partition,
            partition_size,
            agg_type,
            aggregate_trees_fan_out_);
        aggregate_trees_depth_[partition_idx] =
            segment_tree ? segment_tree->getLeafDepth() : 0;
        if (agg_type == SqlWindowFunctionKind::AVG) {
          aggregate_trees_.derived_aggregate_tree_for_integer_type_[partition_idx] =
              segment_tree ? segment_tree->getDerivedAggregatedValues() : nullptr;
        } else {
          aggregate_trees_.aggregate_tree_for_integer_type_[partition_idx] =
              segment_tree ? segment_tree->getAggregatedValues() : nullptr;
        }
        segment_trees_owned_[partition_idx] = std::move(segment_tree);
        break;
      }
      case kFLOAT: {
        const auto segment_tree =
            std::make_shared<SegmentTree<float, double>>(window_func_expr_columns_,
                                                         input_col_ti,
                                                         original_rowid_buf,
                                                         ordered_rowid_buf_for_partition,
                                                         partition_size,
                                                         agg_type,
                                                         aggregate_trees_fan_out_);
        aggregate_trees_depth_[partition_idx] =
            segment_tree ? segment_tree->getLeafDepth() : 0;
        if (agg_type == SqlWindowFunctionKind::AVG) {
          aggregate_trees_.derived_aggregate_tree_for_double_type_[partition_idx] =
              segment_tree ? segment_tree->getDerivedAggregatedValues() : nullptr;
        } else {
          aggregate_trees_.aggregate_tree_for_double_type_[partition_idx] =
              segment_tree ? segment_tree->getAggregatedValues() : nullptr;
        }
        segment_trees_owned_[partition_idx] = std::move(segment_tree);
        break;
      }
      case kDOUBLE: {
        const auto segment_tree =
            std::make_shared<SegmentTree<double, double>>(window_func_expr_columns_,
                                                          input_col_ti,
                                                          original_rowid_buf,
                                                          ordered_rowid_buf_for_partition,
                                                          partition_size,
                                                          agg_type,
                                                          aggregate_trees_fan_out_);
        aggregate_trees_depth_[partition_idx] =
            segment_tree ? segment_tree->getLeafDepth() : 0;
        if (agg_type == SqlWindowFunctionKind::AVG) {
          aggregate_trees_.derived_aggregate_tree_for_double_type_[partition_idx] =
              segment_tree ? segment_tree->getDerivedAggregatedValues() : nullptr;
        } else {
          aggregate_trees_.aggregate_tree_for_double_type_[partition_idx] =
              segment_tree ? segment_tree->getAggregatedValues() : nullptr;
        }
        segment_trees_owned_[partition_idx] = std::move(segment_tree);
        break;
      }
      default:
        UNREACHABLE();
    }
  } else {
    // handling a case of an empty partition
    aggregate_trees_depth_[partition_idx] = 0;
    if (input_col_ti.is_integer() || input_col_ti.is_decimal() ||
        input_col_ti.is_boolean() || input_col_ti.is_time_or_date()) {
      if (agg_type == SqlWindowFunctionKind::AVG) {
        aggregate_trees_.derived_aggregate_tree_for_integer_type_[partition_idx] =
            nullptr;
      } else {
        aggregate_trees_.aggregate_tree_for_integer_type_[partition_idx] = nullptr;
      }
    } else {
      CHECK(input_col_ti.is_fp());
      if (agg_type == SqlWindowFunctionKind::AVG) {
        aggregate_trees_.derived_aggregate_tree_for_double_type_[partition_idx] = nullptr;
      } else {
        aggregate_trees_.aggregate_tree_for_double_type_[partition_idx] = nullptr;
      }
    }
  }
  aggregate_trees_.aggregate_trees_depth_ = aggregate_trees_depth_;
}
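Each non-empty partition gets a segment tree over its ordered rows so that any frame can be aggregated in logarithmic time instead of rescanning the frame row by row. The library's SegmentTree template (typed leaves, configurable fan-out, SumAndCountPair nodes for AVG, null-range handling) is not reproduced here; the sketch below only illustrates the underlying idea for SUM with a plain binary tree and hypothetical names.

#include <cstddef>
#include <cstdint>
#include <vector>

// Minimal binary segment tree over int64_t leaves, answering sum over [lo, hi).
// Illustrative only; the real SegmentTree aggregates through a row-id indirection
// and supports MIN/MAX/COUNT/AVG as well.
class SumSegmentTree {
 public:
  explicit SumSegmentTree(const std::vector<int64_t>& leaves)
      : n_(leaves.size()), tree_(2 * leaves.size(), 0) {
    if (n_ == 0) {
      return;  // mirrors the partition_size > 0 guard above
    }
    for (size_t i = 0; i < n_; ++i) {
      tree_[n_ + i] = leaves[i];
    }
    for (size_t i = n_ - 1; i > 0; --i) {
      tree_[i] = tree_[2 * i] + tree_[2 * i + 1];
    }
  }

  // Sum of the leaves in the half-open range [lo, hi); requires lo <= hi <= leaf count.
  int64_t query(size_t lo, size_t hi) const {
    int64_t sum = 0;
    for (lo += n_, hi += n_; lo < hi; lo >>= 1, hi >>= 1) {
      if (lo & 1) {
        sum += tree_[lo++];
      }
      if (hi & 1) {
        sum += tree_[--hi];
      }
    }
    return sum;
  }

 private:
  size_t n_;
  std::vector<int64_t> tree_;
};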

void WindowFunctionContext::compute ( std::unordered_map< QueryPlanHash, size_t > &  sorted_partition_key_ref_count_map,
std::unordered_map< QueryPlanHash, std::shared_ptr< std::vector< int64_t >>> &  sorted_partition_cache,
std::unordered_map< QueryPlanHash, AggregateTreeForWindowFraming > &  aggregate_tree_map 
)

Definition at line 549 of file WindowContext.cpp.

References aggregate_trees_, AggregateTreeForWindowFraming::aggregate_trees_depth_, aggregate_trees_depth_, buildAggregationTreeForPartition(), CHECK, computeAggregateTreeCacheKey(), computeNullRangeOfSortedPartition(), computePartitionBuffer(), counts(), DEBUG_TIMER, elem_count_, fillPartitionEnd(), fillPartitionStart(), g_enable_parallel_window_partition_compute, g_parallel_window_partition_compute_threshold, Analyzer::WindowFunction::getArgs(), Analyzer::WindowFunction::getKind(), Analyzer::WindowFunction::getOrderKeys(), Analyzer::WindowFunction::hasFraming(), needsToBuildAggregateTree(), offsets(), output_, threading_serial::parallel_for(), partitionCount(), payload(), resizeStorageForWindowFraming(), row_set_mem_owner_, sorted_partition_buf_, sorted_partition_cache_key_, sortPartition(), logger::thread_local_ids(), toString(), VLOG, window_func_, anonymous_namespace{WindowContext.cpp}::window_function_buffer_element_size(), window_function_is_aggregate(), and window_function_requires_peer_handling().

{
  auto timer = DEBUG_TIMER(__func__);
  CHECK(!output_);
  if (elem_count_ == 0) {
    return;
  }
  size_t output_buf_sz =
      elem_count_ * window_function_buffer_element_size(window_func_->getKind());
  output_ = static_cast<int8_t*>(row_set_mem_owner_->allocate(output_buf_sz,
                                                              /*thread_idx=*/0));
  const bool is_window_function_aggregate_or_has_framing =
      window_function_is_aggregate(window_func_->getKind()) || window_func_->hasFraming();
  if (is_window_function_aggregate_or_has_framing) {
    fillPartitionStart();
    if (window_function_requires_peer_handling(window_func_) ||
        window_func_->hasFraming()) {
      fillPartitionEnd();
    }
  }
  std::unique_ptr<int64_t[]> scratchpad;
  int64_t* intermediate_output_buffer;
  if (is_window_function_aggregate_or_has_framing) {
    intermediate_output_buffer = reinterpret_cast<int64_t*>(output_);
  } else {
    output_buf_sz = sizeof(int64_t) * elem_count_;
    scratchpad.reset(new int64_t[elem_count_]);
    intermediate_output_buffer = scratchpad.get();
  }
  const bool should_parallelize{g_enable_parallel_window_partition_compute &&
                                elem_count_ >=
                                    g_parallel_window_partition_compute_threshold};

  auto cached_sorted_partition_it =
      sorted_partition_cache.find(sorted_partition_cache_key_);
  if (cached_sorted_partition_it != sorted_partition_cache.end()) {
    auto& sorted_partition = cached_sorted_partition_it->second;
    VLOG(1) << "Reuse cached sorted partition to compute window function context (key: "
            << sorted_partition_cache_key_
            << ", ordering condition: " << ::toString(window_func_->getOrderKeys())
            << ")";
    DEBUG_TIMER("Window Function Cached Sorted Partition Copy");
    std::memcpy(intermediate_output_buffer, sorted_partition->data(), output_buf_sz);
    if (window_func_->hasFraming()) {
      sorted_partition_buf_ = sorted_partition;
    }
  } else {
    // ordering partitions if necessary
    const auto sort_partitions = [&](const size_t start, const size_t end) {
      for (size_t partition_idx = start; partition_idx < end; ++partition_idx) {
        sortPartition(partition_idx,
                      intermediate_output_buffer + offsets()[partition_idx],
                      should_parallelize);
      }
    };

    if (should_parallelize) {
      auto sorted_partition_copy_timer =
          DEBUG_TIMER("Window Function Partition Sorting Parallelized");
      tbb::parallel_for(tbb::blocked_range<int64_t>(0, partitionCount()),
                        [&, parent_thread_local_ids = logger::thread_local_ids()](
                            const tbb::blocked_range<int64_t>& r) {
                          logger::LocalIdsScopeGuard lisg =
                              parent_thread_local_ids.setNewThreadId();
                          sort_partitions(r.begin(), r.end());
                        });
    } else {
      auto sorted_partition_copy_timer =
          DEBUG_TIMER("Window Function Partition Sorting Non-Parallelized");
      sort_partitions(0, partitionCount());
    }
    auto sorted_partition_ref_cnt_it =
        sorted_partition_key_ref_count_map.find(sorted_partition_cache_key_);
    bool can_access_sorted_partition =
        sorted_partition_ref_cnt_it != sorted_partition_key_ref_count_map.end() &&
        sorted_partition_ref_cnt_it->second > 1;
    if (can_access_sorted_partition || window_func_->hasFraming()) {
      // keep the sorted partition only if it will be reused from other window function
      // context of this query
      sorted_partition_buf_ = std::make_shared<std::vector<int64_t>>(elem_count_);
      DEBUG_TIMER("Window Function Sorted Partition Copy For Caching");
      std::memcpy(
          sorted_partition_buf_->data(), intermediate_output_buffer, output_buf_sz);
      auto it = sorted_partition_cache.emplace(sorted_partition_cache_key_,
                                               sorted_partition_buf_);
      if (it.second) {
        VLOG(1) << "Put sorted partition to cache (key: " << sorted_partition_cache_key_
                << ", ordering condition: " << ::toString(window_func_->getOrderKeys())
                << ")";
      }
    }
  }

  if (window_func_->hasFraming()) {
    const auto compute_ordered_partition_null_range = [=](const size_t start,
                                                          const size_t end) {
      for (size_t partition_idx = start; partition_idx < end; ++partition_idx) {
        computeNullRangeOfSortedPartition(
            window_func_->getOrderKeys().front()->get_type_info(),
            partition_idx,
            payload() + offsets()[partition_idx],
            intermediate_output_buffer + offsets()[partition_idx]);
      }
    };
    auto partition_count = partitionCount();

    if (should_parallelize) {
      auto partition_compuation_timer =
          DEBUG_TIMER("Window Function Ordered-Partition Null-Range Compute");
      tbb::parallel_for(tbb::blocked_range<int64_t>(0, partitionCount()),
                        [&, parent_thread_local_ids = logger::thread_local_ids()](
                            const tbb::blocked_range<int64_t>& r) {
                          logger::LocalIdsScopeGuard lisg =
                              parent_thread_local_ids.setNewThreadId();
                          compute_ordered_partition_null_range(r.begin(), r.end());
                        });
    } else {
      auto partition_compuation_timer = DEBUG_TIMER(
          "Window Function Non-Parallelized Ordered-Partition Null-Range Compute");
      compute_ordered_partition_null_range(0, partitionCount());
    }
    auto const cache_key = computeAggregateTreeCacheKey();
    auto const c_it = aggregate_tree_map.find(cache_key);
    if (c_it != aggregate_tree_map.cend()) {
      VLOG(1) << "Reuse aggregate tree for window function framing";
      resizeStorageForWindowFraming(/*for_reuse=*/true);
      aggregate_trees_ = c_it->second;
      memcpy(aggregate_trees_depth_,
             aggregate_trees_.aggregate_trees_depth_,
             sizeof(size_t) * partition_count);
    } else {
      if (needsToBuildAggregateTree()) {
        const auto build_aggregation_tree_for_partitions = [=](const size_t start,
                                                               const size_t end) {
          for (size_t partition_idx = start; partition_idx < end; ++partition_idx) {
            // build a segment tree for the partition
            // todo (yoonmin) : support generic window function expression
            // i.e., when window_func_expr_columns_.size() > 1
            SQLTypeInfo const input_col_ti =
                window_func_->getArgs().front()->get_type_info();
            const auto partition_size = counts()[partition_idx];
            buildAggregationTreeForPartition(window_func_->getKind(),
                                             partition_idx,
                                             partition_size,
                                             payload() + offsets()[partition_idx],
                                             intermediate_output_buffer,
                                             input_col_ti);
          }
        };
        resizeStorageForWindowFraming();
        if (should_parallelize) {
          auto partition_compuation_timer = DEBUG_TIMER(
              "Window Function Parallelized Segment Tree Construction for Partitions");
          tbb::parallel_for(tbb::blocked_range<int64_t>(0, partitionCount()),
                            [=, parent_thread_local_ids = logger::thread_local_ids()](
                                const tbb::blocked_range<int64_t>& r) {
                              logger::LocalIdsScopeGuard lisg =
                                  parent_thread_local_ids.setNewThreadId();
                              build_aggregation_tree_for_partitions(r.begin(), r.end());
                            });
        } else {
          auto partition_compuation_timer = DEBUG_TIMER(
              "Window Function Non-Parallelized Segment Tree Construction for "
              "Partitions");
          build_aggregation_tree_for_partitions(0, partition_count);
        }
      }
      CHECK(aggregate_tree_map.emplace(cache_key, aggregate_trees_).second);
      VLOG(2) << "Put aggregate tree for the window framing";
    }
  }

  const auto compute_partitions = [=](const size_t start, const size_t end) {
    for (size_t partition_idx = start; partition_idx < end; ++partition_idx) {
      computePartitionBuffer(partition_idx,
                             intermediate_output_buffer + offsets()[partition_idx],
                             window_func_);
    }
  };

  if (should_parallelize) {
    auto partition_compuation_timer = DEBUG_TIMER("Window Function Partition Compute");
    tbb::parallel_for(tbb::blocked_range<int64_t>(0, partitionCount()),
                      [&, parent_thread_local_ids = logger::thread_local_ids()](
                          const tbb::blocked_range<int64_t>& r) {
                        logger::LocalIdsScopeGuard lisg =
                            parent_thread_local_ids.setNewThreadId();
                        compute_partitions(r.begin(), r.end());
                      });
  } else {
    auto partition_compuation_timer =
        DEBUG_TIMER("Window Function Non-Parallelized Partition Compute");
    compute_partitions(0, partitionCount());
  }

  if (is_window_function_aggregate_or_has_framing) {
    // If window function is aggregate we were able to write to the final output buffer
    // directly in computePartition and we are done.
    return;
  }

  auto output_i64 = reinterpret_cast<int64_t*>(output_);
  const auto payload_copy = [=](const size_t start, const size_t end) {
    for (size_t i = start; i < end; ++i) {
      output_i64[payload()[i]] = intermediate_output_buffer[i];
    }
  };
  if (should_parallelize) {
    auto payload_copy_timer =
        DEBUG_TIMER("Window Function Non-Aggregate Payload Copy Parallelized");
    tbb::parallel_for(tbb::blocked_range<int64_t>(0, elem_count_),
                      [&, parent_thread_local_ids = logger::thread_local_ids()](
                          const tbb::blocked_range<int64_t>& r) {
                        logger::LocalIdsScopeGuard lisg =
                            parent_thread_local_ids.setNewThreadId();
                        payload_copy(r.begin(), r.end());
                      });
  } else {
    auto payload_copy_timer =
        DEBUG_TIMER("Window Function Non-Aggregate Payload Copy Non-Parallelized");
    payload_copy(0, elem_count_);
  }
}

QueryPlanHash const WindowFunctionContext::computeAggregateTreeCacheKey ( ) const
private

Definition at line 1793 of file WindowContext.cpp.

References Analyzer::WindowFunction::getArgs(), Analyzer::WindowFunction::getCollation(), Analyzer::WindowFunction::getKind(), Analyzer::WindowFunction::getOrderKeys(), Analyzer::WindowFunction::getPartitionKeys(), toString(), and window_func_.

Referenced by compute().

{
  // aggregate tree is constructed per window aggregate function kind, input expression,
  // partition key(s) and ordering key
  // this means when two window definitions have the same condition listed above but
  // differ in frame bound declaration,
  // they can share the same aggregate tree
  auto cache_key = boost::hash_value(::toString(window_func_->getKind()));
  boost::hash_combine(cache_key, ::toString(window_func_->getArgs()));
  boost::hash_combine(cache_key, ::toString(window_func_->getPartitionKeys()));
  boost::hash_combine(cache_key, ::toString(window_func_->getOrderKeys()));
  for (auto& order_entry : window_func_->getCollation()) {
    boost::hash_combine(cache_key, order_entry.toString());
  }
  return cache_key;
}
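The same boost hashing pattern can be reproduced in isolation. Because frame bound declarations are deliberately left out of the key, two window expressions that differ only in their frames hash to the same value and therefore share one aggregate tree. Placeholder strings stand in for the Analyzer::Expr string dumps:

#include <boost/functional/hash.hpp>
#include <cstddef>
#include <string>
#include <vector>

std::size_t aggregate_tree_cache_key(const std::string& kind,
                                     const std::string& args,
                                     const std::string& partition_keys,
                                     const std::string& order_keys,
                                     const std::vector<std::string>& collation) {
  auto cache_key = boost::hash_value(kind);    // seed from the function kind
  boost::hash_combine(cache_key, args);        // then mix in each component...
  boost::hash_combine(cache_key, partition_keys);
  boost::hash_combine(cache_key, order_keys);
  for (const auto& order_entry : collation) {  // ...including every collation entry
    boost::hash_combine(cache_key, order_entry);
  }
  return cache_key;
}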

void WindowFunctionContext::computeNullRangeOfSortedPartition ( const SQLTypeInfo &  order_col_ti,
size_t  partition_idx,
const int32_t *  original_col_idx_buf,
const int64_t *  ordered_col_idx_buf 
)
private

Definition at line 848 of file WindowContext.cpp.

References counts(), logger::FATAL, SQLTypeInfo::get_size(), SQLTypeInfo::get_type(), SQLTypeInfo::is_boolean(), SQLTypeInfo::is_decimal(), SQLTypeInfo::is_fp(), SQLTypeInfo::is_integer(), SQLTypeInfo::is_time_or_date(), kDOUBLE, kFLOAT, LOG, null_val_bit_pattern(), order_columns_, ordered_partition_null_end_pos_, and ordered_partition_null_start_pos_.

Referenced by compute().

{
  IndexPair null_range;
  const auto partition_size = counts()[partition_idx];
  if (partition_size > 0) {
    if (order_col_ti.is_integer() || order_col_ti.is_decimal() ||
        order_col_ti.is_time_or_date() || order_col_ti.is_boolean()) {
      FindNullRange const null_range_info{
          original_col_idx_buf, ordered_col_idx_buf, partition_size};
      switch (order_col_ti.get_size()) {
        case 8:
          null_range =
              null_range_info.find_null_range_int<int64_t>(order_columns_.front());
          break;
        case 4:
          null_range =
              null_range_info.find_null_range_int<int32_t>(order_columns_.front());
          break;
        case 2:
          null_range =
              null_range_info.find_null_range_int<int16_t>(order_columns_.front());
          break;
        case 1:
          null_range =
              null_range_info.find_null_range_int<int8_t>(order_columns_.front());
          break;
        default:
          LOG(FATAL) << "Invalid type size: " << order_col_ti.get_size();
      }
    } else if (order_col_ti.is_fp()) {
      const auto null_bit_pattern =
          null_val_bit_pattern(order_col_ti, order_col_ti.get_type() == kFLOAT);
      FindNullRange const null_range_info{
          original_col_idx_buf, ordered_col_idx_buf, partition_size, null_bit_pattern};
      switch (order_col_ti.get_type()) {
        case kFLOAT:
          null_range = null_range_info.find_null_range_fp<float>(order_columns_.front());
          break;
        case kDOUBLE:
          null_range = null_range_info.find_null_range_fp<double>(order_columns_.front());
          break;
        default:
          LOG(FATAL) << "Invalid float type";
      }
    } else {
      LOG(FATAL) << "Invalid column type for window aggregation over the frame";
    }
  }
  ordered_partition_null_start_pos_[partition_idx] = null_range.first;
  ordered_partition_null_end_pos_[partition_idx] = null_range.second + 1;
}
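After a partition is sorted, its NULL rows form one contiguous run at either end of the ordered index buffer (depending on NULLS FIRST/LAST), so the range can be found by scanning inward from one end; note that the member arrays store the start position and one past the end position. A simplified, standalone version of that scan (the library's FindNullRange helper additionally handles the per-type null sentinels and float/double bit patterns):

#include <cstdint>
#include <utility>
#include <vector>

// Returns the inclusive [start, end] positions of NULL sentinels within a partition
// whose row ids have been ordered so that NULLs are grouped at one end, or {-1, -1}
// if the partition has no NULLs.
std::pair<int64_t, int64_t> find_null_range(const std::vector<int32_t>& column,
                                            const std::vector<int64_t>& ordered_row_idx,
                                            const int32_t null_sentinel) {
  const int64_t n = static_cast<int64_t>(ordered_row_idx.size());
  if (n == 0) {
    return {-1, -1};
  }
  if (column[ordered_row_idx.front()] == null_sentinel) {  // NULLS FIRST
    int64_t end = 0;
    while (end + 1 < n && column[ordered_row_idx[end + 1]] == null_sentinel) {
      ++end;
    }
    return {0, end};
  }
  if (column[ordered_row_idx.back()] == null_sentinel) {  // NULLS LAST
    int64_t start = n - 1;
    while (start > 0 && column[ordered_row_idx[start - 1]] == null_sentinel) {
      --start;
    }
    return {start, n - 1};
  }
  return {-1, -1};
}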

void WindowFunctionContext::computePartitionBuffer ( const size_t  partition_idx,
int64_t *  output_for_partition_buff,
const Analyzer::WindowFunction *  window_func 
)
private

Definition at line 1311 of file WindowContext.cpp.

References anonymous_namespace{WindowContext.cpp}::apply_lag_to_partition(), anonymous_namespace{WindowContext.cpp}::apply_nth_value_to_partition(), anonymous_namespace{WindowContext.cpp}::apply_original_index_to_partition(), anonymous_namespace{WindowContext.cpp}::apply_permutation_to_partition(), run_benchmark_import::args, AVG, CHECK, CHECK_EQ, gpu_enabled::copy(), COUNT, COUNT_IF, counts(), createComparator(), CUME_DIST, DENSE_RANK, FIRST_VALUE, anonymous_namespace{WindowContext.cpp}::get_int_constant_from_expr(), anonymous_namespace{WindowContext.cpp}::get_lag_or_lead_argument(), anonymous_namespace{WindowContext.cpp}::get_target_idx_for_first_or_last_value_func(), Analyzer::WindowFunction::getArgs(), Analyzer::WindowFunction::getKind(), GT, anonymous_namespace{WindowContext.cpp}::index_to_cume_dist(), anonymous_namespace{WindowContext.cpp}::index_to_dense_rank(), anonymous_namespace{WindowContext.cpp}::index_to_ntile(), anonymous_namespace{WindowContext.cpp}::index_to_partition_end(), anonymous_namespace{WindowContext.cpp}::index_to_percent_rank(), anonymous_namespace{WindowContext.cpp}::index_to_rank(), anonymous_namespace{WindowContext.cpp}::index_to_row_number(), LAG, LAG_IN_FRAME, LAST_VALUE, LEAD, LEAD_IN_FRAME, LT, MAX, MIN, anonymous_namespace{Utm.h}::n, NTH_VALUE, NTH_VALUE_IN_FRAME, NTILE, offsets(), partitionEnd(), payload(), PERCENT_RANK, RANK, ROW_NUMBER, SUM, SUM_IF, toString(), window_func_, and window_function_requires_peer_handling().

Referenced by compute().

{
  const size_t partition_size{static_cast<size_t>(counts()[partition_idx])};
  if (partition_size == 0) {
    return;
  }
  const auto offset = offsets()[partition_idx];
  auto partition_comparator = createComparator(partition_idx);
  const auto col_tuple_comparator = [&partition_comparator](const int64_t lhs,
                                                            const int64_t rhs) {
    for (const auto& comparator : partition_comparator) {
      const auto comparator_result = comparator(lhs, rhs);
      switch (comparator_result) {
        case WindowFunctionContext::WindowComparatorResult::LT:
          return true;
        case WindowFunctionContext::WindowComparatorResult::GT:
          return false;
        default:
          // WindowComparatorResult::EQ: continue to next comparator
          continue;
      }
    }
    // If here WindowFunctionContext::WindowComparatorResult::EQ for all keys
    // return false as sort algo must enforce weak ordering
    return false;
  };
  switch (window_func->getKind()) {
    case SqlWindowFunctionKind::ROW_NUMBER: {
      const auto row_numbers =
          index_to_row_number(output_for_partition_buff, partition_size);
      std::copy(row_numbers.begin(), row_numbers.end(), output_for_partition_buff);
      break;
    }
    case SqlWindowFunctionKind::RANK: {
      const auto rank =
          index_to_rank(output_for_partition_buff, partition_size, col_tuple_comparator);
      std::copy(rank.begin(), rank.end(), output_for_partition_buff);
      break;
    }
    case SqlWindowFunctionKind::DENSE_RANK: {
      const auto dense_rank = index_to_dense_rank(
          output_for_partition_buff, partition_size, col_tuple_comparator);
      std::copy(dense_rank.begin(), dense_rank.end(), output_for_partition_buff);
      break;
    }
    case SqlWindowFunctionKind::PERCENT_RANK: {
      const auto percent_rank = index_to_percent_rank(
          output_for_partition_buff, partition_size, col_tuple_comparator);
      std::copy(percent_rank.begin(),
                percent_rank.end(),
                reinterpret_cast<double*>(may_alias_ptr(output_for_partition_buff)));
      break;
    }
    case SqlWindowFunctionKind::CUME_DIST: {
      const auto cume_dist = index_to_cume_dist(
          output_for_partition_buff, partition_size, col_tuple_comparator);
      std::copy(cume_dist.begin(),
                cume_dist.end(),
                reinterpret_cast<double*>(may_alias_ptr(output_for_partition_buff)));
      break;
    }
    case SqlWindowFunctionKind::NTILE: {
      const auto& args = window_func->getArgs();
      CHECK_EQ(args.size(), size_t(1));
      const auto n = get_int_constant_from_expr(args.front().get());
      const auto ntile = index_to_ntile(output_for_partition_buff, partition_size, n);
      std::copy(ntile.begin(), ntile.end(), output_for_partition_buff);
      break;
    }
    case SqlWindowFunctionKind::LAG:
    case SqlWindowFunctionKind::LEAD: {
      const auto lag_or_lead = get_lag_or_lead_argument(window_func);
      const auto partition_row_offsets = payload() + offset;
      apply_lag_to_partition(
          lag_or_lead, partition_row_offsets, output_for_partition_buff, partition_size);
      break;
    }
    case SqlWindowFunctionKind::FIRST_VALUE:
    case SqlWindowFunctionKind::LAST_VALUE: {
      const auto target_idx =
          get_target_idx_for_first_or_last_value_func(window_func, partition_size);
      const auto partition_row_offsets = payload() + offset;
      apply_nth_value_to_partition(
          partition_row_offsets, output_for_partition_buff, partition_size, target_idx);
      break;
    }
    case SqlWindowFunctionKind::NTH_VALUE: {
      auto const n_value_ptr =
          dynamic_cast<Analyzer::Constant*>(window_func_->getArgs()[1].get());
      CHECK(n_value_ptr);
      auto const n_value = static_cast<size_t>(n_value_ptr->get_constval().intval);
      const auto partition_row_offsets = payload() + offset;
      if (n_value < partition_size) {
        apply_nth_value_to_partition(
            partition_row_offsets, output_for_partition_buff, partition_size, n_value);
      } else {
        // when NTH_VALUE of the current row is NULL, we keep the NULL value in the
        // current row's output storage in the query output buffer, so we assign the
        // original index of the current row to the corresponding slot in
        // `output_for_partition_buff`
        apply_original_index_to_partition(
            partition_row_offsets, output_for_partition_buff, partition_size);
      }
      break;
    }
    case SqlWindowFunctionKind::AVG:
    case SqlWindowFunctionKind::MIN:
    case SqlWindowFunctionKind::MAX:
    case SqlWindowFunctionKind::SUM:
    case SqlWindowFunctionKind::COUNT:
    case SqlWindowFunctionKind::SUM_IF:
    case SqlWindowFunctionKind::COUNT_IF:
    case SqlWindowFunctionKind::LEAD_IN_FRAME:
    case SqlWindowFunctionKind::LAG_IN_FRAME:
    case SqlWindowFunctionKind::NTH_VALUE_IN_FRAME: {
      const auto partition_row_offsets = payload() + offset;
      if (window_function_requires_peer_handling(window_func)) {
        index_to_partition_end(partitionEnd(),
                               offset,
                               output_for_partition_buff,
                               partition_size,
                               col_tuple_comparator);
      }
      apply_permutation_to_partition(
          output_for_partition_buff, partition_row_offsets, partition_size);
      break;
    }
    default: {
      throw std::runtime_error("Window function not supported yet: " +
                               ::toString(window_func->getKind()));
    }
  }
}
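The index_to_* helpers referenced above turn the sorted permutation of row ids plus the tuple comparator into per-row outputs. As an illustration of the semantics only (not the library's implementation): RANK repeats a value across peer rows and then jumps by the size of the peer group, whereas DENSE_RANK would instead advance by exactly one per distinct group.

#include <cstdint>
#include <functional>
#include <vector>

// `index[i]` is the row id at position i of the ORDER BY-sorted partition and
// `is_peer(a, b)` is true when two rows tie on every order key (the tuple
// comparator returns EQ for each key).
std::vector<int64_t> index_to_rank_example(
    const std::vector<int64_t>& index,
    const std::function<bool(int64_t, int64_t)>& is_peer) {
  std::vector<int64_t> rank(index.size());
  int64_t current_rank = 1;
  for (size_t i = 0; i < index.size(); ++i) {
    if (i > 0 && !is_peer(index[i - 1], index[i])) {
      current_rank = static_cast<int64_t>(i) + 1;  // RANK skips over the peer group
    }
    rank[i] = current_rank;
  }
  return rank;
}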

const int32_t * WindowFunctionContext::counts ( ) const

Definition at line 1771 of file WindowContext.cpp.

References device_type_, dummy_count_, and partitions_.

Referenced by CodeGenerator::codegenFixedLengthColVarInWindow(), Executor::codegenLoadPartitionBuffers(), compute(), computeNullRangeOfSortedPartition(), computePartitionBuffer(), fillPartitionEnd(), fillPartitionStart(), partitionCount(), sortPartition(), and WindowFunctionContext().

{
  if (partitions_) {
    return reinterpret_cast<const int32_t*>(
        partitions_->getJoinHashBuffer(device_type_, 0) + partitions_->countBufferOff());
  }
  return &dummy_count_;
}
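counts(), offsets() and payload() together describe the partitioning exposed by the hash table: partition i owns counts()[i] row ids, stored contiguously in payload() starting at offsets()[i] (without a hash table, the dummy members describe one partition covering every row). Walking that layout with plain arrays:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // Three partitions over eight rows, in the same layout the hash table exposes.
  const std::vector<int32_t> counts{3, 1, 4};
  const std::vector<int32_t> offsets{0, 3, 4};
  const std::vector<int32_t> payload{7, 0, 2, 5, 1, 3, 4, 6};  // row ids, partition-major
  for (size_t p = 0; p < counts.size(); ++p) {
    std::cout << "partition " << p << ":";
    for (int32_t i = 0; i < counts[p]; ++i) {
      std::cout << ' ' << payload[offsets[p] + i];
    }
    std::cout << '\n';
  }
}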

std::vector< WindowFunctionContext::Comparator > WindowFunctionContext::createComparator ( size_t  partition_idx)

Definition at line 903 of file WindowContext.cpp.

References CHECK, CHECK_EQ, Analyzer::WindowFunction::getCollation(), Analyzer::WindowFunction::getOrderKeys(), makeComparator(), offsets(), order_columns_, payload(), and window_func_.

Referenced by computePartitionBuffer(), and sortPartition().

{
  // create tuple comparator
  std::vector<WindowFunctionContext::Comparator> partition_comparator;
  const auto& order_keys = window_func_->getOrderKeys();
  const auto& collation = window_func_->getCollation();
  CHECK_EQ(order_keys.size(), collation.size());
  for (size_t order_column_idx = 0; order_column_idx < order_columns_.size();
       ++order_column_idx) {
    auto order_column_buffer = order_columns_[order_column_idx];
    const auto order_col =
        dynamic_cast<const Analyzer::ColumnVar*>(order_keys[order_column_idx].get());
    CHECK(order_col);
    const auto& order_col_collation = collation[order_column_idx];
    auto comparator = makeComparator(order_col,
                                     order_column_buffer,
                                     payload() + offsets()[partition_idx],
                                     !order_col_collation.is_desc,
                                     order_col_collation.nulls_first);
    if (order_col_collation.is_desc) {
      comparator = [comparator](const int64_t lhs, const int64_t rhs) {
        return comparator(rhs, lhs);
      };
    }
    partition_comparator.push_back(comparator);
  }
  return partition_comparator;
}

size_t WindowFunctionContext::elementCount ( ) const

Definition at line 1025 of file WindowContext.cpp.

References elem_count_.

Referenced by Executor::codegenCurrentPartitionIndex().

{
  return elem_count_;
}


void WindowFunctionContext::fillPartitionEnd ( )
private

Definition at line 1710 of file WindowContext.cpp.

References agg_count_distinct_bitmap(), Bitmap, checked_calloc(), counts(), CPU, elem_count_, gpu_enabled::partial_sum(), partition_end_, partition_start_offset_, partitionCount(), and partitions_.

Referenced by compute().

{
  CountDistinctDescriptor partition_start_bitmap{CountDistinctImplType::Bitmap,
                                                 0,
                                                 static_cast<int64_t>(elem_count_),
                                                 false,
                                                 ExecutorDeviceType::CPU,
                                                 1};
  auto bitmap_sz = partition_start_bitmap.bitmapPaddedSizeBytes();
  if (partitions_) {
    bitmap_sz += partitions_->isBitwiseEq() ? 1 : 0;
  }
  partition_end_ = static_cast<int8_t*>(checked_calloc(bitmap_sz, 1));
  auto partition_end_handle = reinterpret_cast<int64_t>(partition_end_);
  int64_t partition_count = partitionCount();
  if (partition_start_offset_) {
    // if we have `partition_start_offset_`, we can reuse it for this logic
    // but note that it has partition_count + 1 elements where the first element is zero
    // which means the first partition's start offset is zero
    // and rest of them can represent values required for this logic
    for (int64_t i = 0; i < partition_count - 1; ++i) {
      if (partition_start_offset_[i + 1] == 0) {
        continue;
      }
      agg_count_distinct_bitmap(
          &partition_end_handle, partition_start_offset_[i + 1] - 1, 0);
    }
    if (elem_count_) {
      agg_count_distinct_bitmap(&partition_end_handle, elem_count_ - 1, 0);
    }
  } else {
    std::vector<size_t> partition_offsets(partition_count);
    std::partial_sum(counts(), counts() + partition_count, partition_offsets.begin());
    for (int64_t i = 0; i < partition_count - 1; ++i) {
      if (partition_offsets[i] == 0) {
        continue;
      }
      agg_count_distinct_bitmap(&partition_end_handle, partition_offsets[i] - 1, 0);
    }
    if (elem_count_) {
      agg_count_distinct_bitmap(&partition_end_handle, elem_count_ - 1, 0);
    }
  }
}

void WindowFunctionContext::fillPartitionStart ( )
private

Definition at line 1677 of file WindowContext.cpp.

References agg_count_distinct_bitmap(), Bitmap, checked_calloc(), counts(), CPU, elem_count_, gpu_enabled::partial_sum(), partition_start_, partition_start_offset_, partitionCount(), and partitions_.

Referenced by compute().

{
  CountDistinctDescriptor partition_start_bitmap{CountDistinctImplType::Bitmap,
                                                 0,
                                                 static_cast<int64_t>(elem_count_),
                                                 false,
                                                 ExecutorDeviceType::CPU,
                                                 1};
  auto bitmap_sz = partition_start_bitmap.bitmapPaddedSizeBytes();
  if (partitions_) {
    bitmap_sz += partitions_->isBitwiseEq() ? 1 : 0;
  }
  partition_start_ = static_cast<int8_t*>(checked_calloc(bitmap_sz, 1));
  int64_t partition_count = partitionCount();
  auto partition_start_handle = reinterpret_cast<int64_t>(partition_start_);
  agg_count_distinct_bitmap(&partition_start_handle, 0, 0);
  if (partition_start_offset_) {
    // if we have `partition_start_offset_`, we can reuse it for this logic
    // but note that it has partition_count + 1 elements where the first element is zero
    // which means the first partition's start offset is zero
    // and rest of them can represent values required for this logic
    for (int64_t i = 0; i < partition_count - 1; ++i) {
      agg_count_distinct_bitmap(
          &partition_start_handle, partition_start_offset_[i + 1], 0);
    }
  } else {
    std::vector<size_t> partition_offsets(partition_count);
    std::partial_sum(counts(), counts() + partition_count, partition_offsets.begin());
    for (int64_t i = 0; i < partition_count - 1; ++i) {
      agg_count_distinct_bitmap(&partition_start_handle, partition_offsets[i], 0);
    }
  }
}
RUNTIME_EXPORT ALWAYS_INLINE void agg_count_distinct_bitmap(int64_t *agg, const int64_t val, const int64_t min_val)
const int32_t * counts() const
size_t partitionCount() const
DEVICE void partial_sum(ARGS &&...args)
Definition: gpu_enabled.h:87
void * checked_calloc(const size_t nmemb, const size_t size)
Definition: checked_alloc.h:53
std::shared_ptr< HashJoin > partitions_
int64_t * partition_start_offset_

+ Here is the call graph for this function:

+ Here is the caller graph for this function:
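The start bitmap built here is consumed bit-by-bit elsewhere in the window-function runtime. As a minimal sketch, assuming agg_count_distinct_bitmap() uses the conventional packed layout (row r lands in byte r / 8, bit r % 8), a caller could test whether a row opens a partition like this (the helper itself is hypothetical, not part of the WindowFunctionContext API):

#include <cstdint>

// True when row_idx is flagged in the packed partition-start bitmap.
inline bool is_partition_start(const int8_t* partition_start_bitmap,
                               const int64_t row_idx) {
  const auto byte = static_cast<uint8_t>(partition_start_bitmap[row_idx >> 3]);
  return (byte >> (row_idx & 7)) & 1;
}

Row 0 is always set (the unconditional agg_count_distinct_bitmap(&partition_start_handle, 0, 0) call above), and each partition_start_offset_[i + 1] marks where partition i + 1 begins.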

size_t * WindowFunctionContext::getAggregateTreeDepth ( ) const

Definition at line 1661 of file WindowContext.cpp.

References aggregate_trees_depth_.

1661  size_t* WindowFunctionContext::getAggregateTreeDepth() const {
1662  return aggregate_trees_depth_;
1663 }
size_t * aggregate_trees_depth_
size_t WindowFunctionContext::getAggregateTreeFanout ( ) const

Definition at line 1665 of file WindowContext.cpp.

References aggregate_trees_fan_out_.

1665  size_t WindowFunctionContext::getAggregateTreeFanout() const {
1666  return aggregate_trees_fan_out_;
1667 }
double ** WindowFunctionContext::getAggregationTreesForDoubleTypeWindowExpr ( ) const

Definition at line 1645 of file WindowContext.cpp.

References AggregateTreeForWindowFraming::aggregate_tree_for_double_type_, and aggregate_trees_.

1645  double** WindowFunctionContext::getAggregationTreesForDoubleTypeWindowExpr() const {
1646  return const_cast<double**>(aggregate_trees_.aggregate_tree_for_double_type_.data());
1647 }
std::vector< double * > aggregate_tree_for_double_type_
Definition: WindowContext.h:73
AggregateTreeForWindowFraming aggregate_trees_
int64_t ** WindowFunctionContext::getAggregationTreesForIntegerTypeWindowExpr ( ) const

Definition at line 1641 of file WindowContext.cpp.

References AggregateTreeForWindowFraming::aggregate_tree_for_integer_type_, and aggregate_trees_.

1641  int64_t** WindowFunctionContext::getAggregationTreesForIntegerTypeWindowExpr() const {
1642  return const_cast<int64_t**>(aggregate_trees_.aggregate_tree_for_integer_type_.data());
1643 }
AggregateTreeForWindowFraming aggregate_trees_
std::vector< int64_t * > aggregate_tree_for_integer_type_
Definition: WindowContext.h:72
const std::vector< const int8_t * > & WindowFunctionContext::getColumnBufferForWindowFunctionExpressions ( ) const

Definition at line 174 of file WindowContext.cpp.

References window_func_expr_columns_.

174  const std::vector<const int8_t*>& WindowFunctionContext::getColumnBufferForWindowFunctionExpressions() const {
175    return window_func_expr_columns_;
176 }
std::vector< const int8_t * > window_func_expr_columns_
SumAndCountPair< double > ** WindowFunctionContext::getDerivedAggregationTreesForDoubleTypeWindowExpr ( ) const

Definition at line 1656 of file WindowContext.cpp.

References aggregate_trees_, and AggregateTreeForWindowFraming::derived_aggregate_tree_for_double_type_.

1656  SumAndCountPair<double>** WindowFunctionContext::getDerivedAggregationTreesForDoubleTypeWindowExpr() const {
1657    return const_cast<SumAndCountPair<double>**>(
1658        aggregate_trees_.derived_aggregate_tree_for_double_type_.data());
1659 }
std::vector< SumAndCountPair< double > * > derived_aggregate_tree_for_double_type_
Definition: WindowContext.h:75
AggregateTreeForWindowFraming aggregate_trees_
SumAndCountPair< int64_t > ** WindowFunctionContext::getDerivedAggregationTreesForIntegerTypeWindowExpr ( ) const

Definition at line 1650 of file WindowContext.cpp.

References aggregate_trees_, and AggregateTreeForWindowFraming::derived_aggregate_tree_for_integer_type_.

1650  SumAndCountPair<int64_t>** WindowFunctionContext::getDerivedAggregationTreesForIntegerTypeWindowExpr() const {
1651    return const_cast<SumAndCountPair<int64_t>**>(
1652        aggregate_trees_.derived_aggregate_tree_for_integer_type_.data());
1653 }
AggregateTreeForWindowFraming aggregate_trees_
std::vector< SumAndCountPair< int64_t > * > derived_aggregate_tree_for_integer_type_
Definition: WindowContext.h:74
int64_t * WindowFunctionContext::getNullValueEndPos ( ) const

Definition at line 1673 of file WindowContext.cpp.

References ordered_partition_null_end_pos_.

Referenced by Executor::codegenFrameNullRange().

1673  int64_t* WindowFunctionContext::getNullValueEndPos() const {
1674    return ordered_partition_null_end_pos_;
1675 }
int64_t * ordered_partition_null_end_pos_

+ Here is the caller graph for this function:

int64_t * WindowFunctionContext::getNullValueStartPos ( ) const

Definition at line 1669 of file WindowContext.cpp.

References ordered_partition_null_start_pos_.

Referenced by Executor::codegenFrameNullRange().

1669  int64_t* WindowFunctionContext::getNullValueStartPos() const {
1670    return ordered_partition_null_start_pos_;
1671 }
int64_t * ordered_partition_null_start_pos_

+ Here is the caller graph for this function:

const std::vector< const int8_t * > & WindowFunctionContext::getOrderKeyColumnBuffers ( ) const

Definition at line 178 of file WindowContext.cpp.

References order_columns_.

Referenced by Executor::codegenLoadOrderKeyBufPtr(), and Executor::codegenWindowFrameBounds().

179  {
180  return order_columns_;
181 }
std::vector< const int8_t * > order_columns_

+ Here is the caller graph for this function:

const std::vector< SQLTypeInfo > & WindowFunctionContext::getOrderKeyColumnBufferTypes ( ) const

Definition at line 183 of file WindowContext.cpp.

References order_columns_ti_.

Referenced by CodeGenerator::codegenFixedLengthColVar(), and Executor::codegenLoadOrderKeyBufPtr().

184  {
185  return order_columns_ti_;
186 }
std::vector< SQLTypeInfo > order_columns_ti_

+ Here is the caller graph for this function:

const Analyzer::WindowFunction * WindowFunctionContext::getWindowFunction ( ) const

Definition at line 979 of file WindowContext.cpp.

References window_func_.

Referenced by Executor::codegenCurrentPartitionIndex(), CodeGenerator::codegenFixedLengthColVar(), CodeGenerator::codegenFixedLengthColVarInWindow(), Executor::codegenLoadCurrentValueFromColBuf(), Executor::codegenLoadOrderKeyBufPtr(), Executor::codegenWindowFrameBounds(), Executor::codegenWindowFunction(), Executor::getFirstOrderColTypeInfo(), and Executor::getOrderKeyTypeName().

979  const Analyzer::WindowFunction* WindowFunctionContext::getWindowFunction() const {
980  return window_func_;
981 }
const Analyzer::WindowFunction * window_func_

+ Here is the caller graph for this function:

WindowFunctionContext::Comparator WindowFunctionContext::makeComparator ( const Analyzer::ColumnVar col_var,
const int8_t *  partition_values,
const int32_t *  partition_indices,
const bool  asc_ordering,
const bool  nulls_first 
)
staticprivate

Definition at line 1173 of file WindowContext.cpp.

References logger::FATAL, Analyzer::Expr::get_type_info(), kDOUBLE, kFLOAT, and LOG.

Referenced by createComparator().

1173  WindowFunctionContext::Comparator WindowFunctionContext::makeComparator(
1174      const Analyzer::ColumnVar* col_var,
1175      const int8_t* order_column_buffer,
1176      const int32_t* partition_indices,
1177      const bool asc_ordering,
1178      const bool nulls_first) {
1179  const auto& ti = col_var->get_type_info();
1180  if (ti.is_integer() || ti.is_decimal() || ti.is_time() || ti.is_boolean()) {
1181  switch (ti.get_size()) {
1182  case 8: {
1183  return [order_column_buffer, nulls_first, partition_indices, asc_ordering, &ti](
1184  const int64_t lhs, const int64_t rhs) {
1185  return asc_ordering ? integer_comparator_asc<int64_t>(order_column_buffer,
1186  ti,
1187  partition_indices,
1188  lhs,
1189  rhs,
1190  asc_ordering,
1191  nulls_first)
1192  : integer_comparator_desc<int64_t>(order_column_buffer,
1193  ti,
1194  partition_indices,
1195  lhs,
1196  rhs,
1197  asc_ordering,
1198  nulls_first);
1199  };
1200  }
1201  case 4: {
1202  return [order_column_buffer, nulls_first, partition_indices, asc_ordering, &ti](
1203  const int64_t lhs, const int64_t rhs) {
1204  return asc_ordering ? integer_comparator_asc<int32_t>(order_column_buffer,
1205  ti,
1206  partition_indices,
1207  lhs,
1208  rhs,
1209  asc_ordering,
1210  nulls_first)
1211  : integer_comparator_desc<int32_t>(order_column_buffer,
1212  ti,
1213  partition_indices,
1214  lhs,
1215  rhs,
1216  asc_ordering,
1217  nulls_first);
1218  };
1219  }
1220  case 2: {
1221  return [order_column_buffer, nulls_first, partition_indices, asc_ordering, &ti](
1222  const int64_t lhs, const int64_t rhs) {
1223  return asc_ordering ? integer_comparator_asc<int16_t>(order_column_buffer,
1224  ti,
1225  partition_indices,
1226  lhs,
1227  rhs,
1228  asc_ordering,
1229  nulls_first)
1230  : integer_comparator_desc<int16_t>(order_column_buffer,
1231  ti,
1232  partition_indices,
1233  lhs,
1234  rhs,
1235  asc_ordering,
1236  nulls_first);
1237  };
1238  }
1239  case 1: {
1240  return [order_column_buffer, nulls_first, partition_indices, asc_ordering, &ti](
1241  const int64_t lhs, const int64_t rhs) {
1242  return asc_ordering ? integer_comparator_asc<int8_t>(order_column_buffer,
1243  ti,
1244  partition_indices,
1245  lhs,
1246  rhs,
1247  asc_ordering,
1248  nulls_first)
1249  : integer_comparator_desc<int8_t>(order_column_buffer,
1250  ti,
1251  partition_indices,
1252  lhs,
1253  rhs,
1254  asc_ordering,
1255  nulls_first);
1256  };
1257  }
1258  default: {
1259  LOG(FATAL) << "Invalid type size: " << ti.get_size();
1260  }
1261  }
1262  }
1263  if (ti.is_fp()) {
1264  switch (ti.get_type()) {
1265  case kFLOAT: {
1266  return [order_column_buffer, nulls_first, partition_indices, asc_ordering, &ti](
1267  const int64_t lhs, const int64_t rhs) {
1268  return asc_ordering ? fp_comparator_asc<float, int32_t>(order_column_buffer,
1269  ti,
1270  partition_indices,
1271  lhs,
1272  rhs,
1273  asc_ordering,
1274  nulls_first)
1275  : fp_comparator_desc<float, int32_t>(order_column_buffer,
1276  ti,
1277  partition_indices,
1278  lhs,
1279  rhs,
1280  asc_ordering,
1281  nulls_first);
1282  };
1283  }
1284  case kDOUBLE: {
1285  return [order_column_buffer, nulls_first, partition_indices, asc_ordering, &ti](
1286  const int64_t lhs, const int64_t rhs) {
1287  return asc_ordering ? fp_comparator_asc<double, int64_t>(order_column_buffer,
1288  ti,
1289  partition_indices,
1290  lhs,
1291  rhs,
1292  asc_ordering,
1293  nulls_first)
1294  : fp_comparator_desc<double, int64_t>(order_column_buffer,
1295  ti,
1296  partition_indices,
1297  lhs,
1298  rhs,
1299  asc_ordering,
1300  nulls_first);
1301  };
1302  }
1303  default: {
1304  LOG(FATAL) << "Invalid float type";
1305  }
1306  }
1307  }
1308  throw std::runtime_error("Type not supported yet");
1309 }
#define LOG(tag)
Definition: Logger.h:285
const SQLTypeInfo & get_type_info() const
Definition: Analyzer.h:79

+ Here is the call graph for this function:

+ Here is the caller graph for this function:
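The lambdas returned here all share one shape: lhs and rhs are positions within a partition, partition_indices maps them to global row ids, and the order-by column is read at those rows. A simplified stand-in (not the library's integer_comparator_asc; null handling, descending order and the nulls_first flag are omitted) looks roughly like this:

#include <cstdint>

enum class Result { LT, EQ, GT };  // mirrors WindowFunctionContext::WindowComparatorResult

template <typename T>
Result compare_asc(const int8_t* order_column_buffer,
                   const int32_t* partition_indices,
                   const int64_t lhs,
                   const int64_t rhs) {
  const auto* values = reinterpret_cast<const T*>(order_column_buffer);
  const T lhs_val = values[partition_indices[lhs]];
  const T rhs_val = values[partition_indices[rhs]];
  if (lhs_val < rhs_val) {
    return Result::LT;
  }
  return lhs_val > rhs_val ? Result::GT : Result::EQ;
}

The type dispatch above simply instantiates this idea for int8_t through int64_t and for float/double, choosing the ascending or descending variant based on asc_ordering.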

const bool WindowFunctionContext::needsToBuildAggregateTree ( ) const

Definition at line 1788 of file WindowContext.cpp.

References elem_count_, Analyzer::WindowFunction::hasAggregateTreeRequiredWindowFunc(), Analyzer::WindowFunction::hasFraming(), and window_func_.

Referenced by compute().

1788  const bool WindowFunctionContext::needsToBuildAggregateTree() const {
1789    return window_func_->hasFraming() &&
1790           window_func_->hasAggregateTreeRequiredWindowFunc() && elem_count_ > 0;
1791 }
bool hasAggregateTreeRequiredWindowFunc() const
Definition: Analyzer.h:2624
const Analyzer::WindowFunction * window_func_
bool hasFraming() const
Definition: Analyzer.h:2612

+ Here is the call graph for this function:

+ Here is the caller graph for this function:
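For example, AVG(val) OVER (PARTITION BY k ORDER BY t ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) both has a frame clause and uses an aggregate that needs the segment tree, so this predicate would be expected to return true on a non-empty input and compute() would build the aggregation trees; a frameless function such as ROW_NUMBER() OVER (PARTITION BY k ORDER BY t) would not trigger the build.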

const int32_t * WindowFunctionContext::offsets ( ) const

Definition at line 1763 of file WindowContext.cpp.

References device_type_, dummy_offset_, and partitions_.

Referenced by buildAggregationTreeForPartition(), compute(), computePartitionBuffer(), createComparator(), and partitionCount().

1763  const int32_t* WindowFunctionContext::offsets() const {
1764  if (partitions_) {
1765  return reinterpret_cast<const int32_t*>(
1766  partitions_->getJoinHashBuffer(device_type_, 0) + partitions_->offsetBufferOff());
1767  }
1768  return &dummy_offset_;
1769 }
const int32_t dummy_offset_
std::shared_ptr< HashJoin > partitions_
const ExecutorDeviceType device_type_

+ Here is the caller graph for this function:

WindowFunctionContext& WindowFunctionContext::operator= ( const WindowFunctionContext )
delete
const int8_t * WindowFunctionContext::output ( ) const

Definition at line 983 of file WindowContext.cpp.

References output_.

Referenced by CodeGenerator::codegenWindowPosition().

983  const int8_t* WindowFunctionContext::output() const {
984  return output_;
985 }

+ Here is the caller graph for this function:

size_t WindowFunctionContext::partitionCount ( ) const

Definition at line 1779 of file WindowContext.cpp.

References CHECK_GE, counts(), offsets(), and partitions_.

Referenced by Executor::codegenCurrentPartitionIndex(), compute(), fillPartitionEnd(), fillPartitionStart(), resizeStorageForWindowFraming(), and WindowFunctionContext().

1779  size_t WindowFunctionContext::partitionCount() const {
1780  if (partitions_) {
1781  const auto partition_count = counts() - offsets();
1782  CHECK_GE(partition_count, 0);
1783  return partition_count;
1784  }
1785  return 1; // non-partitioned window function
1786 }
#define CHECK_GE(x, y)
Definition: Logger.h:306
const int32_t * counts() const
const int32_t * offsets() const
std::shared_ptr< HashJoin > partitions_

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

const int8_t * WindowFunctionContext::partitionEnd ( ) const

Definition at line 1021 of file WindowContext.cpp.

References partition_end_.

Referenced by computePartitionBuffer().

1021  const int8_t* WindowFunctionContext::partitionEnd() const {
1022  return partition_end_;
1023 }

+ Here is the caller graph for this function:

const int64_t * WindowFunctionContext::partitionNumCountBuf ( ) const

Definition at line 1007 of file WindowContext.cpp.

References CHECK, and partition_start_offset_.

Referenced by Executor::codegenCurrentPartitionIndex().

1007  const int64_t* WindowFunctionContext::partitionNumCountBuf() const {
1008    CHECK(partition_start_offset_);
1009    return partition_start_offset_ + 1;
1010 }
int64_t * partition_start_offset_
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the caller graph for this function:

const int8_t * WindowFunctionContext::partitionStart ( ) const

Definition at line 1017 of file WindowContext.cpp.

References partition_start_.

1017  const int8_t* WindowFunctionContext::partitionStart() const {
1018  return partition_start_;
1019 }
const int64_t * WindowFunctionContext::partitionStartOffset ( ) const

Definition at line 1002 of file WindowContext.cpp.

References CHECK, and partition_start_offset_.

Referenced by Executor::codegenLoadPartitionBuffers().

1002  const int64_t* WindowFunctionContext::partitionStartOffset() const {
1003    CHECK(partition_start_offset_);
1004    return partition_start_offset_;
1005 }
int64_t * partition_start_offset_
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the caller graph for this function:

const int32_t * WindowFunctionContext::payload ( ) const

Definition at line 1754 of file WindowContext.cpp.

References device_type_, dummy_payload_, and partitions_.

Referenced by Executor::codegenCurrentPartitionIndex(), Executor::codegenLoadPartitionBuffers(), compute(), computePartitionBuffer(), and createComparator().

1754  const int32_t* WindowFunctionContext::payload() const {
1755  if (partitions_) {
1756  return reinterpret_cast<const int32_t*>(
1757  partitions_->getJoinHashBuffer(device_type_, 0) +
1758  partitions_->payloadBufferOff());
1759  }
1760  return dummy_payload_; // non-partitioned window function
1761 }
std::shared_ptr< HashJoin > partitions_
const ExecutorDeviceType device_type_

+ Here is the caller graph for this function:
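Taken together, payload(), offsets() and counts() describe the partition layout that computePartitionBuffer() and createComparator() walk: offsets()[p] is where partition p starts inside payload(), counts()[p] is its row count, and payload() holds the global row indices. A small sketch of that access pattern (an illustrative helper, not part of the class API):

#include <cstddef>
#include <cstdint>
#include <vector>

// Collect the global row ids belonging to one partition.
std::vector<int32_t> rows_of_partition(const int32_t* offsets,
                                       const int32_t* counts,
                                       const int32_t* payload,
                                       const size_t partition_idx) {
  const int32_t begin = offsets[partition_idx];
  const int32_t size = counts[partition_idx];
  return std::vector<int32_t>(payload + begin, payload + begin + size);
}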

void WindowFunctionContext::resizeStorageForWindowFraming ( bool const  for_reuse = false)
private

Definition at line 1447 of file WindowContext.cpp.

References aggregate_trees_, partitionCount(), AggregateTreeForWindowFraming::resizeStorageForWindowFraming(), and segment_trees_owned_.

Referenced by compute().

1447  void WindowFunctionContext::resizeStorageForWindowFraming(bool const for_reuse) {
1448    auto const partition_count = partitionCount();
1449    aggregate_trees_.resizeStorageForWindowFraming(partition_count);
1450    if (!for_reuse) {
1451  segment_trees_owned_.resize(partition_count);
1452  }
1453 }
std::vector< std::shared_ptr< void > > segment_trees_owned_
size_t partitionCount() const
AggregateTreeForWindowFraming aggregate_trees_
void resizeStorageForWindowFraming(size_t partition_count)
Definition: WindowContext.h:78

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void WindowFunctionContext::setSortedPartitionCacheKey ( QueryPlanHash  cache_key)

Definition at line 188 of file WindowContext.cpp.

References sorted_partition_cache_key_.

188  void WindowFunctionContext::setSortedPartitionCacheKey(QueryPlanHash cache_key) {
189  sorted_partition_cache_key_ = cache_key;
190 }
QueryPlanHash sorted_partition_cache_key_
const int64_t * WindowFunctionContext::sortedPartition ( ) const

Definition at line 987 of file WindowContext.cpp.

References CHECK, and sorted_partition_buf_.

Referenced by Executor::codegenLoadPartitionBuffers().

987  const int64_t* WindowFunctionContext::sortedPartition() const {
988    CHECK(sorted_partition_buf_);
989    return sorted_partition_buf_->data();
990 }
std::shared_ptr< std::vector< int64_t > > sorted_partition_buf_
#define CHECK(condition)
Definition: Logger.h:291

+ Here is the caller graph for this function:

void WindowFunctionContext::sortPartition ( const size_t  partition_idx,
int64_t *  output_for_partition_buff,
bool  should_parallelize 
)
private

Definition at line 932 of file WindowContext.cpp.

References counts(), createComparator(), GT, gpu_enabled::iota(), LT, and gpu_enabled::sort().

Referenced by compute().

932  void WindowFunctionContext::sortPartition(const size_t partition_idx,
933                                            int64_t* output_for_partition_buff,
934                                            bool should_parallelize) {
935  const size_t partition_size{static_cast<size_t>(counts()[partition_idx])};
936  if (partition_size == 0) {
937  return;
938  }
939  std::iota(
940  output_for_partition_buff, output_for_partition_buff + partition_size, int64_t(0));
941  auto partition_comparator = createComparator(partition_idx);
942  if (!partition_comparator.empty()) {
943  const auto col_tuple_comparator = [&partition_comparator](const int64_t lhs,
944  const int64_t rhs) {
945  for (const auto& comparator : partition_comparator) {
946  const auto comparator_result = comparator(lhs, rhs);
947          switch (comparator_result) {
948            case WindowFunctionContext::WindowComparatorResult::LT:
949              return true;
950            case WindowFunctionContext::WindowComparatorResult::GT:
951              return false;
952  default:
953  // WindowComparatorResult::EQ: continue to next comparator
954  continue;
955  }
956  }
957       // if we get here, every comparator returned WindowComparatorResult::EQ,
958       // so return false: the sort algorithm must see a strict weak ordering
959  return false;
960  };
961  if (should_parallelize) {
962 #ifdef HAVE_TBB
963  tbb::parallel_sort(output_for_partition_buff,
964  output_for_partition_buff + partition_size,
965  col_tuple_comparator);
966 #else
967  thrust::sort(output_for_partition_buff,
968  output_for_partition_buff + partition_size,
969  col_tuple_comparator);
970 #endif
971  } else {
972  std::sort(output_for_partition_buff,
973  output_for_partition_buff + partition_size,
974  col_tuple_comparator);
975  }
976  }
977 }
DEVICE void sort(ARGS &&...args)
Definition: gpu_enabled.h:105
const int32_t * counts() const
std::vector< Comparator > createComparator(size_t partition_idx)
DEVICE void iota(ARGS &&...args)
Definition: gpu_enabled.h:69

+ Here is the call graph for this function:

+ Here is the caller graph for this function:
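The chained-comparator predicate above is the part that is easiest to get wrong: EQ must fall through to the next ORDER BY key, and full equality must yield false so std::sort / tbb::parallel_sort still see a strict weak ordering. A self-contained sketch of the same pattern, using plain std::sort and hypothetical names:

#include <algorithm>
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

enum class Cmp { LT, EQ, GT };
using Comparator = std::function<Cmp(int64_t, int64_t)>;

// Produce the sorted row order of one partition from a list of per-key
// comparators (most significant ORDER BY key first).
std::vector<int64_t> sorted_row_order(const std::vector<Comparator>& comparators,
                                      const size_t partition_size) {
  std::vector<int64_t> order(partition_size);
  std::iota(order.begin(), order.end(), int64_t(0));
  std::sort(order.begin(), order.end(), [&](const int64_t lhs, const int64_t rhs) {
    for (const auto& cmp : comparators) {
      switch (cmp(lhs, rhs)) {
        case Cmp::LT:
          return true;
        case Cmp::GT:
          return false;
        case Cmp::EQ:
          break;  // tie on this key, consult the next one
      }
    }
    return false;  // equal on every key: not "less than"
  });
  return order;
}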

Member Data Documentation

AggregateState WindowFunctionContext::aggregate_state_
private
size_t* WindowFunctionContext::aggregate_trees_depth_
private
size_t WindowFunctionContext::aggregate_trees_fan_out_
private

Definition at line 296 of file WindowContext.h.

Referenced by buildAggregationTreeForPartition(), and getAggregateTreeFanout().

const ExecutorDeviceType WindowFunctionContext::device_type_
private

Definition at line 309 of file WindowContext.h.

Referenced by counts(), offsets(), and payload().

const int32_t WindowFunctionContext::dummy_count_
private

Definition at line 313 of file WindowContext.h.

Referenced by counts().

const int32_t WindowFunctionContext::dummy_offset_
private

Definition at line 314 of file WindowContext.h.

Referenced by offsets().

int32_t* WindowFunctionContext::dummy_payload_
private

Definition at line 319 of file WindowContext.h.

Referenced by payload(), WindowFunctionContext(), and ~WindowFunctionContext().

size_t WindowFunctionContext::elem_count_
private
std::vector<const int8_t*> WindowFunctionContext::order_columns_
private
std::vector<std::vector<std::shared_ptr<Chunk_NS::Chunk> > > WindowFunctionContext::order_columns_owner_
private

Definition at line 277 of file WindowContext.h.

Referenced by addOrderColumn().

std::vector<SQLTypeInfo> WindowFunctionContext::order_columns_ti_
private

Definition at line 280 of file WindowContext.h.

Referenced by addOrderColumn(), and getOrderKeyColumnBufferTypes().

int64_t* WindowFunctionContext::ordered_partition_null_end_pos_
private
int64_t* WindowFunctionContext::ordered_partition_null_start_pos_
private
int8_t* WindowFunctionContext::output_
private

Definition at line 286 of file WindowContext.h.

Referenced by compute(), and output().

QueryPlanHash WindowFunctionContext::partition_cache_key_
private

Definition at line 274 of file WindowContext.h.

int8_t* WindowFunctionContext::partition_end_
private

Definition at line 306 of file WindowContext.h.

Referenced by fillPartitionEnd(), partitionEnd(), and ~WindowFunctionContext().

int8_t* WindowFunctionContext::partition_start_
private

Definition at line 303 of file WindowContext.h.

Referenced by fillPartitionStart(), partitionStart(), and ~WindowFunctionContext().

int64_t* WindowFunctionContext::partition_start_offset_
private
std::shared_ptr<HashJoin> WindowFunctionContext::partitions_
private
std::shared_ptr<RowSetMemoryOwner> WindowFunctionContext::row_set_mem_owner_
private

Definition at line 310 of file WindowContext.h.

Referenced by compute().

std::vector<std::shared_ptr<void> > WindowFunctionContext::segment_trees_owned_
private
std::shared_ptr<std::vector<int64_t> > WindowFunctionContext::sorted_partition_buf_
private

Definition at line 287 of file WindowContext.h.

Referenced by compute(), and sortedPartition().

QueryPlanHash WindowFunctionContext::sorted_partition_cache_key_
private

Definition at line 275 of file WindowContext.h.

Referenced by compute(), and setSortedPartitionCacheKey().

std::vector<const int8_t*> WindowFunctionContext::window_func_expr_columns_
private
std::vector<std::vector<std::shared_ptr<Chunk_NS::Chunk> > > WindowFunctionContext::window_func_expr_columns_owner_
private

Definition at line 290 of file WindowContext.h.

Referenced by addColumnBufferForWindowFunctionExpression().


The documentation for this class was generated from the following files:

WindowContext.h
WindowContext.cpp