OmniSciDB  0264ff685a
RuntimeFunctions.h File Reference
#include <cassert>
#include <cstdint>
#include <ctime>
#include <limits>
#include <type_traits>

Macros

#define EMPTY_KEY_64   std::numeric_limits<int64_t>::max()
 
#define EMPTY_KEY_32   std::numeric_limits<int32_t>::max()
 
#define EMPTY_KEY_16   std::numeric_limits<int16_t>::max()
 
#define EMPTY_KEY_8   std::numeric_limits<int8_t>::max()
 

Enumerations

enum  RuntimeInterruptFlags { INT_CHECK = 0, INT_ABORT = -1, INT_RESET = -2 }
 

Functions

int64_t agg_sum (int64_t *agg, const int64_t val)
 
void agg_max (int64_t *agg, const int64_t val)
 
void agg_min (int64_t *agg, const int64_t val)
 
void agg_sum_double (int64_t *agg, const double val)
 
void agg_max_double (int64_t *agg, const double val)
 
void agg_min_double (int64_t *agg, const double val)
 
int32_t agg_sum_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)
 
int64_t agg_sum_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)
 
void agg_max_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)
 
void agg_min_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)
 
void agg_sum_float_skip_val (int32_t *agg, const float val, const float skip_val)
 
void agg_sum_double_skip_val (int64_t *agg, const double val, const double skip_val)
 
void agg_max_double_skip_val (int64_t *agg, const double val, const double skip_val)
 
void agg_min_double_skip_val (int64_t *agg, const double val, const double skip_val)
 
int32_t agg_sum_int32 (int32_t *agg, const int32_t val)
 
void agg_max_int32 (int32_t *agg, const int32_t val)
 
void agg_max_int16 (int16_t *agg, const int16_t val)
 
void agg_max_int8 (int8_t *agg, const int8_t val)
 
void agg_min_int32 (int32_t *agg, const int32_t val)
 
void agg_min_int16 (int16_t *agg, const int16_t val)
 
void agg_min_int8 (int8_t *agg, const int8_t val)
 
void agg_sum_float (int32_t *agg, const float val)
 
void agg_max_float (int32_t *agg, const float val)
 
void agg_min_float (int32_t *agg, const float val)
 
void agg_max_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)
 
void agg_max_int16_skip_val (int16_t *agg, const int16_t val, const int16_t skip_val)
 
void agg_max_int8_skip_val (int8_t *agg, const int8_t val, const int8_t skip_val)
 
void agg_min_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)
 
void agg_min_int16_skip_val (int16_t *agg, const int16_t val, const int16_t skip_val)
 
void agg_min_int8_skip_val (int8_t *agg, const int8_t val, const int8_t skip_val)
 
void agg_max_float_skip_val (int32_t *agg, const float val, const float skip_val)
 
void agg_min_float_skip_val (int32_t *agg, const float val, const float skip_val)
 
void agg_count_distinct_bitmap (int64_t *agg, const int64_t val, const int64_t min_val)
 
uint32_t key_hash (const int64_t *key, const uint32_t key_qw_count, const uint32_t key_byte_width)
 
int64_t * get_group_value (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad)
 
bool check_interrupt ()
 
bool check_interrupt_init (unsigned command)
 
int64_t * get_group_value_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad)
 
int64_t * get_group_value_columnar (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)
 
int64_t * get_group_value_columnar_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)
 
int64_t * get_group_value_fast (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t bucket, const uint32_t row_size_quad)
 
int64_t * get_group_value_fast_with_original_key (int64_t *groups_buffer, const int64_t key, const int64_t orig_key, const int64_t min_key, const int64_t bucket, const uint32_t row_size_quad)
 
uint32_t get_columnar_group_bin_offset (int64_t *key_base_ptr, const int64_t key, const int64_t min_key, const int64_t bucket)
 
int64_t * get_matching_group_value_perfect_hash (int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_qw_count, const uint32_t row_size_quad)
 
int64_t * get_matching_group_value_perfect_hash_keyless (int64_t *groups_buffer, const uint32_t hashed_index, const uint32_t row_size_quad)
 
int32_t * get_bucketized_hash_slot (int32_t *buff, const int64_t key, const int64_t min_key, const int64_t bucket_normalization=1)
 
int32_t * get_hash_slot (int32_t *buff, const int64_t key, const int64_t min_key)
 
int32_t * get_hash_slot_sharded (int32_t *buff, const int64_t key, const int64_t min_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count)
 
int32_t * get_bucketized_hash_slot_sharded (int32_t *buff, const int64_t key, const int64_t min_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count, const int64_t bucket_normalization)
 
int32_t * get_hash_slot_sharded_opt (int32_t *buff, const int64_t key, const int64_t min_key, const uint32_t entry_count_per_shard, const uint32_t shard, const uint32_t num_shards, const uint32_t device_count)
 
int32_t * get_bucketized_hash_slot_sharded_opt (int32_t *buff, const int64_t key, const int64_t min_key, const uint32_t entry_count_per_shard, const uint32_t shard, const uint32_t num_shards, const uint32_t device_count, const int64_t bucket_normalization)
 
void linear_probabilistic_count (uint8_t *bitmap, const uint32_t bitmap_bytes, const uint8_t *key_bytes, const uint32_t key_len)
 
int64_t fixed_width_int_decode_noinline (const int8_t *byte_stream, const int32_t byte_width, const int64_t pos)
 
int64_t fixed_width_unsigned_decode_noinline (const int8_t *byte_stream, const int32_t byte_width, const int64_t pos)
 
float fixed_width_float_decode_noinline (const int8_t *byte_stream, const int64_t pos)
 
double fixed_width_double_decode_noinline (const int8_t *byte_stream, const int64_t pos)
 
int64_t fixed_width_small_date_decode_noinline (const int8_t *byte_stream, const int32_t byte_width, const int32_t null_val, const int64_t ret_null_val, const int64_t pos)
 
int8_t * extract_str_ptr_noinline (const uint64_t str_and_len)
 
int32_t extract_str_len_noinline (const uint64_t str_and_len)
 
template<typename T = int64_t>
T get_empty_key ()
 
template<>
int32_t get_empty_key ()
 

Macro Definition Documentation

◆ EMPTY_KEY_16

#define EMPTY_KEY_16   std::numeric_limits<int16_t>::max()

Definition at line 116 of file RuntimeFunctions.h.

◆ EMPTY_KEY_32

#define EMPTY_KEY_32   std::numeric_limits<int32_t>::max()

Definition at line 115 of file RuntimeFunctions.h.

Referenced by get_empty_key().

◆ EMPTY_KEY_64

#define EMPTY_KEY_64   std::numeric_limits<int64_t>::max()

Definition at line 114 of file RuntimeFunctions.h.

Referenced by get_empty_key().

◆ EMPTY_KEY_8

#define EMPTY_KEY_8   std::numeric_limits<int8_t>::max()

Definition at line 117 of file RuntimeFunctions.h.

Enumeration Type Documentation

◆ RuntimeInterruptFlags

Enumerator
INT_CHECK 
INT_ABORT 
INT_RESET 

Definition at line 130 of file RuntimeFunctions.h.

Function Documentation

◆ agg_count_distinct_bitmap()

void agg_count_distinct_bitmap ( int64_t *  agg,
const int64_t  val,
const int64_t  min_val 
)

Definition at line 302 of file RuntimeFunctions.cpp.

Referenced by agg_count_distinct_bitmap_skip_val(), WindowFunctionContext::fillPartitionEnd(), WindowFunctionContext::fillPartitionStart(), anonymous_namespace{WindowContext.cpp}::index_to_partition_end(), and InValuesBitmap::InValuesBitmap().

{
  const uint64_t bitmap_idx = val - min_val;
  reinterpret_cast<int8_t*>(*agg)[bitmap_idx >> 3] |= (1 << (bitmap_idx & 7));
}
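The aggregate slot holds a pointer to a caller-allocated bitmap, and each input sets the bit at offset val - min_val, so the number of set bits equals the number of distinct values in the range. A minimal driver sketch, assuming RuntimeFunctions.h is included; the buffer sizing and the counting loop are illustrative, not part of this header:

#include <cstdint>
#include <vector>

// Hypothetical driver: exact COUNT(DISTINCT) over values in [min_val, max_val].
int64_t count_distinct_with_bitmap(const std::vector<int64_t>& vals,
                                   const int64_t min_val,
                                   const int64_t max_val) {
  const size_t range = static_cast<size_t>(max_val - min_val) + 1;
  std::vector<int8_t> bitmap((range + 7) / 8, 0);  // one bit per possible value
  int64_t agg = reinterpret_cast<int64_t>(bitmap.data());  // slot stores the pointer
  for (const auto v : vals) {
    agg_count_distinct_bitmap(&agg, v, min_val);
  }
  int64_t distinct = 0;
  for (const auto byte : bitmap) {
    distinct += __builtin_popcount(static_cast<uint8_t>(byte));
  }
  return distinct;
}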

◆ agg_max()

void agg_max ( int64_t *  agg,
const int64_t  val 
)

Definition at line 384 of file RuntimeFunctions.cpp.

Referenced by agg_count_int32_skip_val().

{
  *agg = std::max(*agg, val);
}

◆ agg_max_double()

void agg_max_double ( int64_t *  agg,
const double  val 
)

Definition at line 608 of file RuntimeFunctions.cpp.

{
  const auto r = std::max(*reinterpret_cast<const double*>(agg), val);
  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&r)));
}
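The double/float aggregates reuse the integer aggregate slots: the floating-point result is bit-copied into the int64_t (or int32_t) through may_alias_ptr rather than converted. A standalone sketch of the same bit copy using std::memcpy, the portable way to express it (may_alias_ptr itself is defined elsewhere in the codebase):

#include <cstdint>
#include <cstring>

// Store a double into an int64_t aggregate slot without a value conversion
// and without violating strict aliasing.
inline void store_double_in_slot(int64_t* slot, const double r) {
  static_assert(sizeof(double) == sizeof(int64_t), "slot must fit a double");
  std::memcpy(slot, &r, sizeof(double));
}

// Read it back the same way.
inline double load_double_from_slot(const int64_t* slot) {
  double d;
  std::memcpy(&d, slot, sizeof(double));
  return d;
}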

◆ agg_max_double_skip_val()

void agg_max_double_skip_val ( int64_t *  agg,
const double  val,
const double  skip_val 
)

Referenced by Executor::reduceResults().


◆ agg_max_float()

void agg_max_float ( int32_t *  agg,
const float  val 
)

Definition at line 649 of file RuntimeFunctions.cpp.

{
  const auto r = std::max(*reinterpret_cast<const float*>(agg), val);
  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&r)));
}

◆ agg_max_float_skip_val()

void agg_max_float_skip_val ( int32_t *  agg,
const float  val,
const float  skip_val 
)

Referenced by Executor::reduceResults().


◆ agg_max_int16()

void agg_max_int16 ( int16_t *  agg,
const int16_t  val 
)

Referenced by agg_count_int32_skip_val().


◆ agg_max_int16_skip_val()

void agg_max_int16_skip_val ( int16_t *  agg,
const int16_t  val,
const int16_t  skip_val 
)

◆ agg_max_int32()

void agg_max_int32 ( int32_t *  agg,
const int32_t  val 
)

Referenced by agg_count_int32_skip_val().


◆ agg_max_int32_skip_val()

void agg_max_int32_skip_val ( int32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)

◆ agg_max_int8()

void agg_max_int8 ( int8_t *  agg,
const int8_t  val 
)

Referenced by agg_count_int32_skip_val().


◆ agg_max_int8_skip_val()

void agg_max_int8_skip_val ( int8_t *  agg,
const int8_t  val,
const int8_t  skip_val 
)

◆ agg_max_skip_val()

void agg_max_skip_val ( int64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Referenced by Executor::reduceResults().


◆ agg_min()

void agg_min ( int64_t *  agg,
const int64_t  val 
)

Definition at line 388 of file RuntimeFunctions.cpp.

Referenced by agg_count_int32_skip_val().

{
  *agg = std::min(*agg, val);
}

◆ agg_min_double()

void agg_min_double ( int64_t *  agg,
const double  val 
)

Definition at line 613 of file RuntimeFunctions.cpp.

{
  const auto r = std::min(*reinterpret_cast<const double*>(agg), val);
  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&r)));
}

◆ agg_min_double_skip_val()

void agg_min_double_skip_val ( int64_t *  agg,
const double  val,
const double  skip_val 
)

Referenced by Executor::reduceResults().


◆ agg_min_float()

void agg_min_float ( int32_t *  agg,
const float  val 
)

Definition at line 654 of file RuntimeFunctions.cpp.

{
  const auto r = std::min(*reinterpret_cast<const float*>(agg), val);
  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&r)));
}

◆ agg_min_float_skip_val()

void agg_min_float_skip_val ( int32_t *  agg,
const float  val,
const float  skip_val 
)

Referenced by Executor::reduceResults().


◆ agg_min_int16()

void agg_min_int16 ( int16_t *  agg,
const int16_t  val 
)

Referenced by agg_count_int32_skip_val().


◆ agg_min_int16_skip_val()

void agg_min_int16_skip_val ( int16_t *  agg,
const int16_t  val,
const int16_t  skip_val 
)

◆ agg_min_int32()

void agg_min_int32 ( int32_t *  agg,
const int32_t  val 
)

Referenced by agg_count_int32_skip_val().


◆ agg_min_int32_skip_val()

void agg_min_int32_skip_val ( int32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)

◆ agg_min_int8()

void agg_min_int8 ( int8_t *  agg,
const int8_t  val 
)

Referenced by agg_count_int32_skip_val().


◆ agg_min_int8_skip_val()

void agg_min_int8_skip_val ( int8_t *  agg,
const int8_t  val,
const int8_t  skip_val 
)

◆ agg_min_skip_val()

void agg_min_skip_val ( int64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Referenced by Executor::reduceResults().


◆ agg_sum()

int64_t agg_sum ( int64_t *  agg,
const int64_t  val 
)

Definition at line 378 of file RuntimeFunctions.cpp.

Referenced by agg_sum_skip_val().

{
  const auto old = *agg;
  *agg += val;
  return old;
}

◆ agg_sum_double()

void agg_sum_double ( int64_t *  agg,
const double  val 
)

Definition at line 603 of file RuntimeFunctions.cpp.

{
  const auto r = *reinterpret_cast<const double*>(agg) + val;
  *agg = *reinterpret_cast<const int64_t*>(may_alias_ptr(&r));
}

◆ agg_sum_double_skip_val()

void agg_sum_double_skip_val ( int64_t *  agg,
const double  val,
const double  skip_val 
)

Referenced by Executor::reduceResults().


◆ agg_sum_float()

void agg_sum_float ( int32_t *  agg,
const float  val 
)

Definition at line 644 of file RuntimeFunctions.cpp.

{
  const auto r = *reinterpret_cast<const float*>(agg) + val;
  *agg = *reinterpret_cast<const int32_t*>(may_alias_ptr(&r));
}

◆ agg_sum_float_skip_val()

void agg_sum_float_skip_val ( int32_t *  agg,
const float  val,
const float  skip_val 
)

Referenced by Executor::reduceResults().


◆ agg_sum_int32()

int32_t agg_sum_int32 ( int32_t *  agg,
const int32_t  val 
)

Definition at line 436 of file RuntimeFunctions.cpp.

Referenced by agg_sum_int32_skip_val().

{
  const auto old = *agg;
  *agg += val;
  return old;
}

◆ agg_sum_int32_skip_val()

int32_t agg_sum_int32_skip_val ( int32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)

Definition at line 521 of file RuntimeFunctions.cpp.

References agg_sum_int32().

{
  const auto old = *agg;
  if (val != skip_val) {
    if (old != skip_val) {
      return agg_sum_int32(agg, val);
    } else {
      *agg = val;
    }
  }
  return old;
}

◆ agg_sum_skip_val()

int64_t agg_sum_skip_val ( int64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Definition at line 507 of file RuntimeFunctions.cpp.

References agg_sum().

Referenced by Executor::reduceResults().

{
  const auto old = *agg;
  if (val != skip_val) {
    if (old != skip_val) {
      return agg_sum(agg, val);
    } else {
      *agg = val;
    }
  }
  return old;
}
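skip_val is the column's null sentinel: null inputs are ignored, and a slot still holding the sentinel is replaced by the first real value rather than summed with it. A small illustrative driver, assuming RuntimeFunctions.h is included (the sentinel choice here is hypothetical):

#include <cstdint>
#include <limits>
#include <vector>

int main() {
  // Treat int64_t min as NULL for this example.
  const int64_t null_sentinel = std::numeric_limits<int64_t>::min();
  const std::vector<int64_t> column = {3, null_sentinel, 4, null_sentinel, 5};

  int64_t slot = null_sentinel;  // an empty aggregate starts at the sentinel
  for (const auto v : column) {
    agg_sum_skip_val(&slot, v, null_sentinel);
  }
  // slot == 12: the two nulls were skipped.
  return slot == 12 ? 0 : 1;
}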

◆ check_interrupt()

bool check_interrupt ( )

Definition at line 1411 of file RuntimeFunctions.cpp.

References check_interrupt_init(), and INT_CHECK.

{
  if (check_interrupt_init(static_cast<unsigned>(INT_CHECK))) {
    return true;
  }
  return false;
}

◆ check_interrupt_init()

bool check_interrupt_init ( unsigned  command)

Definition at line 1418 of file RuntimeFunctions.cpp.

References INT_ABORT, INT_CHECK, and INT_RESET.

Referenced by check_interrupt(), Executor::interrupt(), and Executor::resetInterrupt().

{
  static std::atomic_bool runtime_interrupt_flag{false};

  if (command == static_cast<unsigned>(INT_CHECK)) {
    if (runtime_interrupt_flag.load()) {
      return true;
    }
    return false;
  }
  if (command == static_cast<unsigned>(INT_ABORT)) {
    runtime_interrupt_flag.store(true);
    return false;
  }
  if (command == static_cast<unsigned>(INT_RESET)) {
    runtime_interrupt_flag.store(false);
    return false;
  }
  return false;
}
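The three RuntimeInterruptFlags commands drive a single function-local atomic: INT_ABORT raises the flag, running kernels poll it via check_interrupt() (which passes INT_CHECK), and INT_RESET clears it before the next query. A sketch of the round trip, assuming RuntimeFunctions.h is included:

#include <cassert>

int main() {
  assert(!check_interrupt());  // fresh flag: nothing pending

  // Another thread (e.g. servicing a user cancel request) raises the flag...
  check_interrupt_init(static_cast<unsigned>(INT_ABORT));
  // ...and the running kernel observes it on its next poll.
  assert(check_interrupt());

  // Clear the flag before the next query starts.
  check_interrupt_init(static_cast<unsigned>(INT_RESET));
  assert(!check_interrupt());
  return 0;
}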

◆ extract_str_len_noinline()

int32_t extract_str_len_noinline ( const uint64_t  str_and_len)

Definition at line 1233 of file RuntimeFunctions.cpp.

References extract_str_len().

Referenced by string_compress().

{
  return extract_str_len(str_and_len);
}

◆ extract_str_ptr_noinline()

int8_t* extract_str_ptr_noinline ( const uint64_t  str_and_len)

Definition at line 1229 of file RuntimeFunctions.cpp.

References extract_str_ptr().

Referenced by string_compress().

{
  return extract_str_ptr(str_and_len);
}
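Both _noinline wrappers unpack a string that has been squeezed into a single uint64_t, a pointer in the low bits and the length in the high bits. A hedged sketch of one plausible packing (the 48/16-bit split is an assumption for illustration, not taken from this header):

#include <cstdint>

// Hypothetical packing: low 48 bits hold the pointer, high 16 the length.
inline uint64_t pack_str(const int8_t* ptr, const int32_t len) {
  return (static_cast<uint64_t>(static_cast<uint16_t>(len)) << 48) |
         (reinterpret_cast<uint64_t>(ptr) & ((1ULL << 48) - 1));
}

inline int8_t* unpack_str_ptr(const uint64_t str_and_len) {
  return reinterpret_cast<int8_t*>(str_and_len & ((1ULL << 48) - 1));
}

inline int32_t unpack_str_len(const uint64_t str_and_len) {
  return static_cast<int32_t>(str_and_len >> 48);
}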

◆ fixed_width_double_decode_noinline()

double fixed_width_double_decode_noinline ( const int8_t *  byte_stream,
const int64_t  pos 
)

Definition at line 126 of file DecodersImpl.h.

References ALWAYS_INLINE, DEVICE, fixed_width_double_decode(), and SUFFIX.

Referenced by compute_bucket_sizes_impl(), JoinColumnIterator::getElementSwitch(), result_set::lazy_decode(), and OverlapsKeyHandler::operator()().

{
  return SUFFIX(fixed_width_double_decode)(byte_stream, pos);
}

◆ fixed_width_float_decode_noinline()

float fixed_width_float_decode_noinline ( const int8_t *  byte_stream,
const int64_t  pos 
)

Definition at line 113 of file DecodersImpl.h.

References ALWAYS_INLINE, DEVICE, fixed_width_float_decode(), and SUFFIX.

Referenced by result_set::lazy_decode().

{
  return SUFFIX(fixed_width_float_decode)(byte_stream, pos);
}

◆ fixed_width_int_decode_noinline()

int64_t fixed_width_int_decode_noinline ( const int8_t *  byte_stream,
const int32_t  byte_width,
const int64_t  pos 
)

Definition at line 83 of file DecodersImpl.h.

References DEVICE, fixed_width_int_decode(), NEVER_INLINE, and SUFFIX.

Referenced by JoinColumnIterator::getElementSwitch(), and result_set::lazy_decode().

{
  return SUFFIX(fixed_width_int_decode)(byte_stream, byte_width, pos);
}
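These decoders read element pos from a packed column whose values occupy byte_width bytes each and widen the result to 64 bits. A minimal sketch of the underlying idea, with sign extension falling out of the appropriately sized load (illustrative, not the actual fixed_width_int_decode implementation):

#include <cstdint>
#include <cstring>

inline int64_t decode_fixed_width_int(const int8_t* byte_stream,
                                      const int32_t byte_width,
                                      const int64_t pos) {
  const int8_t* p = byte_stream + pos * byte_width;
  switch (byte_width) {
    case 1:
      return *p;  // int8_t widens with sign extension
    case 2: {
      int16_t v;
      std::memcpy(&v, p, sizeof v);
      return v;
    }
    case 4: {
      int32_t v;
      std::memcpy(&v, p, sizeof v);
      return v;
    }
    default: {
      int64_t v;
      std::memcpy(&v, p, sizeof v);
      return v;
    }
  }
}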

◆ fixed_width_small_date_decode_noinline()

int64_t fixed_width_small_date_decode_noinline ( const int8_t *  byte_stream,
const int32_t  byte_width,
const int32_t  null_val,
const int64_t  ret_null_val,
const int64_t  pos 
)

Definition at line 141 of file DecodersImpl.h.

References fixed_width_small_date_decode(), and SUFFIX.

Referenced by JoinColumnIterator::getElementSwitch(), and result_set::lazy_decode().

{
  return SUFFIX(fixed_width_small_date_decode)(
      byte_stream, byte_width, null_val, ret_null_val, pos);
}

◆ fixed_width_unsigned_decode_noinline()

int64_t fixed_width_unsigned_decode_noinline ( const int8_t *  byte_stream,
const int32_t  byte_width,
const int64_t  pos 
)

Definition at line 90 of file DecodersImpl.h.

References ALWAYS_INLINE, DEVICE, fixed_width_unsigned_decode(), and SUFFIX.

Referenced by JoinColumnIterator::getElementSwitch(), and result_set::lazy_decode().

{
  return SUFFIX(fixed_width_unsigned_decode)(byte_stream, byte_width, pos);
}

◆ get_bucketized_hash_slot()

int32_t* get_bucketized_hash_slot ( int32_t *  buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  bucket_normalization = 1 
)

Definition at line 31 of file JoinHashImpl.h.

Referenced by bucketized_hash_join_idx(), count_matches_bucketized(), fill_hash_join_buff_bucketized(), and fill_row_ids_bucketized().

{
  return buff + (key - min_key) / bucket_normalization;
}
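For a perfect-hash join over a dense key range, the slot is simply the key's offset from the smallest key, divided by the bucket width. A short worked check, assuming RuntimeFunctions.h is included:

#include <cassert>
#include <cstdint>

int main() {
  int32_t buff[16] = {};  // covers keys 100..259 with bucket width 10
  // Keys 100..109 share slot 0; key 123 falls into slot 2.
  assert(get_bucketized_hash_slot(buff, 100, 100, 10) == &buff[0]);
  assert(get_bucketized_hash_slot(buff, 109, 100, 10) == &buff[0]);
  assert(get_bucketized_hash_slot(buff, 123, 100, 10) == &buff[2]);
  return 0;
}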

◆ get_bucketized_hash_slot_sharded()

int32_t* get_bucketized_hash_slot_sharded ( int32_t *  buff,
const int64_t  key,
const int64_t  min_key,
const uint32_t  entry_count_per_shard,
const uint32_t  num_shards,
const uint32_t  device_count,
const int64_t  bucket_normalization 
)

Definition at line 45 of file JoinHashImpl.h.

References SHARD_FOR_KEY.

Referenced by fill_row_ids_sharded_bucketized().

{
  const uint32_t shard = SHARD_FOR_KEY(key, num_shards);
  const uint32_t shard_buffer_index =
      shard / device_count;  // shard sub-buffer index within `buff`
  int32_t* shard_buffer = buff + shard_buffer_index * entry_count_per_shard;
  return shard_buffer + (key - min_key) / bucket_normalization / num_shards;
}

◆ get_bucketized_hash_slot_sharded_opt()

int32_t* get_bucketized_hash_slot_sharded_opt ( int32_t *  buff,
const int64_t  key,
const int64_t  min_key,
const uint32_t  entry_count_per_shard,
const uint32_t  shard,
const uint32_t  num_shards,
const uint32_t  device_count,
const int64_t  bucket_normalization 
)

Definition at line 74 of file JoinHashImpl.h.

Referenced by fill_hash_join_buff_sharded_bucketized().

{
  const uint32_t shard_buffer_index =
      shard / device_count;  // shard sub-buffer index within `buff`
  int32_t* shard_buffer = buff + shard_buffer_index * entry_count_per_shard;
  return shard_buffer + (key - min_key) / bucket_normalization / num_shards;
}

◆ get_columnar_group_bin_offset()

uint32_t get_columnar_group_bin_offset ( int64_t *  key_base_ptr,
const int64_t  key,
const int64_t  min_key,
const int64_t  bucket 
)

Definition at line 229 of file GroupByRuntime.cpp.

References EMPTY_KEY_64.

{
  int64_t off = key - min_key;
  if (bucket) {
    off /= bucket;
  }
  if (key_base_ptr[off] == EMPTY_KEY_64) {
    key_base_ptr[off] = key;
  }
  return off;
}

◆ get_empty_key() [1/2]

template<typename T = int64_t>
T get_empty_key ( )
inline

Definition at line 259 of file RuntimeFunctions.h.

References EMPTY_KEY_64.

{
  static_assert(std::is_same<T, int64_t>::value,
                "Unsupported template parameter other than int64_t for now");
  return EMPTY_KEY_64;
}

◆ get_empty_key() [2/2]

template<>
int32_t get_empty_key ( )
inline

Definition at line 266 of file RuntimeFunctions.h.

References EMPTY_KEY_32.

{
  return EMPTY_KEY_32;
}
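Empty hash-table slots are marked with the key type's maximum value (the EMPTY_KEY_* macros above), so a fresh group-by buffer can be initialized with get_empty_key<T>() and a slot comparing equal to it is known to be unoccupied. A short illustration, assuming RuntimeFunctions.h is included:

#include <cstdint>
#include <vector>

int main() {
  // Initialize a 64-bit group-by key buffer to "all slots empty".
  std::vector<int64_t> keys(128, get_empty_key<int64_t>());
  const bool slot_free = (keys[0] == EMPTY_KEY_64);
  return slot_free ? 0 : 1;
}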

◆ get_group_value()

int64_t* get_group_value ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width,
const uint32_t  row_size_quad 
)

Definition at line 26 of file GroupByRuntime.cpp.

References DEVICE, dynamic_watchdog(), get_matching_group_value(), key_hash(), and NEVER_INLINE.

Referenced by ResultSetStorage::moveOneEntryToBuffer().

{
  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value(
      groups_buffer, h, key, key_count, key_width, row_size_quad);
  if (matching_group) {
    return matching_group;
  }
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value(
        groups_buffer, h_probe, key, key_count, key_width, row_size_quad);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
  }
  return NULL;
}
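This is open addressing with linear probing: hash the key, then on a collision walk the table one slot at a time (wrapping around) until the key's group or a free slot turns up; arriving back at the starting slot means the buffer is full. The same scheme in miniature over a single-column table (the sentinel, table size, and hash here are illustrative):

#include <cstdint>
#include <limits>

constexpr uint32_t kEntries = 8;
constexpr int64_t kEmpty = std::numeric_limits<int64_t>::max();

// Toy linear-probing insert/lookup: returns the slot for `key`,
// or nullptr once the table is exhausted.
int64_t* probe(int64_t* table, const int64_t key) {
  const uint32_t h = static_cast<uint32_t>(key) % kEntries;  // stand-in hash
  for (uint32_t i = 0; i < kEntries; ++i) {
    const uint32_t slot = (h + i) % kEntries;
    if (table[slot] == kEmpty) {
      table[slot] = key;  // claim the free slot
      return &table[slot];
    }
    if (table[slot] == key) {
      return &table[slot];  // existing group
    }
  }
  return nullptr;
}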

◆ get_group_value_columnar()

int64_t* get_group_value_columnar ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_qw_count 
)

Definition at line 140 of file GroupByRuntime.cpp.

References get_matching_group_value_columnar(), and key_hash().

Referenced by ResultSetStorage::moveOneEntryToBuffer().

{
  uint32_t h = key_hash(key, key_qw_count, sizeof(int64_t)) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value_columnar(
      groups_buffer, h, key, key_qw_count, groups_buffer_entry_count);
  if (matching_group) {
    return matching_group;
  }
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value_columnar(
        groups_buffer, h_probe, key, key_qw_count, groups_buffer_entry_count);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
  }
  return NULL;
}

◆ get_group_value_columnar_with_watchdog()

int64_t* get_group_value_columnar_with_watchdog ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_qw_count 
)

Definition at line 163 of file GroupByRuntime.cpp.

References dynamic_watchdog(), get_matching_group_value_columnar(), and key_hash().

{
  uint32_t h = key_hash(key, key_qw_count, sizeof(int64_t)) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value_columnar(
      groups_buffer, h, key, key_qw_count, groups_buffer_entry_count);
  if (matching_group) {
    return matching_group;
  }
  uint32_t watchdog_countdown = 100;
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value_columnar(
        groups_buffer, h_probe, key, key_qw_count, groups_buffer_entry_count);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
    if (--watchdog_countdown == 0) {
      if (dynamic_watchdog()) {
        return NULL;
      }
      watchdog_countdown = 100;
    }
  }
  return NULL;
}
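The _with_watchdog variants consult dynamic_watchdog() only once every 100 probe steps, so a pathological probe chain can still be aborted without paying the check on every iteration. The decimation pattern in isolation (the predicate name here is a stand-in):

#include <cstdint>

bool watchdog_fired();  // hypothetical stand-in for dynamic_watchdog()

// Run a search loop, consulting the watchdog every 100 iterations
// to keep the common path cheap.
bool search_with_watchdog(bool (*step)()) {
  uint32_t watchdog_countdown = 100;
  while (!step()) {
    if (--watchdog_countdown == 0) {
      if (watchdog_fired()) {
        return false;  // give up: watchdog tripped
      }
      watchdog_countdown = 100;  // re-arm
    }
  }
  return true;
}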

◆ get_group_value_fast()

int64_t* get_group_value_fast ( int64_t *  groups_buffer,
const int64_t  key,
const int64_t  min_key,
const int64_t  bucket,
const uint32_t  row_size_quad 
)

Definition at line 193 of file GroupByRuntime.cpp.

References EMPTY_KEY_64.

{
  int64_t key_diff = key - min_key;
  if (bucket) {
    key_diff /= bucket;
  }
  int64_t off = key_diff * row_size_quad;
  if (groups_buffer[off] == EMPTY_KEY_64) {
    groups_buffer[off] = key;
  }
  return groups_buffer + off + 1;
}
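The perfect-hash fast path needs no probing: each key owns a fixed row of row_size_quad quadwords, the row's first slot holds the key itself, and the returned pointer addresses the aggregates that follow. A worked check, assuming RuntimeFunctions.h is included:

#include <algorithm>
#include <cassert>
#include <cstdint>

int main() {
  int64_t groups_buffer[64];
  std::fill_n(groups_buffer, 64, EMPTY_KEY_64);
  // min_key = 10, no bucketing, 4 quadwords per row:
  // key 12 owns the row at offset (12 - 10) * 4 = 8.
  int64_t* aggs = get_group_value_fast(groups_buffer, 12, 10, 0, 4);
  assert(groups_buffer[8] == 12);     // key written into its slot
  assert(aggs == groups_buffer + 9);  // aggregates follow the key
  return 0;
}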

◆ get_group_value_fast_with_original_key()

int64_t* get_group_value_fast_with_original_key ( int64_t *  groups_buffer,
const int64_t  key,
const int64_t  orig_key,
const int64_t  min_key,
const int64_t  bucket,
const uint32_t  row_size_quad 
)

Definition at line 210 of file GroupByRuntime.cpp.

References ALWAYS_INLINE, DEVICE, and EMPTY_KEY_64.

{
  int64_t key_diff = key - min_key;
  if (bucket) {
    key_diff /= bucket;
  }
  int64_t off = key_diff * row_size_quad;
  if (groups_buffer[off] == EMPTY_KEY_64) {
    groups_buffer[off] = orig_key;
  }
  return groups_buffer + off + 1;
}

◆ get_group_value_with_watchdog()

int64_t* get_group_value_with_watchdog ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width,
const uint32_t  row_size_quad 
)

Definition at line 53 of file GroupByRuntime.cpp.

References DEVICE, dynamic_watchdog(), get_matching_group_value(), key_hash(), and NEVER_INLINE.

{
  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value(
      groups_buffer, h, key, key_count, key_width, row_size_quad);
  if (matching_group) {
    return matching_group;
  }
  uint32_t watchdog_countdown = 100;
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value(
        groups_buffer, h_probe, key, key_count, key_width, row_size_quad);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
    if (--watchdog_countdown == 0) {
      if (dynamic_watchdog()) {
        return NULL;
      }
      watchdog_countdown = 100;
    }
  }
  return NULL;
}

◆ get_hash_slot()

int32_t* get_hash_slot ( int32_t *  buff,
const int64_t  key,
const int64_t  min_key 
)

Definition at line 39 of file JoinHashImpl.h.

Referenced by count_matches(), fill_hash_join_buff(), fill_row_ids(), and hash_join_idx().

{
  return buff + (key - min_key);
}

◆ get_hash_slot_sharded()

int32_t* get_hash_slot_sharded ( int32_t *  buff,
const int64_t  key,
const int64_t  min_key,
const uint32_t  entry_count_per_shard,
const uint32_t  num_shards,
const uint32_t  device_count 
)

Definition at line 60 of file JoinHashImpl.h.

References SHARD_FOR_KEY.

Referenced by count_matches_sharded(), fill_row_ids_sharded(), and hash_join_idx_sharded().

{
  const uint32_t shard = SHARD_FOR_KEY(key, num_shards);
  const uint32_t shard_buffer_index =
      shard / device_count;  // shard sub-buffer index within `buff`
  int32_t* shard_buffer = buff + shard_buffer_index * entry_count_per_shard;
  return shard_buffer + (key - min_key) / num_shards;
}
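Here `buff` concatenates the sub-buffers of the shards owned by one device: SHARD_FOR_KEY picks the shard, shard / device_count locates that shard's sub-buffer within the device's allocation, and (key - min_key) / num_shards indexes within the shard, since each shard only ever sees every num_shards-th key. As a worked instance, assuming SHARD_FOR_KEY reduces to key % num_shards: with min_key = 0, num_shards = 4, device_count = 2, and entry_count_per_shard = 100, key 10 maps to shard 2, which is the second shard on its device (2 / 2 = 1), so its sub-buffer starts at buff + 100 and the slot within it is 10 / 4 = 2, giving buff + 102.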

◆ get_hash_slot_sharded_opt()

int32_t* get_hash_slot_sharded_opt ( int32_t *  buff,
const int64_t  key,
const int64_t  min_key,
const uint32_t  entry_count_per_shard,
const uint32_t  shard,
const uint32_t  num_shards,
const uint32_t  device_count 
)

Definition at line 89 of file JoinHashImpl.h.

Referenced by fill_hash_join_buff_sharded().

{
  const uint32_t shard_buffer_index =
      shard / device_count;  // shard sub-buffer index within `buff`
  int32_t* shard_buffer = buff + shard_buffer_index * entry_count_per_shard;
  return shard_buffer + (key - min_key) / num_shards;
}

◆ get_matching_group_value_perfect_hash()

int64_t* get_matching_group_value_perfect_hash ( int64_t *  groups_buffer,
const uint32_t  h,
const int64_t *  key,
const uint32_t  key_qw_count,
const uint32_t  row_size_quad 
)

Definition at line 1153 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64.

{
  uint32_t off = hashed_index * row_size_quad;
  if (groups_buffer[off] == EMPTY_KEY_64) {
    for (uint32_t i = 0; i < key_count; ++i) {
      groups_buffer[off + i] = key[i];
    }
  }
  return groups_buffer + off + key_count;
}

◆ get_matching_group_value_perfect_hash_keyless()

int64_t* get_matching_group_value_perfect_hash_keyless ( int64_t *  groups_buffer,
const uint32_t  hashed_index,
const uint32_t  row_size_quad 
)

For a particular hashed index (only used with multi-column perfect hash group-by), returns the row-wise offset of the group in the output buffer. Since it is intended for keyless hash use, it assumes no group key columns precede the aggregates in the output buffer.

Definition at line 1174 of file RuntimeFunctions.cpp.

{
  return groups_buffer + row_size_quad * hashed_index;
}
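For example, with row_size_quad = 3 the group at hashed_index = 5 begins at groups_buffer + 15, and all three quadwords of that row belong to aggregates, since no key is stored in keyless mode.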

◆ key_hash()

uint32_t key_hash ( const int64_t *  key,
const uint32_t  key_qw_count,
const uint32_t  key_byte_width 
)

Definition at line 20 of file GroupByRuntime.cpp.

References MurmurHash1().

Referenced by get_group_value(), get_group_value_columnar(), anonymous_namespace{ResultSetReduction.cpp}::get_group_value_columnar_reduction(), get_group_value_columnar_slot(), get_group_value_columnar_slot_with_watchdog(), get_group_value_columnar_with_watchdog(), result_set::get_group_value_reduction(), and get_group_value_with_watchdog().

{
  return MurmurHash1(key, key_byte_width * key_count, 0);
}

◆ linear_probabilistic_count()

void linear_probabilistic_count ( uint8_t *  bitmap,
const uint32_t  bitmap_bytes,
const uint8_t *  key_bytes,
const uint32_t  key_len 
)

Definition at line 1319 of file RuntimeFunctions.cpp.

References MurmurHash1().

{
  const uint32_t bit_pos = MurmurHash1(key_bytes, key_len, 0) % (bitmap_bytes * 8);
  const uint32_t word_idx = bit_pos / 32;
  const uint32_t bit_idx = bit_pos % 32;
  reinterpret_cast<uint32_t*>(bitmap)[word_idx] |= 1 << bit_idx;
}
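This is the update step of linear (probabilistic) counting: every input sets one hash-addressed bit, and a distinct-count estimate can later be recovered from the fraction of bits still zero, n ~= -m * ln(V), where m is the bitmap size in bits and V the zero fraction. A hedged sketch of that standard estimator (it is not part of this header):

#include <cmath>
#include <cstdint>

double estimate_distinct(const uint8_t* bitmap, const uint32_t bitmap_bytes) {
  const uint32_t m = bitmap_bytes * 8;
  uint32_t ones = 0;
  for (uint32_t i = 0; i < bitmap_bytes; ++i) {
    ones += __builtin_popcount(bitmap[i]);  // set bits, independent of bit order
  }
  const uint32_t zeros = m - ones;
  if (zeros == 0) {
    return m;  // bitmap saturated; the estimate degenerates
  }
  return -static_cast<double>(m) * std::log(static_cast<double>(zeros) / m);
}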