OmniSciDB  1dac507f6e
RuntimeFunctions.h File Reference
#include <cassert>
#include <cstdint>
#include <ctime>
#include <limits>
#include <type_traits>

Macros

#define EMPTY_KEY_64   std::numeric_limits<int64_t>::max()
 
#define EMPTY_KEY_32   std::numeric_limits<int32_t>::max()
 
#define EMPTY_KEY_16   std::numeric_limits<int16_t>::max()
 
#define EMPTY_KEY_8   std::numeric_limits<int8_t>::max()
 

Functions

int64_t agg_sum (int64_t *agg, const int64_t val)
 
void agg_max (int64_t *agg, const int64_t val)
 
void agg_min (int64_t *agg, const int64_t val)
 
void agg_sum_double (int64_t *agg, const double val)
 
void agg_max_double (int64_t *agg, const double val)
 
void agg_min_double (int64_t *agg, const double val)
 
int32_t agg_sum_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)
 
int64_t agg_sum_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)
 
void agg_max_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)
 
void agg_min_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)
 
void agg_sum_float_skip_val (int32_t *agg, const float val, const float skip_val)
 
void agg_sum_double_skip_val (int64_t *agg, const double val, const double skip_val)
 
void agg_max_double_skip_val (int64_t *agg, const double val, const double skip_val)
 
void agg_min_double_skip_val (int64_t *agg, const double val, const double skip_val)
 
int32_t agg_sum_int32 (int32_t *agg, const int32_t val)
 
void agg_max_int32 (int32_t *agg, const int32_t val)
 
void agg_max_int16 (int16_t *agg, const int16_t val)
 
void agg_max_int8 (int8_t *agg, const int8_t val)
 
void agg_min_int32 (int32_t *agg, const int32_t val)
 
void agg_min_int16 (int16_t *agg, const int16_t val)
 
void agg_min_int8 (int8_t *agg, const int8_t val)
 
void agg_sum_float (int32_t *agg, const float val)
 
void agg_max_float (int32_t *agg, const float val)
 
void agg_min_float (int32_t *agg, const float val)
 
void agg_max_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)
 
void agg_max_int16_skip_val (int16_t *agg, const int16_t val, const int16_t skip_val)
 
void agg_max_int8_skip_val (int8_t *agg, const int8_t val, const int8_t skip_val)
 
void agg_min_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)
 
void agg_min_int16_skip_val (int16_t *agg, const int16_t val, const int16_t skip_val)
 
void agg_min_int8_skip_val (int8_t *agg, const int8_t val, const int8_t skip_val)
 
void agg_max_float_skip_val (int32_t *agg, const float val, const float skip_val)
 
void agg_min_float_skip_val (int32_t *agg, const float val, const float skip_val)
 
void agg_count_distinct_bitmap (int64_t *agg, const int64_t val, const int64_t min_val)
 
uint32_t key_hash (const int64_t *key, const uint32_t key_qw_count, const uint32_t key_byte_width)
 
int64_t * get_group_value (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t *init_vals=nullptr)
 
int64_t * get_group_value_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t *init_vals=nullptr)
 
int64_t * get_group_value_columnar (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)
 
int64_t * get_group_value_columnar_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)
 
int64_t * get_group_value_fast (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t bucket, const uint32_t row_size_quad)
 
int64_t * get_group_value_fast_with_original_key (int64_t *groups_buffer, const int64_t key, const int64_t orig_key, const int64_t min_key, const int64_t bucket, const uint32_t row_size_quad)
 
uint32_t get_columnar_group_bin_offset (int64_t *key_base_ptr, const int64_t key, const int64_t min_key, const int64_t bucket)
 
int64_t * get_matching_group_value_perfect_hash (int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_qw_count, const uint32_t row_size_quad)
 
int32_t * get_bucketized_hash_slot (int32_t *buff, const int64_t key, const int64_t min_key, const int64_t bucket_normalization=1)
 
int32_t * get_hash_slot (int32_t *buff, const int64_t key, const int64_t min_key)
 
int32_t * get_hash_slot_sharded (int32_t *buff, const int64_t key, const int64_t min_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count)
 
int32_t * get_bucketized_hash_slot_sharded (int32_t *buff, const int64_t key, const int64_t min_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count, const int64_t bucket_normalization)
 
void linear_probabilistic_count (uint8_t *bitmap, const uint32_t bitmap_bytes, const uint8_t *key_bytes, const uint32_t key_len)
 
int64_t fixed_width_int_decode_noinline (const int8_t *byte_stream, const int32_t byte_width, const int64_t pos)
 
int64_t fixed_width_unsigned_decode_noinline (const int8_t *byte_stream, const int32_t byte_width, const int64_t pos)
 
float fixed_width_float_decode_noinline (const int8_t *byte_stream, const int64_t pos)
 
double fixed_width_double_decode_noinline (const int8_t *byte_stream, const int64_t pos)
 
int64_t fixed_width_small_date_decode_noinline (const int8_t *byte_stream, const int32_t byte_width, const int32_t null_val, const int64_t ret_null_val, const int64_t pos)
 
int8_t * extract_str_ptr_noinline (const uint64_t str_and_len)
 
int32_t extract_str_len_noinline (const uint64_t str_and_len)
 
template<typename T = int64_t>
T get_empty_key ()
 
template<>
int32_t get_empty_key ()
 

Macro Definition Documentation

#define EMPTY_KEY_16   std::numeric_limits<int16_t>::max()

Definition at line 116 of file RuntimeFunctions.h.

#define EMPTY_KEY_32   std::numeric_limits<int32_t>::max()

Definition at line 115 of file RuntimeFunctions.h.

Referenced by get_empty_key().

#define EMPTY_KEY_64   std::numeric_limits<int64_t>::max()

Definition at line 114 of file RuntimeFunctions.h.

Referenced by get_empty_key().

#define EMPTY_KEY_8   std::numeric_limits<int8_t>::max()

Definition at line 117 of file RuntimeFunctions.h.
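
These sentinels mark unoccupied entries in group-by and join hash buffers: a slot whose key field still holds the width-matched EMPTY_KEY_* value has never been claimed. A minimal sketch of initializing and testing a buffer under that convention (illustrative only, not code from this header):

#include <algorithm>
#include <cstdint>
#include <limits>
#include <vector>

#define EMPTY_KEY_64 std::numeric_limits<int64_t>::max()

int main() {
  // Every key slot starts out holding the sentinel...
  std::vector<int64_t> groups_buffer(64, EMPTY_KEY_64);
  // ...so "is this slot free?" is a plain equality test.
  const bool all_empty =
      std::all_of(groups_buffer.begin(), groups_buffer.end(),
                  [](int64_t k) { return k == EMPTY_KEY_64; });
  return all_empty ? 0 : 1;
}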

Function Documentation

void agg_count_distinct_bitmap (int64_t *agg, const int64_t val, const int64_t min_val)

Definition at line 283 of file RuntimeFunctions.cpp.

Referenced by agg_count_distinct_bitmap_skip_val(), WindowFunctionContext::fillPartitionEnd(), WindowFunctionContext::fillPartitionStart(), anonymous_namespace{WindowContext.cpp}::index_to_partition_end(), and InValuesBitmap::InValuesBitmap().

{
  const uint64_t bitmap_idx = val - min_val;
  reinterpret_cast<int8_t*>(*agg)[bitmap_idx >> 3] |= (1 << (bitmap_idx & 7));
}
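
Here `agg` holds a pointer to a pre-allocated bitmap rather than a running total; each input sets the bit at offset val - min_val, so duplicates are absorbed for COUNT(DISTINCT). A hedged host-side sketch of the same technique (the buffer sizing and final popcount step are assumptions for illustration, not part of this header):

#include <bitset>
#include <cstdint>
#include <vector>

int main() {
  const int64_t min_val = 100, max_val = 163;  // assumed value range
  std::vector<int8_t> bitmap((max_val - min_val + 8) / 8, 0);
  int64_t agg = reinterpret_cast<int64_t>(bitmap.data());  // pointer stored as int64_t

  for (int64_t val : {101, 105, 101, 160}) {  // duplicate 101 sets the same bit
    const uint64_t bitmap_idx = val - min_val;
    reinterpret_cast<int8_t*>(agg)[bitmap_idx >> 3] |= (1 << (bitmap_idx & 7));
  }

  // COUNT(DISTINCT ...) is the population count of the bitmap.
  int count = 0;
  for (int8_t byte : bitmap) {
    count += std::bitset<8>(static_cast<uint8_t>(byte)).count();
  }
  return count == 3 ? 0 : 1;
}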

void agg_max (int64_t *agg, const int64_t val)

Definition at line 344 of file RuntimeFunctions.cpp.

{
  *agg = std::max(*agg, val);
}

void agg_max_double (int64_t *agg, const double val)

Definition at line 527 of file RuntimeFunctions.cpp.

{
  const auto r = std::max(*reinterpret_cast<const double*>(agg), val);
  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&r)));
}
void agg_max_double_skip_val (int64_t *agg, const double val, const double skip_val)

Referenced by Executor::reduceResults().

void agg_max_float (int32_t *agg, const float val)

Definition at line 550 of file RuntimeFunctions.cpp.

{
  const auto r = std::max(*reinterpret_cast<const float*>(agg), val);
  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&r)));
}

void agg_max_float_skip_val (int32_t *agg, const float val, const float skip_val)

Referenced by Executor::reduceResults().

void agg_max_int16 (int16_t *agg, const int16_t val)

void agg_max_int16_skip_val (int16_t *agg, const int16_t val, const int16_t skip_val)

void agg_max_int32 (int32_t *agg, const int32_t val)

void agg_max_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)

void agg_max_int8 (int8_t *agg, const int8_t val)

void agg_max_int8_skip_val (int8_t *agg, const int8_t val, const int8_t skip_val)

void agg_max_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)

Referenced by Executor::reduceResults().

void agg_min (int64_t *agg, const int64_t val)

Definition at line 348 of file RuntimeFunctions.cpp.

{
  *agg = std::min(*agg, val);
}

void agg_min_double (int64_t *agg, const double val)

Definition at line 532 of file RuntimeFunctions.cpp.

{
  const auto r = std::min(*reinterpret_cast<const double*>(agg), val);
  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&r)));
}

void agg_min_double_skip_val (int64_t *agg, const double val, const double skip_val)

Referenced by Executor::reduceResults().

void agg_min_float (int32_t *agg, const float val)

Definition at line 555 of file RuntimeFunctions.cpp.

{
  const auto r = std::min(*reinterpret_cast<const float*>(agg), val);
  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&r)));
}

void agg_min_float_skip_val (int32_t *agg, const float val, const float skip_val)

Referenced by Executor::reduceResults().

void agg_min_int16 (int16_t *agg, const int16_t val)

void agg_min_int16_skip_val (int16_t *agg, const int16_t val, const int16_t skip_val)

void agg_min_int32 (int32_t *agg, const int32_t val)

void agg_min_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)

void agg_min_int8 (int8_t *agg, const int8_t val)

void agg_min_int8_skip_val (int8_t *agg, const int8_t val, const int8_t skip_val)

void agg_min_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)

Referenced by Executor::reduceResults().

int64_t agg_sum (int64_t *agg, const int64_t val)

Definition at line 338 of file RuntimeFunctions.cpp.

Referenced by agg_sum_skip_val().

{
  const auto old = *agg;
  *agg += val;
  return old;
}

void agg_sum_double (int64_t *agg, const double val)

Definition at line 522 of file RuntimeFunctions.cpp.

{
  const auto r = *reinterpret_cast<const double*>(agg) + val;
  *agg = *reinterpret_cast<const int64_t*>(may_alias_ptr(&r));
}

void agg_sum_double_skip_val (int64_t *agg, const double val, const double skip_val)

Referenced by Executor::reduceResults().

void agg_sum_float (int32_t *agg, const float val)

Definition at line 545 of file RuntimeFunctions.cpp.

{
  const auto r = *reinterpret_cast<const float*>(agg) + val;
  *agg = *reinterpret_cast<const int32_t*>(may_alias_ptr(&r));
}

void agg_sum_float_skip_val (int32_t *agg, const float val, const float skip_val)

Referenced by Executor::reduceResults().

int32_t agg_sum_int32 (int32_t *agg, const int32_t val)

Definition at line 378 of file RuntimeFunctions.cpp.

Referenced by agg_sum_int32_skip_val().

{
  const auto old = *agg;
  *agg += val;
  return old;
}

int32_t agg_sum_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)

Definition at line 440 of file RuntimeFunctions.cpp.

References agg_sum_int32().

{
  const auto old = *agg;
  if (val != skip_val) {
    if (old != skip_val) {
      return agg_sum_int32(agg, val);
    } else {
      *agg = val;
    }
  }
  return old;
}

int64_t agg_sum_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)

Definition at line 426 of file RuntimeFunctions.cpp.

References agg_sum().

Referenced by Executor::reduceResults().

{
  const auto old = *agg;
  if (val != skip_val) {
    if (old != skip_val) {
      return agg_sum(agg, val);
    } else {
      *agg = val;
    }
  }
  return old;
}
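
skip_val is the column's null sentinel: null inputs are ignored, and the accumulator itself starts out holding the sentinel until the first non-null value arrives. A small self-contained sketch of the same logic (the concrete sentinel chosen here is an assumption for illustration):

#include <cassert>
#include <cstdint>
#include <limits>

// Standalone reimplementation of the logic above, for illustration.
int64_t agg_sum_skip_val(int64_t* agg, const int64_t val, const int64_t skip_val) {
  const auto old = *agg;
  if (val != skip_val) {
    if (old != skip_val) {
      *agg += val;  // both accumulator and input are non-null
    } else {
      *agg = val;   // first non-null input replaces the sentinel
    }
  }
  return old;
}

int main() {
  const int64_t kNull = std::numeric_limits<int64_t>::min();  // assumed sentinel
  int64_t acc = kNull;  // accumulator starts "null"
  for (int64_t v : {kNull, int64_t{3}, kNull, int64_t{4}}) {
    agg_sum_skip_val(&acc, v, kNull);
  }
  assert(acc == 7);  // the null inputs contributed nothing
  return 0;
}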

int32_t extract_str_len_noinline (const uint64_t str_and_len)

Referenced by string_compress().

int8_t* extract_str_ptr_noinline (const uint64_t str_and_len)

Referenced by string_compress().

double fixed_width_double_decode_noinline (const int8_t *byte_stream, const int64_t pos)

Definition at line 126 of file DecodersImpl.h.

References fixed_width_double_decode(), and SUFFIX.

Referenced by compute_bucket_sizes_impl(), lazy_decode(), and OverlapsKeyHandler::operator()().

{
  return SUFFIX(fixed_width_double_decode)(byte_stream, pos);
}

float fixed_width_float_decode_noinline (const int8_t *byte_stream, const int64_t pos)

Definition at line 113 of file DecodersImpl.h.

References fixed_width_float_decode(), and SUFFIX.

Referenced by lazy_decode().

{
  return SUFFIX(fixed_width_float_decode)(byte_stream, pos);
}

int64_t fixed_width_int_decode_noinline (const int8_t *byte_stream, const int32_t byte_width, const int64_t pos)

Definition at line 83 of file DecodersImpl.h.

References fixed_width_int_decode(), and SUFFIX.

Referenced by get_join_column_element_value(), and lazy_decode().

{
  return SUFFIX(fixed_width_int_decode)(byte_stream, byte_width, pos);
}
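
The decoder reads the value stored in byte_width bytes at row pos of a densely packed column and widens it to int64_t. A hedged sketch of the underlying idea (the real fixed_width_int_decode lives in DecodersImpl.h; this standalone version is an illustration only):

#include <cstdint>
#include <cstring>

// Illustrative only: read the byte_width-byte little-endian integer at
// row `pos` of a packed byte stream and sign-extend it to 64 bits.
int64_t fixed_width_int_decode_sketch(const int8_t* byte_stream,
                                      const int32_t byte_width,
                                      const int64_t pos) {
  switch (byte_width) {
    case 1:
      return byte_stream[pos];  // int8_t sign-extends on widening
    case 2: {
      int16_t v;
      std::memcpy(&v, byte_stream + pos * 2, 2);
      return v;
    }
    case 4: {
      int32_t v;
      std::memcpy(&v, byte_stream + pos * 4, 4);
      return v;
    }
    default: {
      int64_t v;
      std::memcpy(&v, byte_stream + pos * 8, 8);
      return v;
    }
  }
}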

int64_t fixed_width_small_date_decode_noinline (const int8_t *byte_stream, const int32_t byte_width, const int32_t null_val, const int64_t ret_null_val, const int64_t pos)

Definition at line 141 of file DecodersImpl.h.

References fixed_width_small_date_decode(), and SUFFIX.

Referenced by get_join_column_element_value(), and lazy_decode().

{
  return SUFFIX(fixed_width_small_date_decode)(
      byte_stream, byte_width, null_val, ret_null_val, pos);
}

int64_t fixed_width_unsigned_decode_noinline (const int8_t *byte_stream, const int32_t byte_width, const int64_t pos)

Definition at line 90 of file DecodersImpl.h.

References fixed_width_unsigned_decode(), and SUFFIX.

Referenced by get_join_column_element_value(), and lazy_decode().

{
  return SUFFIX(fixed_width_unsigned_decode)(byte_stream, byte_width, pos);
}

int32_t* get_bucketized_hash_slot (int32_t *buff, const int64_t key, const int64_t min_key, const int64_t bucket_normalization = 1)

Definition at line 31 of file JoinHashImpl.h.

Referenced by bucketized_hash_join_idx(), count_matches_bucketized(), fill_hash_join_buff_bucketized(), and fill_row_ids_bucketized().

{
  return buff + (key - min_key) / bucket_normalization;
}
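
This is a perfect-hash slot lookup: keys in a known [min_key, max_key] range map directly to buffer offsets, and bucket_normalization coarsens the mapping so several adjacent key values share one slot. A worked illustration (the concrete numbers below are assumptions, e.g. epoch-second keys bucketed by day):

#include <cstdint>
#include <vector>

int main() {
  const int64_t min_key = 0;
  const int64_t bucket_normalization = 86400;  // one day in seconds
  std::vector<int32_t> buff(31, -1);           // one slot per day of a 31-day range

  auto slot = [&](int64_t key) {
    return buff.data() + (key - min_key) / bucket_normalization;
  };

  // 10:00 and 23:00 of day 2 normalize to the same slot.
  return slot(2 * 86400 + 10 * 3600) == slot(2 * 86400 + 23 * 3600) ? 0 : 1;
}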

int32_t* get_bucketized_hash_slot_sharded (int32_t *buff, const int64_t key, const int64_t min_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count, const int64_t bucket_normalization)

Definition at line 45 of file JoinHashImpl.h.

References SHARD_FOR_KEY.

Referenced by fill_hash_join_buff_sharded_bucketized(), and fill_row_ids_sharded_bucketized().

{
  const uint32_t shard = SHARD_FOR_KEY(key, num_shards);
  const uint32_t shard_buffer_index =
      shard / device_count;  // shard sub-buffer index within `buff`
  int32_t* shard_buffer = buff + shard_buffer_index * entry_count_per_shard;
  return shard_buffer + (key - min_key) / bucket_normalization / num_shards;
}

uint32_t get_columnar_group_bin_offset (int64_t *key_base_ptr, const int64_t key, const int64_t min_key, const int64_t bucket)

Definition at line 231 of file GroupByRuntime.cpp.

References EMPTY_KEY_64.

{
  int64_t off = key - min_key;
  if (bucket) {
    off /= bucket;
  }
  if (key_base_ptr[off] == EMPTY_KEY_64) {
    key_base_ptr[off] = key;
  }
  return off;
}
template<typename T = int64_t>
T get_empty_key ()  [inline]

Definition at line 232 of file RuntimeFunctions.h.

References EMPTY_KEY_64.

{
  static_assert(std::is_same<T, int64_t>::value,
                "Unsupported template parameter other than int64_t for now");
  return EMPTY_KEY_64;
}

template<>
int32_t get_empty_key ()  [inline]

Definition at line 239 of file RuntimeFunctions.h.

References EMPTY_KEY_32.

{
  return EMPTY_KEY_32;
}
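
Callers pick the sentinel matching their key width at compile time: the primary template covers int64_t and the specialization covers int32_t. A hedged usage sketch (standalone reimplementation for illustration; the real template lives in this header):

#include <cstdint>
#include <limits>
#include <type_traits>

template <typename T = int64_t>
inline T get_empty_key() {
  static_assert(std::is_same<T, int64_t>::value,
                "Unsupported template parameter other than int64_t for now");
  return std::numeric_limits<int64_t>::max();  // EMPTY_KEY_64
}

template <>
inline int32_t get_empty_key() {
  return std::numeric_limits<int32_t>::max();  // EMPTY_KEY_32
}

int main() {
  auto wide = get_empty_key<>();           // int64_t sentinel
  auto narrow = get_empty_key<int32_t>();  // int32_t sentinel
  return (wide == std::numeric_limits<int64_t>::max() &&
          narrow == std::numeric_limits<int32_t>::max()) ? 0 : 1;
}
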
int64_t* get_group_value (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t *init_vals = nullptr)

Definition at line 26 of file GroupByRuntime.cpp.

References get_matching_group_value(), groups_buffer_entry_count, and key_hash().

Referenced by ResultSetStorage::moveOneEntryToBuffer().

{
  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value(
      groups_buffer, h, key, key_count, key_width, row_size_quad, init_vals);
  if (matching_group) {
    return matching_group;
  }
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value(
        groups_buffer, h_probe, key, key_count, key_width, row_size_quad, init_vals);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
  }
  return NULL;
}
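
get_group_value resolves a group-by key to its row in the output buffer with open addressing: hash, then probe linearly (wrapping modulo the entry count) until a matching or empty slot is found; NULL means the buffer is full. A minimal standalone sketch of the probing loop (simplified to a plain int64_t-to-slot map; the names here are illustrative, not the engine's):

#include <cstdint>
#include <functional>
#include <vector>

constexpr int64_t kEmpty = INT64_MAX;  // stands in for EMPTY_KEY_64

// Return a pointer to the slot for `key`, claiming an empty slot on first use;
// nullptr means every slot is occupied by other keys.
int64_t* probe(std::vector<int64_t>& table, const int64_t key) {
  const uint32_t n = static_cast<uint32_t>(table.size());
  const uint32_t h = std::hash<int64_t>{}(key) % n;
  for (uint32_t i = 0; i < n; ++i) {
    const uint32_t slot = (h + i) % n;  // linear probing with wraparound
    if (table[slot] == kEmpty) {
      table[slot] = key;  // claim the slot for this group
      return &table[slot];
    }
    if (table[slot] == key) {
      return &table[slot];  // existing group
    }
  }
  return nullptr;
}

int main() {
  std::vector<int64_t> table(8, kEmpty);
  return probe(table, 42) == probe(table, 42) ? 0 : 1;  // same slot both times
}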

int64_t* get_group_value_columnar (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)

Definition at line 142 of file GroupByRuntime.cpp.

References get_matching_group_value_columnar(), groups_buffer_entry_count, and key_hash().

Referenced by ResultSetStorage::moveOneEntryToBuffer().

{
  uint32_t h = key_hash(key, key_qw_count, sizeof(int64_t)) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value_columnar(
      groups_buffer, h, key, key_qw_count, groups_buffer_entry_count);
  if (matching_group) {
    return matching_group;
  }
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value_columnar(
        groups_buffer, h_probe, key, key_qw_count, groups_buffer_entry_count);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
  }
  return NULL;
}

int64_t* get_group_value_columnar_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)

Definition at line 165 of file GroupByRuntime.cpp.

References dynamic_watchdog(), get_matching_group_value_columnar(), groups_buffer_entry_count, and key_hash().

{
  uint32_t h = key_hash(key, key_qw_count, sizeof(int64_t)) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value_columnar(
      groups_buffer, h, key, key_qw_count, groups_buffer_entry_count);
  if (matching_group) {
    return matching_group;
  }
  uint32_t watchdog_countdown = 100;
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value_columnar(
        groups_buffer, h_probe, key, key_qw_count, groups_buffer_entry_count);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
    if (--watchdog_countdown == 0) {
      if (dynamic_watchdog()) {
        return NULL;
      }
      watchdog_countdown = 100;
    }
  }
  return NULL;
}

int64_t* get_group_value_fast (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t bucket, const uint32_t row_size_quad)

Definition at line 195 of file GroupByRuntime.cpp.

References EMPTY_KEY_64.

{
  int64_t key_diff = key - min_key;
  if (bucket) {
    key_diff /= bucket;
  }
  int64_t off = key_diff * row_size_quad;
  if (groups_buffer[off] == EMPTY_KEY_64) {
    groups_buffer[off] = key;
  }
  return groups_buffer + off + 1;
}
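
The fast path skips hashing entirely: dense keys index the buffer directly, each group occupying row_size_quad 64-bit quadwords with the key in slot 0 and the aggregate columns after it. A worked offset calculation (the concrete numbers are assumptions):

#include <cstdint>
#include <vector>

int main() {
  const int64_t min_key = 10, bucket = 0;
  const uint32_t row_size_quad = 4;  // 1 key slot + 3 aggregate slots per group
  std::vector<int64_t> groups_buffer(100 * row_size_quad, INT64_MAX);

  const int64_t key = 13;
  int64_t key_diff = key - min_key;  // 3
  if (bucket) {
    key_diff /= bucket;  // bucket == 0 means keys are already dense
  }
  const int64_t off = key_diff * row_size_quad;  // row for key 13 starts at quad 12
  if (groups_buffer[off] == INT64_MAX) {         // INT64_MAX stands in for EMPTY_KEY_64
    groups_buffer[off] = key;
  }
  int64_t* agg_cols = groups_buffer.data() + off + 1;  // first aggregate slot
  return agg_cols - groups_buffer.data() == 13 ? 0 : 1;
}
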
int64_t* get_group_value_fast_with_original_key (int64_t *groups_buffer, const int64_t key, const int64_t orig_key, const int64_t min_key, const int64_t bucket, const uint32_t row_size_quad)

Definition at line 212 of file GroupByRuntime.cpp.

References EMPTY_KEY_64.

{
  int64_t key_diff = key - min_key;
  if (bucket) {
    key_diff /= bucket;
  }
  int64_t off = key_diff * row_size_quad;
  if (groups_buffer[off] == EMPTY_KEY_64) {
    groups_buffer[off] = orig_key;
  }
  return groups_buffer + off + 1;
}
int64_t* get_group_value_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t *init_vals = nullptr)

Definition at line 54 of file GroupByRuntime.cpp.

References dynamic_watchdog(), get_matching_group_value(), groups_buffer_entry_count, and key_hash().

{
  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value(
      groups_buffer, h, key, key_count, key_width, row_size_quad, init_vals);
  if (matching_group) {
    return matching_group;
  }
  uint32_t watchdog_countdown = 100;
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value(
        groups_buffer, h_probe, key, key_count, key_width, row_size_quad, init_vals);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
    if (--watchdog_countdown == 0) {
      if (dynamic_watchdog()) {
        return NULL;
      }
      watchdog_countdown = 100;
    }
  }
  return NULL;
}

int32_t* get_hash_slot (int32_t *buff, const int64_t key, const int64_t min_key)

Definition at line 39 of file JoinHashImpl.h.

Referenced by count_matches(), fill_hash_join_buff(), fill_row_ids(), and hash_join_idx().

{
  return buff + (key - min_key);
}

int32_t* get_hash_slot_sharded (int32_t *buff, const int64_t key, const int64_t min_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count)

Definition at line 60 of file JoinHashImpl.h.

References SHARD_FOR_KEY.

Referenced by count_matches_sharded(), fill_hash_join_buff_sharded(), fill_row_ids_sharded(), and hash_join_idx_sharded().

{
  const uint32_t shard = SHARD_FOR_KEY(key, num_shards);
  const uint32_t shard_buffer_index =
      shard / device_count;  // shard sub-buffer index within `buff`
  int32_t* shard_buffer = buff + shard_buffer_index * entry_count_per_shard;
  return shard_buffer + (key - min_key) / num_shards;
}
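
The sharded variants partition the hash table across devices: SHARD_FOR_KEY picks a shard from the key, each device holds its shards as contiguous sub-buffers of `buff`, and within a shard keys advance in strides of num_shards. A standalone sketch of the arithmetic (SHARD_FOR_KEY is modeled as key % num_shards, which is an assumption about the macro):

#include <cstdint>
#include <vector>

int32_t* get_hash_slot_sharded_sketch(int32_t* buff,
                                      const int64_t key,
                                      const int64_t min_key,
                                      const uint32_t entry_count_per_shard,
                                      const uint32_t num_shards,
                                      const uint32_t device_count) {
  const uint32_t shard = key % num_shards;  // stand-in for SHARD_FOR_KEY
  const uint32_t shard_buffer_index = shard / device_count;
  int32_t* shard_buffer = buff + shard_buffer_index * entry_count_per_shard;
  return shard_buffer + (key - min_key) / num_shards;
}

int main() {
  const uint32_t num_shards = 4, device_count = 2, entries = 16;
  std::vector<int32_t> buff(entries * (num_shards / device_count), -1);
  // Keys 5 and 9 both hash to shard 1 but occupy distinct slots there.
  int32_t* a = get_hash_slot_sharded_sketch(buff.data(), 5, 0, entries, num_shards, device_count);
  int32_t* b = get_hash_slot_sharded_sketch(buff.data(), 9, 0, entries, num_shards, device_count);
  return a != b ? 0 : 1;
}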

int64_t* get_matching_group_value_perfect_hash (int64_t *groups_buffer, const uint32_t hashed_index, const int64_t *key, const uint32_t key_count, const uint32_t row_size_quad)

Definition at line 1016 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64.

{
  uint32_t off = hashed_index * row_size_quad;
  if (groups_buffer[off] == EMPTY_KEY_64) {
    for (uint32_t i = 0; i < key_count; ++i) {
      groups_buffer[off + i] = key[i];
    }
  }
  return groups_buffer + off + key_count;
}
uint32_t key_hash (const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)

Definition at line 20 of file GroupByRuntime.cpp.

References MurmurHash1().

Referenced by get_group_value(), get_group_value_columnar(), anonymous_namespace{ResultSetReduction.cpp}::get_group_value_columnar_reduction(), get_group_value_columnar_slot(), get_group_value_columnar_slot_with_watchdog(), get_group_value_columnar_with_watchdog(), get_group_value_reduction(), and get_group_value_with_watchdog().

{
  return MurmurHash1(key, key_byte_width * key_count, 0);
}

void linear_probabilistic_count (uint8_t *bitmap, const uint32_t bitmap_bytes, const uint8_t *key_bytes, const uint32_t key_len)

Definition at line 1145 of file cuda_mapd_rt.cu.

References MurmurHash1().

{
  const uint32_t bit_pos = MurmurHash1(key_bytes, key_len, 0) % (bitmap_bytes * 8);
  const uint32_t word_idx = bit_pos / 32;
  const uint32_t bit_idx = bit_pos % 32;
  atomicOr(((uint32_t*)bitmap) + word_idx, 1 << bit_idx);
}
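
This implements linear (probabilistic) counting on the GPU: each key hashes to one bit, and a distinct-count estimate is later derived from the fraction of bits still zero, n ~= -m * ln(V), where m is the bitmap size in bits and V the share of zero bits. A hedged host-side sketch of the estimator (CPU version, with std::hash standing in for MurmurHash1):

#include <cmath>
#include <cstdint>
#include <functional>
#include <string>
#include <vector>

int main() {
  const uint32_t bitmap_bytes = 1024;
  std::vector<uint8_t> bitmap(bitmap_bytes, 0);
  const uint32_t m = bitmap_bytes * 8;  // bitmap size in bits

  for (int i = 0; i < 500; ++i) {  // 400 distinct keys, some inserted twice
    const std::string key = "key" + std::to_string(i % 400);
    const uint32_t bit_pos = std::hash<std::string>{}(key) % m;
    bitmap[bit_pos / 8] |= (1u << (bit_pos % 8));
  }

  uint32_t zeros = 0;
  for (uint8_t byte : bitmap) {
    for (int b = 0; b < 8; ++b) zeros += !((byte >> b) & 1);
  }
  // Linear-counting estimate: n ~= -m * ln(V), V = zeros / m.
  const double estimate = -double(m) * std::log(double(zeros) / m);
  return estimate > 300 && estimate < 500 ? 0 : 1;  // expect roughly 400
}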