OmniSciDB  0fdbebe030
RuntimeFunctions.h File Reference
#include <cassert>
#include <cstdint>
#include <ctime>
#include <limits>
#include <type_traits>

Macros

#define EMPTY_KEY_64   std::numeric_limits<int64_t>::max()
 
#define EMPTY_KEY_32   std::numeric_limits<int32_t>::max()
 
#define EMPTY_KEY_16   std::numeric_limits<int16_t>::max()
 
#define EMPTY_KEY_8   std::numeric_limits<int8_t>::max()
 

Enumerations

enum  RuntimeInterruptFlags { INT_CHECK = 0, INT_ABORT = -1, INT_RESET = -2 }
 

Functions

int64_t agg_sum (int64_t *agg, const int64_t val)
 
void agg_max (int64_t *agg, const int64_t val)
 
void agg_min (int64_t *agg, const int64_t val)
 
void agg_sum_double (int64_t *agg, const double val)
 
void agg_max_double (int64_t *agg, const double val)
 
void agg_min_double (int64_t *agg, const double val)
 
int32_t agg_sum_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)
 
int64_t agg_sum_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)
 
void agg_max_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)
 
void agg_min_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)
 
void agg_sum_float_skip_val (int32_t *agg, const float val, const float skip_val)
 
void agg_sum_double_skip_val (int64_t *agg, const double val, const double skip_val)
 
void agg_max_double_skip_val (int64_t *agg, const double val, const double skip_val)
 
void agg_min_double_skip_val (int64_t *agg, const double val, const double skip_val)
 
int32_t agg_sum_int32 (int32_t *agg, const int32_t val)
 
void agg_max_int32 (int32_t *agg, const int32_t val)
 
void agg_max_int16 (int16_t *agg, const int16_t val)
 
void agg_max_int8 (int8_t *agg, const int8_t val)
 
void agg_min_int32 (int32_t *agg, const int32_t val)
 
void agg_min_int16 (int16_t *agg, const int16_t val)
 
void agg_min_int8 (int8_t *agg, const int8_t val)
 
void agg_sum_float (int32_t *agg, const float val)
 
void agg_max_float (int32_t *agg, const float val)
 
void agg_min_float (int32_t *agg, const float val)
 
void agg_max_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)
 
void agg_max_int16_skip_val (int16_t *agg, const int16_t val, const int16_t skip_val)
 
void agg_max_int8_skip_val (int8_t *agg, const int8_t val, const int8_t skip_val)
 
void agg_min_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)
 
void agg_min_int16_skip_val (int16_t *agg, const int16_t val, const int16_t skip_val)
 
void agg_min_int8_skip_val (int8_t *agg, const int8_t val, const int8_t skip_val)
 
void agg_max_float_skip_val (int32_t *agg, const float val, const float skip_val)
 
void agg_min_float_skip_val (int32_t *agg, const float val, const float skip_val)
 
void agg_count_distinct_bitmap (int64_t *agg, const int64_t val, const int64_t min_val)
 
uint32_t key_hash (const int64_t *key, const uint32_t key_qw_count, const uint32_t key_byte_width)
 
int64_t * get_group_value (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t *init_val=nullptr)
 
bool check_interrupt ()
 
bool check_interrupt_init (unsigned command)
 
int64_t * get_group_value_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t *init_val=nullptr)
 
int64_t * get_group_value_columnar (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)
 
int64_t * get_group_value_columnar_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)
 
int64_t * get_group_value_fast (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t bucket, const uint32_t row_size_quad)
 
int64_t * get_group_value_fast_with_original_key (int64_t *groups_buffer, const int64_t key, const int64_t orig_key, const int64_t min_key, const int64_t bucket, const uint32_t row_size_quad)
 
uint32_t get_columnar_group_bin_offset (int64_t *key_base_ptr, const int64_t key, const int64_t min_key, const int64_t bucket)
 
int64_t * get_matching_group_value_perfect_hash (int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_qw_count, const uint32_t row_size_quad)
 
int64_t * get_matching_group_value_perfect_hash_keyless (int64_t *groups_buffer, const uint32_t hashed_index, const uint32_t row_size_quad)
 
int32_t * get_bucketized_hash_slot (int32_t *buff, const int64_t key, const int64_t min_key, const int64_t bucket_normalization=1)
 
int32_t * get_hash_slot (int32_t *buff, const int64_t key, const int64_t min_key)
 
int32_t * get_hash_slot_sharded (int32_t *buff, const int64_t key, const int64_t min_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count)
 
int32_t * get_bucketized_hash_slot_sharded (int32_t *buff, const int64_t key, const int64_t min_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count, const int64_t bucket_normalization)
 
void linear_probabilistic_count (uint8_t *bitmap, const uint32_t bitmap_bytes, const uint8_t *key_bytes, const uint32_t key_len)
 
int64_t fixed_width_int_decode_noinline (const int8_t *byte_stream, const int32_t byte_width, const int64_t pos)
 
int64_t fixed_width_unsigned_decode_noinline (const int8_t *byte_stream, const int32_t byte_width, const int64_t pos)
 
float fixed_width_float_decode_noinline (const int8_t *byte_stream, const int64_t pos)
 
double fixed_width_double_decode_noinline (const int8_t *byte_stream, const int64_t pos)
 
int64_t fixed_width_small_date_decode_noinline (const int8_t *byte_stream, const int32_t byte_width, const int32_t null_val, const int64_t ret_null_val, const int64_t pos)
 
int8_t * extract_str_ptr_noinline (const uint64_t str_and_len)
 
int32_t extract_str_len_noinline (const uint64_t str_and_len)
 
template<typename T = int64_t>
T get_empty_key ()
 
template<>
int32_t get_empty_key ()
 

Macro Definition Documentation

#define EMPTY_KEY_16   std::numeric_limits<int16_t>::max()

Definition at line 116 of file RuntimeFunctions.h.

#define EMPTY_KEY_32   std::numeric_limits<int32_t>::max()

Definition at line 115 of file RuntimeFunctions.h.

Referenced by get_empty_key().

#define EMPTY_KEY_64   std::numeric_limits<int64_t>::max()

Definition at line 114 of file RuntimeFunctions.h.

Referenced by get_empty_key().

#define EMPTY_KEY_8   std::numeric_limits<int8_t>::max()

Definition at line 117 of file RuntimeFunctions.h.

Enumeration Type Documentation

enum RuntimeInterruptFlags

Enumerator
INT_CHECK 
INT_ABORT 
INT_RESET 

Definition at line 131 of file RuntimeFunctions.h.

Function Documentation

void agg_count_distinct_bitmap ( int64_t *  agg,
const int64_t  val,
const int64_t  min_val 
)

Definition at line 285 of file RuntimeFunctions.cpp.

Referenced by agg_count_distinct_bitmap_skip_val(), WindowFunctionContext::fillPartitionEnd(), WindowFunctionContext::fillPartitionStart(), anonymous_namespace{WindowContext.cpp}::index_to_partition_end(), and InValuesBitmap::InValuesBitmap().

{
  const uint64_t bitmap_idx = val - min_val;
  reinterpret_cast<int8_t*>(*agg)[bitmap_idx >> 3] |= (1 << (bitmap_idx & 7));
}

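A hedged host-side sketch of how a caller might drive this aggregate: the aggregate slot holds a pointer to a caller-allocated bitmap, and each value sets the bit at val - min_val. Bitmap size and values below are illustrative only.

#include <cstdint>
#include <vector>
#include "RuntimeFunctions.h"

int main() {
  constexpr int64_t min_val = 100;
  std::vector<int8_t> bitmap(8, 0);  // tracks 64 distinct values in [100, 164)
  int64_t agg = reinterpret_cast<int64_t>(bitmap.data());  // slot stores the bitmap pointer
  for (int64_t v : {101, 105, 101, 163}) {
    agg_count_distinct_bitmap(&agg, v, min_val);
  }
  // A popcount over `bitmap` now yields the distinct count (3 here).
}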

void agg_max ( int64_t *  agg,
const int64_t  val 
)

Definition at line 346 of file RuntimeFunctions.cpp.

{
  *agg = std::max(*agg, val);
}
void agg_max_double ( int64_t *  agg,
const double  val 
)

Definition at line 570 of file RuntimeFunctions.cpp.

{
  const auto r = std::max(*reinterpret_cast<const double*>(agg), val);
  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&r)));
}
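Floating-point aggregates are stored in integer slots by bit pattern, which is why the signature takes int64_t*. A minimal sketch of seeding and reading back such a slot, using std::memcpy in place of the runtime's may_alias_ptr helper:

#include <cstdint>
#include <cstring>
#include "RuntimeFunctions.h"

int main() {
  double init = 1.5;
  int64_t slot;
  std::memcpy(&slot, &init, sizeof(slot));  // seed the slot with the double's bits
  agg_max_double(&slot, 2.5);               // slot now holds the bit pattern of 2.5
  double result;
  std::memcpy(&result, &slot, sizeof(result));
}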
void agg_max_double_skip_val ( int64_t *  agg,
const double  val,
const double  skip_val 
)

Referenced by Executor::reduceResults().


void agg_max_float ( int32_t *  agg,
const float  val 
)

Definition at line 611 of file RuntimeFunctions.cpp.

{
  const auto r = std::max(*reinterpret_cast<const float*>(agg), val);
  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&r)));
}
void agg_max_float_skip_val ( int32_t *  agg,
const float  val,
const float  skip_val 
)

Referenced by Executor::reduceResults().


void agg_max_int16 ( int16_t *  agg,
const int16_t  val 
)
void agg_max_int16_skip_val ( int16_t *  agg,
const int16_t  val,
const int16_t  skip_val 
)
void agg_max_int32 ( int32_t *  agg,
const int32_t  val 
)
void agg_max_int32_skip_val ( int32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)
void agg_max_int8 ( int8_t *  agg,
const int8_t  val 
)
void agg_max_int8_skip_val ( int8_t *  agg,
const int8_t  val,
const int8_t  skip_val 
)
void agg_max_skip_val ( int64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Referenced by Executor::reduceResults().


void agg_min ( int64_t *  agg,
const int64_t  val 
)

Definition at line 350 of file RuntimeFunctions.cpp.

{
  *agg = std::min(*agg, val);
}
void agg_min_double ( int64_t *  agg,
const double  val 
)

Definition at line 575 of file RuntimeFunctions.cpp.

{
  const auto r = std::min(*reinterpret_cast<const double*>(agg), val);
  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&r)));
}
void agg_min_double_skip_val ( int64_t *  agg,
const double  val,
const double  skip_val 
)

Referenced by Executor::reduceResults().


void agg_min_float ( int32_t *  agg,
const float  val 
)

Definition at line 616 of file RuntimeFunctions.cpp.

{
  const auto r = std::min(*reinterpret_cast<const float*>(agg), val);
  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&r)));
}
void agg_min_float_skip_val ( int32_t *  agg,
const float  val,
const float  skip_val 
)

Referenced by Executor::reduceResults().


void agg_min_int16 ( int16_t *  agg,
const int16_t  val 
)
void agg_min_int16_skip_val ( int16_t *  agg,
const int16_t  val,
const int16_t  skip_val 
)
void agg_min_int32 ( int32_t *  agg,
const int32_t  val 
)
void agg_min_int32_skip_val ( int32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)
void agg_min_int8 ( int8_t *  agg,
const int8_t  val 
)
void agg_min_int8_skip_val ( int8_t *  agg,
const int8_t  val,
const int8_t  skip_val 
)
void agg_min_skip_val ( int64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Referenced by Executor::reduceResults().


int64_t agg_sum ( int64_t *  agg,
const int64_t  val 
)

Definition at line 340 of file RuntimeFunctions.cpp.

Referenced by agg_sum_skip_val().

{
  const auto old = *agg;
  *agg += val;
  return old;
}


void agg_sum_double ( int64_t *  agg,
const double  val 
)

Definition at line 565 of file RuntimeFunctions.cpp.

{
  const auto r = *reinterpret_cast<const double*>(agg) + val;
  *agg = *reinterpret_cast<const int64_t*>(may_alias_ptr(&r));
}
void agg_sum_double_skip_val ( int64_t *  agg,
const double  val,
const double  skip_val 
)

Referenced by Executor::reduceResults().


void agg_sum_float ( int32_t *  agg,
const float  val 
)

Definition at line 606 of file RuntimeFunctions.cpp.

{
  const auto r = *reinterpret_cast<const float*>(agg) + val;
  *agg = *reinterpret_cast<const int32_t*>(may_alias_ptr(&r));
}
void agg_sum_float_skip_val ( int32_t *  agg,
const float  val,
const float  skip_val 
)

Referenced by Executor::reduceResults().


int32_t agg_sum_int32 ( int32_t *  agg,
const int32_t  val 
)

Definition at line 398 of file RuntimeFunctions.cpp.

Referenced by agg_sum_int32_skip_val().

{
  const auto old = *agg;
  *agg += val;
  return old;
}


int32_t agg_sum_int32_skip_val ( int32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)

Definition at line 483 of file RuntimeFunctions.cpp.

References agg_sum_int32().

{
  const auto old = *agg;
  if (val != skip_val) {
    if (old != skip_val) {
      return agg_sum_int32(agg, val);
    } else {
      *agg = val;
    }
  }
  return old;
}


int64_t agg_sum_skip_val ( int64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Definition at line 469 of file RuntimeFunctions.cpp.

References agg_sum().

Referenced by Executor::reduceResults().

{
  const auto old = *agg;
  if (val != skip_val) {
    if (old != skip_val) {
      return agg_sum(agg, val);
    } else {
      *agg = val;
    }
  }
  return old;
}


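The skip_val variants treat a sentinel value as SQL NULL: a NULL input is ignored, and the first non-NULL input replaces a NULL accumulator. A sketch, assuming -1 is the chosen sentinel:

#include <cstdint>
#include "RuntimeFunctions.h"

int main() {
  const int64_t skip_val = -1;
  int64_t agg = skip_val;                        // accumulator starts as NULL
  agg_sum_skip_val(&agg, 10, skip_val);          // first real value replaces the sentinel
  agg_sum_skip_val(&agg, skip_val, skip_val);    // NULL input is ignored
  agg_sum_skip_val(&agg, 5, skip_val);           // agg == 15
}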

bool check_interrupt ( )

Definition at line 226 of file cuda_mapd_rt.cu.

References check_interrupt_init(), INT_CHECK, and runtime_interrupt_flag.

{
  return (runtime_interrupt_flag == 1) ? true : false;
}


bool check_interrupt_init ( unsigned  command)

Definition at line 1394 of file RuntimeFunctions.cpp.

References INT_ABORT, INT_CHECK, INT_RESET, and runtime_interrupt_flag.

Referenced by check_interrupt(), Executor::interrupt(), and Executor::resetInterrupt().

{
  static std::atomic_bool runtime_interrupt_flag{false};

  if (command == static_cast<unsigned>(INT_CHECK)) {
    if (runtime_interrupt_flag.load()) {
      return true;
    }
    return false;
  }
  if (command == static_cast<unsigned>(INT_ABORT)) {
    runtime_interrupt_flag = true;
    return false;
  }
  if (command == static_cast<unsigned>(INT_RESET)) {
    runtime_interrupt_flag = false;
    return false;
  }
  return false;
}

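The three RuntimeInterruptFlags commands drive a simple protocol against the internal flag. A minimal host-side sketch:

#include "RuntimeFunctions.h"

int main() {
  check_interrupt_init(static_cast<unsigned>(INT_ABORT));                 // request an interrupt
  bool pending = check_interrupt_init(static_cast<unsigned>(INT_CHECK));  // -> true
  check_interrupt_init(static_cast<unsigned>(INT_RESET));                 // clear the flag
  (void)pending;
}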

int32_t extract_str_len_noinline ( const uint64_t  str_and_len)

Referenced by string_compress().


int8_t* extract_str_ptr_noinline ( const uint64_t  str_and_len)

Referenced by string_compress().


double fixed_width_double_decode_noinline ( const int8_t *  byte_stream,
const int64_t  pos 
)

Definition at line 126 of file DecodersImpl.h.

References fixed_width_double_decode(), and SUFFIX.

Referenced by compute_bucket_sizes_impl(), JoinColumnIterator::getElementSwitch(), lazy_decode(), and OverlapsKeyHandler::operator()().

{
  return SUFFIX(fixed_width_double_decode)(byte_stream, pos);
}



float fixed_width_float_decode_noinline ( const int8_t *  byte_stream,
const int64_t  pos 
)

Definition at line 113 of file DecodersImpl.h.

References fixed_width_float_decode(), and SUFFIX.

Referenced by lazy_decode().

{
  return SUFFIX(fixed_width_float_decode)(byte_stream, pos);
}



int64_t fixed_width_int_decode_noinline ( const int8_t *  byte_stream,
const int32_t  byte_width,
const int64_t  pos 
)

Definition at line 83 of file DecodersImpl.h.

References fixed_width_int_decode(), and SUFFIX.

Referenced by JoinColumnIterator::getElementSwitch(), and lazy_decode().

{
  return SUFFIX(fixed_width_int_decode)(byte_stream, byte_width, pos);
}


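A sketch of decoding row pos from a packed fixed-width byte stream; the three little-endian int16 values below are hypothetical:

#include <cstdint>
#include "RuntimeFunctions.h"

int main() {
  int8_t column[] = {0x10, 0x00, 0x20, 0x00, 0x30, 0x00};  // three int16 rows
  int64_t v = fixed_width_int_decode_noinline(column, /*byte_width=*/2, /*pos=*/1);
  // v == 0x20, the value of the second row (assuming little-endian layout)
  (void)v;
}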

int64_t fixed_width_small_date_decode_noinline ( const int8_t *  byte_stream,
const int32_t  byte_width,
const int32_t  null_val,
const int64_t  ret_null_val,
const int64_t  pos 
)

Definition at line 141 of file DecodersImpl.h.

References fixed_width_small_date_decode(), and SUFFIX.

Referenced by JoinColumnIterator::getElementSwitch(), and lazy_decode().

{
  return SUFFIX(fixed_width_small_date_decode)(
      byte_stream, byte_width, null_val, ret_null_val, pos);
}



int64_t fixed_width_unsigned_decode_noinline ( const int8_t *  byte_stream,
const int32_t  byte_width,
const int64_t  pos 
)

Definition at line 90 of file DecodersImpl.h.

References fixed_width_unsigned_decode(), and SUFFIX.

Referenced by JoinColumnIterator::getElementSwitch(), and lazy_decode().

{
  return SUFFIX(fixed_width_unsigned_decode)(byte_stream, byte_width, pos);
}



int32_t* get_bucketized_hash_slot ( int32_t *  buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  bucket_normalization = 1 
)

Definition at line 31 of file JoinHashImpl.h.

Referenced by bucketized_hash_join_idx(), count_matches_bucketized(), fill_hash_join_buff_bucketized(), and fill_row_ids_bucketized().

{
  return buff + (key - min_key) / bucket_normalization;
}

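bucket_normalization collapses a contiguous range of keys into one slot, e.g. epoch-day join keys bucketed per week. A sketch with illustrative sizes and an illustrative empty marker:

#include <cstdint>
#include <vector>
#include "RuntimeFunctions.h"

int main() {
  const int64_t min_key = 0, bucket_normalization = 7;
  std::vector<int32_t> buff(64, -1);  // -1 as an illustrative empty marker
  int32_t* slot = get_bucketized_hash_slot(buff.data(), 13, min_key, bucket_normalization);
  // keys 7..13 all normalize to buff[1]; *slot could store a matching row id
  (void)slot;
}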

int32_t* get_bucketized_hash_slot_sharded ( int32_t *  buff,
const int64_t  key,
const int64_t  min_key,
const uint32_t  entry_count_per_shard,
const uint32_t  num_shards,
const uint32_t  device_count,
const int64_t  bucket_normalization 
)

Definition at line 45 of file JoinHashImpl.h.

References SHARD_FOR_KEY.

Referenced by fill_hash_join_buff_sharded_bucketized(), and fill_row_ids_sharded_bucketized().

{
  const uint32_t shard = SHARD_FOR_KEY(key, num_shards);
  const uint32_t shard_buffer_index =
      shard / device_count;  // shard sub-buffer index within `buff`
  int32_t* shard_buffer = buff + shard_buffer_index * entry_count_per_shard;
  return shard_buffer + (key - min_key) / bucket_normalization / num_shards;
}


uint32_t get_columnar_group_bin_offset ( int64_t *  key_base_ptr,
const int64_t  key,
const int64_t  min_key,
const int64_t  bucket 
)

Definition at line 231 of file GroupByRuntime.cpp.

References EMPTY_KEY_64.

{
  int64_t off = key - min_key;
  if (bucket) {
    off /= bucket;
  }
  if (key_base_ptr[off] == EMPTY_KEY_64) {
    key_base_ptr[off] = key;
  }
  return off;
}
template<typename T = int64_t>
T get_empty_key ( )
inline

Definition at line 243 of file RuntimeFunctions.h.

References EMPTY_KEY_64.

{
  static_assert(std::is_same<T, int64_t>::value,
                "Unsupported template parameter other than int64_t for now");
  return EMPTY_KEY_64;
}
template<>
int32_t get_empty_key ( )
inline

Definition at line 250 of file RuntimeFunctions.h.

References EMPTY_KEY_32.

{
  return EMPTY_KEY_32;
}
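Usage is driven by the key width: the primary template yields the 64-bit sentinel and the specialization the 32-bit one. A minimal sketch:

#include <cstdint>
#include "RuntimeFunctions.h"

int main() {
  int64_t empty64 = get_empty_key();           // EMPTY_KEY_64
  int32_t empty32 = get_empty_key<int32_t>();  // EMPTY_KEY_32
  (void)empty64;
  (void)empty32;
}
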
int64_t* get_group_value ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width,
const uint32_t  row_size_quad,
const int64_t *  init_val = nullptr 
)

Definition at line 26 of file GroupByRuntime.cpp.

References get_matching_group_value(), groups_buffer_entry_count, and key_hash().

Referenced by ResultSetStorage::moveOneEntryToBuffer().

{
  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value(
      groups_buffer, h, key, key_count, key_width, row_size_quad, init_vals);
  if (matching_group) {
    return matching_group;
  }
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value(
        groups_buffer, h_probe, key, key_count, key_width, row_size_quad, init_vals);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
  }
  return NULL;
}


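A sketch of driving the probing hash table from the host. The buffer geometry (one key slot plus two aggregate slots per row) is hypothetical, and the buffer must be pre-filled with the empty-key sentinel:

#include <cstdint>
#include <vector>
#include "RuntimeFunctions.h"

int main() {
  const uint32_t entry_count = 8, key_count = 1, key_width = 8, row_size_quad = 3;
  std::vector<int64_t> buffer(entry_count * row_size_quad, EMPTY_KEY_64);
  int64_t key = 42;
  int64_t* aggs =
      get_group_value(buffer.data(), entry_count, &key, key_count, key_width, row_size_quad);
  if (aggs) {           // NULL means the buffer is full
    agg_sum(aggs, 10);  // accumulate into the group's first aggregate slot
  }
}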

int64_t* get_group_value_columnar ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_qw_count 
)

Definition at line 142 of file GroupByRuntime.cpp.

References get_matching_group_value_columnar(), groups_buffer_entry_count, and key_hash().

Referenced by ResultSetStorage::moveOneEntryToBuffer().

{
  uint32_t h = key_hash(key, key_qw_count, sizeof(int64_t)) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value_columnar(
      groups_buffer, h, key, key_qw_count, groups_buffer_entry_count);
  if (matching_group) {
    return matching_group;
  }
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value_columnar(
        groups_buffer, h_probe, key, key_qw_count, groups_buffer_entry_count);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
  }
  return NULL;
}



int64_t* get_group_value_columnar_with_watchdog ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_qw_count 
)

Definition at line 165 of file GroupByRuntime.cpp.

References dynamic_watchdog(), get_matching_group_value_columnar(), groups_buffer_entry_count, and key_hash().

{
  uint32_t h = key_hash(key, key_qw_count, sizeof(int64_t)) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value_columnar(
      groups_buffer, h, key, key_qw_count, groups_buffer_entry_count);
  if (matching_group) {
    return matching_group;
  }
  uint32_t watchdog_countdown = 100;
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value_columnar(
        groups_buffer, h_probe, key, key_qw_count, groups_buffer_entry_count);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
    if (--watchdog_countdown == 0) {
      if (dynamic_watchdog()) {
        return NULL;
      }
      watchdog_countdown = 100;
    }
  }
  return NULL;
}


int64_t* get_group_value_fast ( int64_t *  groups_buffer,
const int64_t  key,
const int64_t  min_key,
const int64_t  bucket,
const uint32_t  row_size_quad 
)

Definition at line 195 of file GroupByRuntime.cpp.

References EMPTY_KEY_64.

{
  int64_t key_diff = key - min_key;
  if (bucket) {
    key_diff /= bucket;
  }
  int64_t off = key_diff * row_size_quad;
  if (groups_buffer[off] == EMPTY_KEY_64) {
    groups_buffer[off] = key;
  }
  return groups_buffer + off + 1;
}
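For the perfect-hash fast path each key addresses its row directly, so no probing occurs. A sketch with an illustrative two-quad row layout:

#include <cstdint>
#include <vector>
#include "RuntimeFunctions.h"

int main() {
  const int64_t min_key = 100, bucket = 0;
  const uint32_t row_size_quad = 2;  // one key slot plus one aggregate slot
  std::vector<int64_t> buffer(10 * row_size_quad, EMPTY_KEY_64);
  int64_t* aggs = get_group_value_fast(buffer.data(), 103, min_key, bucket, row_size_quad);
  agg_max(aggs, 7);  // key 103 occupies row (103 - 100) == 3
}
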
int64_t* get_group_value_fast_with_original_key ( int64_t *  groups_buffer,
const int64_t  key,
const int64_t  orig_key,
const int64_t  min_key,
const int64_t  bucket,
const uint32_t  row_size_quad 
)

Definition at line 212 of file GroupByRuntime.cpp.

References EMPTY_KEY_64.

{
  int64_t key_diff = key - min_key;
  if (bucket) {
    key_diff /= bucket;
  }
  int64_t off = key_diff * row_size_quad;
  if (groups_buffer[off] == EMPTY_KEY_64) {
    groups_buffer[off] = orig_key;
  }
  return groups_buffer + off + 1;
}
int64_t* get_group_value_with_watchdog ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width,
const uint32_t  row_size_quad,
const int64_t *  init_val = nullptr 
)

Definition at line 54 of file GroupByRuntime.cpp.

References dynamic_watchdog(), get_matching_group_value(), groups_buffer_entry_count, and key_hash().

{
  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value(
      groups_buffer, h, key, key_count, key_width, row_size_quad, init_vals);
  if (matching_group) {
    return matching_group;
  }
  uint32_t watchdog_countdown = 100;
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value(
        groups_buffer, h_probe, key, key_count, key_width, row_size_quad, init_vals);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
    if (--watchdog_countdown == 0) {
      if (dynamic_watchdog()) {
        return NULL;
      }
      watchdog_countdown = 100;
    }
  }
  return NULL;
}


int32_t* get_hash_slot ( int32_t *  buff,
const int64_t  key,
const int64_t  min_key 
)

Definition at line 39 of file JoinHashImpl.h.

Referenced by count_matches(), fill_hash_join_buff(), fill_row_ids(), and hash_join_idx().

{
  return buff + (key - min_key);
}

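A perfect-hash join slot is simply an offset from min_key, so the buffer must span the full key range. A sketch with an illustrative empty marker:

#include <cstdint>
#include <vector>
#include "RuntimeFunctions.h"

int main() {
  const int64_t min_key = 1000;
  std::vector<int32_t> buff(100, -1);  // -1 as an illustrative empty marker
  int32_t* slot = get_hash_slot(buff.data(), 1042, min_key);
  *slot = 5;  // e.g. record the matching row id for key 1042 at buff[42]
}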

int32_t* get_hash_slot_sharded ( int32_t *  buff,
const int64_t  key,
const int64_t  min_key,
const uint32_t  entry_count_per_shard,
const uint32_t  num_shards,
const uint32_t  device_count 
)

Definition at line 60 of file JoinHashImpl.h.

References SHARD_FOR_KEY.

Referenced by count_matches_sharded(), fill_hash_join_buff_sharded(), fill_row_ids_sharded(), and hash_join_idx_sharded().

{
  const uint32_t shard = SHARD_FOR_KEY(key, num_shards);
  const uint32_t shard_buffer_index =
      shard / device_count;  // shard sub-buffer index within `buff`
  int32_t* shard_buffer = buff + shard_buffer_index * entry_count_per_shard;
  return shard_buffer + (key - min_key) / num_shards;
}


int64_t* get_matching_group_value_perfect_hash ( int64_t *  groups_buffer,
const uint32_t  h,
const int64_t *  key,
const uint32_t  key_qw_count,
const uint32_t  row_size_quad 
)

Definition at line 1132 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64.

{
  uint32_t off = hashed_index * row_size_quad;
  if (groups_buffer[off] == EMPTY_KEY_64) {
    for (uint32_t i = 0; i < key_count; ++i) {
      groups_buffer[off + i] = key[i];
    }
  }
  return groups_buffer + off + key_count;
}
int64_t* get_matching_group_value_perfect_hash_keyless ( int64_t *  groups_buffer,
const uint32_t  hashed_index,
const uint32_t  row_size_quad 
)

For a particular hashed index (only used with multi-column perfect-hash group by), returns the row-wise offset of the group in the output buffer. Since it is intended for keyless hash use, it assumes no group key columns are prepended to the output buffer.

Definition at line 1153 of file RuntimeFunctions.cpp.

{
  return groups_buffer + row_size_quad * hashed_index;
}
uint32_t key_hash ( const int64_t *  key,
const uint32_t  key_qw_count,
const uint32_t  key_byte_width 
)

Definition at line 20 of file GroupByRuntime.cpp.

References MurmurHash1().

Referenced by get_group_value(), get_group_value_columnar(), anonymous_namespace{ResultSetReduction.cpp}::get_group_value_columnar_reduction(), get_group_value_columnar_slot(), get_group_value_columnar_slot_with_watchdog(), get_group_value_columnar_with_watchdog(), get_group_value_reduction(), and get_group_value_with_watchdog().

{
  return MurmurHash1(key, key_byte_width * key_count, 0);
}


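A sketch of hashing a two-column key into a bucket index, as the group-value routines above do; the entry count is hypothetical:

#include <cstdint>
#include "RuntimeFunctions.h"

int main() {
  int64_t key[] = {42, 7};
  const uint32_t entry_count = 1024;
  uint32_t h =
      key_hash(key, /*key_count=*/2, /*key_byte_width=*/sizeof(int64_t)) % entry_count;
  (void)h;
}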

void linear_probabilistic_count ( uint8_t *  bitmap,
const uint32_t  bitmap_bytes,
const uint8_t *  key_bytes,
const uint32_t  key_len 
)

Definition at line 1261 of file cuda_mapd_rt.cu.

References MurmurHash1().

{
  const uint32_t bit_pos = MurmurHash1(key_bytes, key_len, 0) % (bitmap_bytes * 8);
  const uint32_t word_idx = bit_pos / 32;
  const uint32_t bit_idx = bit_pos % 32;
  atomicOr(((uint32_t*)bitmap) + word_idx, 1 << bit_idx);
}

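Since this overload lives in cuda_mapd_rt.cu and uses atomicOr, it is device-only. A hedged CUDA sketch, assuming the device declaration is visible as it is when compiled into the runtime, in which each thread sets one bit per key of an approximate-distinct-count bitmap:

#include <cstdint>

__global__ void approx_count_kernel(uint8_t* bitmap,
                                    const uint32_t bitmap_bytes,
                                    const int64_t* keys,
                                    const int num_keys) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < num_keys) {
    linear_probabilistic_count(bitmap,
                               bitmap_bytes,
                               reinterpret_cast<const uint8_t*>(&keys[i]),
                               sizeof(int64_t));
  }
}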