OmniSciDB  c07336695a
GroupByRuntime.cpp File Reference
#include "JoinHashImpl.h"
#include "MurmurHash.h"
+ Include dependency graph for GroupByRuntime.cpp:
+ This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Macros

#define DEF_TRANSLATE_NULL_KEY(key_type)
 

Functions

ALWAYS_INLINE DEVICE uint32_t key_hash (const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)
 
NEVER_INLINE DEVICE int64_t * get_group_value (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t *init_vals)
 
NEVER_INLINE DEVICE bool dynamic_watchdog ()
 
NEVER_INLINE DEVICE int64_t * get_group_value_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t *init_vals)
 
NEVER_INLINE DEVICE int32_t get_group_value_columnar_slot (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width)
 
NEVER_INLINE DEVICE int32_t get_group_value_columnar_slot_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width)
 
NEVER_INLINE DEVICE int64_t * get_group_value_columnar (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)
 
NEVER_INLINE DEVICE int64_t * get_group_value_columnar_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)
 
ALWAYS_INLINE DEVICE int64_t * get_group_value_fast (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t bucket, const uint32_t row_size_quad)
 
ALWAYS_INLINE DEVICE int64_t * get_group_value_fast_with_original_key (int64_t *groups_buffer, const int64_t key, const int64_t orig_key, const int64_t min_key, const int64_t bucket, const uint32_t row_size_quad)
 
ALWAYS_INLINE DEVICE uint32_t get_columnar_group_bin_offset (int64_t *key_base_ptr, const int64_t key, const int64_t min_key, const int64_t bucket)
 
ALWAYS_INLINE DEVICE int64_t * get_scan_output_slot (int64_t *output_buffer, const uint32_t output_buffer_entry_count, const uint32_t pos, const int64_t offset_in_fragment, const uint32_t row_size_quad)
 
ALWAYS_INLINE DEVICE int32_t get_columnar_scan_output_offset (int64_t *output_buffer, const uint32_t output_buffer_entry_count, const uint32_t pos, const int64_t offset_in_fragment)
 
ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx (int64_t hash_buff, int64_t const key, int64_t const min_key, int64_t const max_key, int64_t bucket_normalization)
 
ALWAYS_INLINE DEVICE int64_t hash_join_idx (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key)
 
ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx_nullable (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const int64_t null_val, const int64_t bucket_normalization)
 
ALWAYS_INLINE DEVICE int64_t hash_join_idx_nullable (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const int64_t null_val)
 
ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx_bitwise (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const int64_t null_val, const int64_t translated_val, const int64_t bucket_normalization)
 
ALWAYS_INLINE DEVICE int64_t hash_join_idx_bitwise (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const int64_t null_val, const int64_t translated_val)
 
ALWAYS_INLINE DEVICE int64_t hash_join_idx_sharded (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count)
 
ALWAYS_INLINE DEVICE int64_t hash_join_idx_sharded_nullable (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count, const int64_t null_val)
 
ALWAYS_INLINE DEVICE int64_t hash_join_idx_bitwise_sharded (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count, const int64_t null_val, const int64_t translated_val)
 

Macro Definition Documentation

◆ DEF_TRANSLATE_NULL_KEY

#define DEF_TRANSLATE_NULL_KEY (   key_type)
Value:
extern "C" NEVER_INLINE DEVICE int64_t translate_null_key_##key_type( \
const key_type key, const key_type null_val, const int64_t translated_val) { \
if (key == null_val) { \
return translated_val; \
} \
return key; \
}
#define DEVICE
#define NEVER_INLINE

Definition at line 407 of file GroupByRuntime.cpp.

Function Documentation

◆ bucketized_hash_join_idx()

ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx ( int64_t  hash_buff,
int64_t const  key,
int64_t const  min_key,
int64_t const  max_key,
int64_t  bucket_normalization 
)

Definition at line 272 of file GroupByRuntime.cpp.

References get_bucketized_hash_slot(), and SUFFIX.

Referenced by bucketized_hash_join_idx_bitwise(), and bucketized_hash_join_idx_nullable().

276  {
 277  if (key >= min_key && key <= max_key) {
 278  return *SUFFIX(get_bucketized_hash_slot)(
 279  reinterpret_cast<int32_t*>(hash_buff), key, min_key, bucket_normalization);
280  }
281  return -1;
282 }
ALWAYS_INLINE DEVICE int32_t *SUFFIX() get_bucketized_hash_slot(int32_t *buff, const int64_t key, const int64_t min_key, const int64_t bucket_normalization)
Definition: JoinHashImpl.h:31
#define SUFFIX(name)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ bucketized_hash_join_idx_bitwise()

ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx_bitwise ( int64_t  hash_buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  max_key,
const int64_t  null_val,
const int64_t  translated_val,
const int64_t  bucket_normalization 
)

Definition at line 315 of file GroupByRuntime.cpp.

References ALWAYS_INLINE, bucketized_hash_join_idx(), and DEVICE.

321  {
322  return key != null_val ? bucketized_hash_join_idx(
323  hash_buff, key, min_key, max_key, bucket_normalization)
324  : bucketized_hash_join_idx(hash_buff,
325  translated_val,
326  min_key,
327  translated_val,
328  bucket_normalization);
329 }
ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx(int64_t hash_buff, int64_t const key, int64_t const min_key, int64_t const max_key, int64_t bucket_normalization)
+ Here is the call graph for this function:

◆ bucketized_hash_join_idx_nullable()

ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx_nullable ( int64_t  hash_buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  max_key,
const int64_t  null_val,
const int64_t  bucket_normalization 
)

Definition at line 295 of file GroupByRuntime.cpp.

References bucketized_hash_join_idx().

300  {
301  return key != null_val ? bucketized_hash_join_idx(
302  hash_buff, key, min_key, max_key, bucket_normalization)
303  : -1;
304 }
ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx(int64_t hash_buff, int64_t const key, int64_t const min_key, int64_t const max_key, int64_t bucket_normalization)
+ Here is the call graph for this function:

◆ dynamic_watchdog()

NEVER_INLINE DEVICE bool dynamic_watchdog ( )

Definition at line 69 of file DynamicWatchdog.cpp.

References DW_DEADLINE, dynamic_watchdog_init(), logger::INFO, LOG, and read_cycle_counter().

Referenced by anonymous_namespace{ResultSetReduction.cpp}::check_watchdog(), check_watchdog_rt(), get_group_value(), get_group_value_columnar_slot_with_watchdog(), get_group_value_columnar_with_watchdog(), and get_group_value_with_watchdog().

69  {
70  auto clock = read_cycle_counter();
71  auto dw_deadline = dynamic_watchdog_init(static_cast<unsigned>(DW_DEADLINE));
72  if (clock > dw_deadline) {
73  LOG(INFO) << "TIMEOUT: thread " << std::this_thread::get_id() << ": clock " << clock
74  << ", deadline " << dw_deadline;
75  return true;
76  }
77  return false;
78 }
static __inline__ uint64_t read_cycle_counter(void)
#define LOG(tag)
Definition: Logger.h:182
uint64_t dynamic_watchdog_init(unsigned ms_budget)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ get_columnar_group_bin_offset()

ALWAYS_INLINE DEVICE uint32_t get_columnar_group_bin_offset ( int64_t *  key_base_ptr,
const int64_t  key,
const int64_t  min_key,
const int64_t  bucket 
)

Definition at line 231 of file GroupByRuntime.cpp.

References EMPTY_KEY_64.

234  {
235  int64_t off = key - min_key;
236  if (bucket) {
237  off /= bucket;
238  }
239  if (key_base_ptr[off] == EMPTY_KEY_64) {
240  key_base_ptr[off] = key;
241  }
242  return off;
243 }
#define EMPTY_KEY_64

◆ get_columnar_scan_output_offset()

ALWAYS_INLINE DEVICE int32_t get_columnar_scan_output_offset ( int64_t *  output_buffer,
const uint32_t  output_buffer_entry_count,
const uint32_t  pos,
const int64_t  offset_in_fragment 
)

Definition at line 260 of file GroupByRuntime.cpp.

References ALWAYS_INLINE, and DEVICE.

263  {
264  if (pos < output_buffer_entry_count) {
265  output_buffer[pos] = offset_in_fragment;
266  return pos;
267  }
268  return -1;
269 }

◆ get_group_value()

NEVER_INLINE DEVICE int64_t* get_group_value ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width,
const uint32_t  row_size_quad,
const int64_t *  init_vals 
)

Definition at line 26 of file GroupByRuntime.cpp.

References DEVICE, dynamic_watchdog(), get_matching_group_value(), groups_buffer_entry_count, key_hash(), and NEVER_INLINE.

Referenced by fill_storage_buffer_baseline_rowwise(), anonymous_namespace{ResultSetBaselineRadixSortTest.cpp}::fill_storage_buffer_baseline_sort_fp(), anonymous_namespace{ResultSetBaselineRadixSortTest.cpp}::fill_storage_buffer_baseline_sort_int(), ResultSetStorage::moveOneEntryToBuffer(), anonymous_namespace{ResultSetTest.cpp}::ResultSetEmulator::rse_fill_storage_buffer_baseline_rowwise(), and TEST().

33  {
34  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
35  int64_t* matching_group = get_matching_group_value(
36  groups_buffer, h, key, key_count, key_width, row_size_quad, init_vals);
37  if (matching_group) {
38  return matching_group;
39  }
40  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
41  while (h_probe != h) {
42  matching_group = get_matching_group_value(
43  groups_buffer, h_probe, key, key_count, key_width, row_size_quad, init_vals);
44  if (matching_group) {
45  return matching_group;
46  }
47  h_probe = (h_probe + 1) % groups_buffer_entry_count;
48  }
49  return NULL;
50 }
ALWAYS_INLINE DEVICE uint32_t key_hash(const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)
const int64_t const uint32_t groups_buffer_entry_count
ALWAYS_INLINE int64_t * get_matching_group_value(int64_t *groups_buffer, const uint32_t h, const T *key, const uint32_t key_count, const uint32_t row_size_quad)
const int64_t * init_vals
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ get_group_value_columnar()

NEVER_INLINE DEVICE int64_t* get_group_value_columnar ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_qw_count 
)

Definition at line 142 of file GroupByRuntime.cpp.

References get_matching_group_value_columnar(), groups_buffer_entry_count, and key_hash().

Referenced by fill_storage_buffer_baseline_colwise(), ResultSetStorage::moveOneEntryToBuffer(), and anonymous_namespace{ResultSetTest.cpp}::ResultSetEmulator::rse_fill_storage_buffer_baseline_colwise().

146  {
147  uint32_t h = key_hash(key, key_qw_count, sizeof(int64_t)) % groups_buffer_entry_count;
148  int64_t* matching_group = get_matching_group_value_columnar(
149  groups_buffer, h, key, key_qw_count, groups_buffer_entry_count);
150  if (matching_group) {
151  return matching_group;
152  }
153  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
154  while (h_probe != h) {
155  matching_group = get_matching_group_value_columnar(
156  groups_buffer, h_probe, key, key_qw_count, groups_buffer_entry_count);
157  if (matching_group) {
158  return matching_group;
159  }
160  h_probe = (h_probe + 1) % groups_buffer_entry_count;
161  }
162  return NULL;
163 }
ALWAYS_INLINE DEVICE uint32_t key_hash(const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)
const int64_t const uint32_t groups_buffer_entry_count
const int64_t const uint32_t const uint32_t key_qw_count
ALWAYS_INLINE int64_t * get_matching_group_value_columnar(int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_qw_count, const size_t entry_count)
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ get_group_value_columnar_slot()

NEVER_INLINE DEVICE int32_t get_group_value_columnar_slot ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width 
)

Definition at line 88 of file GroupByRuntime.cpp.

References DEVICE, get_matching_group_value_columnar_slot(), groups_buffer_entry_count, key_hash(), and NEVER_INLINE.

92  {
93  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
94  int32_t matching_slot = get_matching_group_value_columnar_slot(
95  groups_buffer, groups_buffer_entry_count, h, key, key_count, key_width);
96  if (matching_slot != -1) {
97  return h;
98  }
99  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
 100  while (h_probe != h) {
 101  matching_slot = get_matching_group_value_columnar_slot(
 102  groups_buffer, groups_buffer_entry_count, h_probe, key, key_count, key_width);
103  if (matching_slot != -1) {
104  return h_probe;
105  }
106  h_probe = (h_probe + 1) % groups_buffer_entry_count;
107  }
108  return -1;
109 }
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot(int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const T *key, const uint32_t key_count)
ALWAYS_INLINE DEVICE uint32_t key_hash(const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)
const int64_t const uint32_t groups_buffer_entry_count
+ Here is the call graph for this function:

◆ get_group_value_columnar_slot_with_watchdog()

NEVER_INLINE DEVICE int32_t get_group_value_columnar_slot_with_watchdog ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width 
)

Definition at line 112 of file GroupByRuntime.cpp.

References dynamic_watchdog(), get_matching_group_value_columnar_slot(), groups_buffer_entry_count, and key_hash().

116  {
117  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
118  int32_t matching_slot = get_matching_group_value_columnar_slot(
119  groups_buffer, groups_buffer_entry_count, h, key, key_count, key_width);
120  if (matching_slot != -1) {
121  return h;
122  }
123  uint32_t watchdog_countdown = 100;
124  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
 125  while (h_probe != h) {
 126  matching_slot = get_matching_group_value_columnar_slot(
 127  groups_buffer, groups_buffer_entry_count, h_probe, key, key_count, key_width);
128  if (matching_slot != -1) {
129  return h_probe;
130  }
131  h_probe = (h_probe + 1) % groups_buffer_entry_count;
132  if (--watchdog_countdown == 0) {
133  if (dynamic_watchdog()) {
134  return -1;
135  }
136  watchdog_countdown = 100;
137  }
138  }
139  return -1;
140 }
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot(int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const T *key, const uint32_t key_count)
ALWAYS_INLINE DEVICE uint32_t key_hash(const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)
const int64_t const uint32_t groups_buffer_entry_count
NEVER_INLINE DEVICE bool dynamic_watchdog()
+ Here is the call graph for this function:

◆ get_group_value_columnar_with_watchdog()

NEVER_INLINE DEVICE int64_t* get_group_value_columnar_with_watchdog ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_qw_count 
)

Definition at line 165 of file GroupByRuntime.cpp.

References dynamic_watchdog(), get_matching_group_value_columnar(), groups_buffer_entry_count, and key_hash().

169  {
170  uint32_t h = key_hash(key, key_qw_count, sizeof(int64_t)) % groups_buffer_entry_count;
171  int64_t* matching_group = get_matching_group_value_columnar(
172  groups_buffer, h, key, key_qw_count, groups_buffer_entry_count);
173  if (matching_group) {
174  return matching_group;
175  }
176  uint32_t watchdog_countdown = 100;
177  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
178  while (h_probe != h) {
179  matching_group = get_matching_group_value_columnar(
180  groups_buffer, h_probe, key, key_qw_count, groups_buffer_entry_count);
181  if (matching_group) {
182  return matching_group;
183  }
184  h_probe = (h_probe + 1) % groups_buffer_entry_count;
185  if (--watchdog_countdown == 0) {
186  if (dynamic_watchdog()) {
187  return NULL;
188  }
189  watchdog_countdown = 100;
190  }
191  }
192  return NULL;
193 }
ALWAYS_INLINE DEVICE uint32_t key_hash(const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)
const int64_t const uint32_t groups_buffer_entry_count
const int64_t const uint32_t const uint32_t key_qw_count
NEVER_INLINE DEVICE bool dynamic_watchdog()
ALWAYS_INLINE int64_t * get_matching_group_value_columnar(int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_qw_count, const size_t entry_count)
+ Here is the call graph for this function:

◆ get_group_value_fast()

ALWAYS_INLINE DEVICE int64_t* get_group_value_fast ( int64_t *  groups_buffer,
const int64_t  key,
const int64_t  min_key,
const int64_t  bucket,
const uint32_t  row_size_quad 
)

Definition at line 195 of file GroupByRuntime.cpp.

References EMPTY_KEY_64.

200  {
201  int64_t key_diff = key - min_key;
202  if (bucket) {
203  key_diff /= bucket;
204  }
205  int64_t off = key_diff * row_size_quad;
206  if (groups_buffer[off] == EMPTY_KEY_64) {
207  groups_buffer[off] = key;
208  }
209  return groups_buffer + off + 1;
210 }
#define EMPTY_KEY_64

◆ get_group_value_fast_with_original_key()

ALWAYS_INLINE DEVICE int64_t* get_group_value_fast_with_original_key ( int64_t *  groups_buffer,
const int64_t  key,
const int64_t  orig_key,
const int64_t  min_key,
const int64_t  bucket,
const uint32_t  row_size_quad 
)

Definition at line 212 of file GroupByRuntime.cpp.

References ALWAYS_INLINE, DEVICE, and EMPTY_KEY_64.

218  {
219  int64_t key_diff = key - min_key;
220  if (bucket) {
221  key_diff /= bucket;
222  }
223  int64_t off = key_diff * row_size_quad;
224  if (groups_buffer[off] == EMPTY_KEY_64) {
225  groups_buffer[off] = orig_key;
226  }
227  return groups_buffer + off + 1;
228 }
#define EMPTY_KEY_64

◆ get_group_value_with_watchdog()

NEVER_INLINE DEVICE int64_t* get_group_value_with_watchdog ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width,
const uint32_t  row_size_quad,
const int64_t *  init_vals 
)

Definition at line 54 of file GroupByRuntime.cpp.

References DEVICE, dynamic_watchdog(), get_matching_group_value(), groups_buffer_entry_count, key_hash(), and NEVER_INLINE.

61  {
62  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
63  int64_t* matching_group = get_matching_group_value(
64  groups_buffer, h, key, key_count, key_width, row_size_quad, init_vals);
65  if (matching_group) {
66  return matching_group;
67  }
68  uint32_t watchdog_countdown = 100;
69  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
70  while (h_probe != h) {
71  matching_group = get_matching_group_value(
72  groups_buffer, h_probe, key, key_count, key_width, row_size_quad, init_vals);
73  if (matching_group) {
74  return matching_group;
75  }
76  h_probe = (h_probe + 1) % groups_buffer_entry_count;
77  if (--watchdog_countdown == 0) {
78  if (dynamic_watchdog()) {
79  return NULL;
80  }
81  watchdog_countdown = 100;
82  }
83  }
84  return NULL;
85 }
ALWAYS_INLINE DEVICE uint32_t key_hash(const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)
const int64_t const uint32_t groups_buffer_entry_count
ALWAYS_INLINE int64_t * get_matching_group_value(int64_t *groups_buffer, const uint32_t h, const T *key, const uint32_t key_count, const uint32_t row_size_quad)
NEVER_INLINE DEVICE bool dynamic_watchdog()
const int64_t * init_vals
+ Here is the call graph for this function:

◆ get_scan_output_slot()

ALWAYS_INLINE DEVICE int64_t* get_scan_output_slot ( int64_t *  output_buffer,
const uint32_t  output_buffer_entry_count,
const uint32_t  pos,
const int64_t  offset_in_fragment,
const uint32_t  row_size_quad 
)

Definition at line 245 of file GroupByRuntime.cpp.

References ALWAYS_INLINE, and DEVICE.

250  {
251  uint64_t off = static_cast<uint64_t>(pos) * static_cast<uint64_t>(row_size_quad);
252  if (pos < output_buffer_entry_count) {
253  output_buffer[off] = offset_in_fragment;
254  return output_buffer + off + 1;
255  }
256  return NULL;
257 }

◆ hash_join_idx()

ALWAYS_INLINE DEVICE int64_t hash_join_idx ( int64_t  hash_buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  max_key 
)

Definition at line 284 of file GroupByRuntime.cpp.

References ALWAYS_INLINE, DEVICE, get_hash_slot(), and SUFFIX.

Referenced by hash_join_idx_bitwise(), and hash_join_idx_nullable().

287  {
288  if (key >= min_key && key <= max_key) {
289  return *SUFFIX(get_hash_slot)(reinterpret_cast<int32_t*>(hash_buff), key, min_key);
290  }
291  return -1;
292 }
#define SUFFIX(name)
ALWAYS_INLINE DEVICE int32_t *SUFFIX() get_hash_slot(int32_t *buff, const int64_t key, const int64_t min_key)
Definition: JoinHashImpl.h:39
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ hash_join_idx_bitwise()

ALWAYS_INLINE DEVICE int64_t hash_join_idx_bitwise ( int64_t  hash_buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  max_key,
const int64_t  null_val,
const int64_t  translated_val 
)

Definition at line 332 of file GroupByRuntime.cpp.

References ALWAYS_INLINE, DEVICE, and hash_join_idx().

337  {
338  return key != null_val
339  ? hash_join_idx(hash_buff, key, min_key, max_key)
340  : hash_join_idx(hash_buff, translated_val, min_key, translated_val);
341 }
ALWAYS_INLINE DEVICE int64_t hash_join_idx(int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key)
+ Here is the call graph for this function:

◆ hash_join_idx_bitwise_sharded()

ALWAYS_INLINE DEVICE int64_t hash_join_idx_bitwise_sharded ( int64_t  hash_buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  max_key,
const uint32_t  entry_count_per_shard,
const uint32_t  num_shards,
const uint32_t  device_count,
const int64_t  null_val,
const int64_t  translated_val 
)

Definition at line 382 of file GroupByRuntime.cpp.

References hash_join_idx_sharded().

390  {
391  return key != null_val ? hash_join_idx_sharded(hash_buff,
392  key,
393  min_key,
394  max_key,
395  entry_count_per_shard,
396  num_shards,
397  device_count)
398  : hash_join_idx_sharded(hash_buff,
399  translated_val,
400  min_key,
401  translated_val,
402  entry_count_per_shard,
403  num_shards,
404  device_count);
405 }
ALWAYS_INLINE DEVICE int64_t hash_join_idx_sharded(int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count)
+ Here is the call graph for this function:

◆ hash_join_idx_nullable()

ALWAYS_INLINE DEVICE int64_t hash_join_idx_nullable ( int64_t  hash_buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  max_key,
const int64_t  null_val 
)

Definition at line 306 of file GroupByRuntime.cpp.

References ALWAYS_INLINE, DEVICE, and hash_join_idx().

310  {
311  return key != null_val ? hash_join_idx(hash_buff, key, min_key, max_key) : -1;
312 }
ALWAYS_INLINE DEVICE int64_t hash_join_idx(int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key)
+ Here is the call graph for this function:

◆ hash_join_idx_sharded()

ALWAYS_INLINE DEVICE int64_t hash_join_idx_sharded ( int64_t  hash_buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  max_key,
const uint32_t  entry_count_per_shard,
const uint32_t  num_shards,
const uint32_t  device_count 
)

Definition at line 344 of file GroupByRuntime.cpp.

References ALWAYS_INLINE, DEVICE, get_hash_slot_sharded(), and SUFFIX.

Referenced by hash_join_idx_bitwise_sharded(), and hash_join_idx_sharded_nullable().

350  {
351  if (key >= min_key && key <= max_key) {
352  return *SUFFIX(get_hash_slot_sharded)(reinterpret_cast<int32_t*>(hash_buff),
353  key,
354  min_key,
355  entry_count_per_shard,
356  num_shards,
357  device_count);
358  }
359  return -1;
360 }
#define SUFFIX(name)
ALWAYS_INLINE DEVICE int32_t *SUFFIX() get_hash_slot_sharded(int32_t *buff, const int64_t key, const int64_t min_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count)
Definition: JoinHashImpl.h:60
+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ hash_join_idx_sharded_nullable()

ALWAYS_INLINE DEVICE int64_t hash_join_idx_sharded_nullable ( int64_t  hash_buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  max_key,
const uint32_t  entry_count_per_shard,
const uint32_t  num_shards,
const uint32_t  device_count,
const int64_t  null_val 
)

Definition at line 363 of file GroupByRuntime.cpp.

References ALWAYS_INLINE, DEVICE, and hash_join_idx_sharded().

370  {
371  return key != null_val ? hash_join_idx_sharded(hash_buff,
372  key,
373  min_key,
374  max_key,
375  entry_count_per_shard,
376  num_shards,
377  device_count)
378  : -1;
379 }
ALWAYS_INLINE DEVICE int64_t hash_join_idx_sharded(int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count)
+ Here is the call graph for this function:

◆ key_hash()

ALWAYS_INLINE DEVICE uint32_t key_hash ( const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_byte_width 
)

Definition at line 20 of file GroupByRuntime.cpp.

References MurmurHash1().

Referenced by get_group_value(), get_group_value_columnar(), anonymous_namespace{ResultSetReduction.cpp}::get_group_value_columnar_reduction(), get_group_value_columnar_slot(), get_group_value_columnar_slot_with_watchdog(), get_group_value_columnar_with_watchdog(), get_group_value_reduction(), and get_group_value_with_watchdog().

22  {
23  return MurmurHash1(key, key_byte_width * key_count, 0);
24 }
NEVER_INLINE DEVICE uint32_t MurmurHash1(const void *key, int len, const uint32_t seed)
Definition: MurmurHash.cpp:20
+ Here is the call graph for this function:
+ Here is the caller graph for this function: