OmniSciDB bf83d84833
GroupByRuntime.cpp File Reference

Macros

#define DEF_TRANSLATE_NULL_KEY(key_type)
 

Functions

ALWAYS_INLINE DEVICE uint32_t key_hash (const int64_t *key, const uint32_t key_count, const uint32_t key_byte_width)
 
NEVER_INLINE DEVICE int64_t * get_group_value (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad)
 
NEVER_INLINE DEVICE bool dynamic_watchdog ()
 
NEVER_INLINE DEVICE int64_t * get_group_value_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad)
 
NEVER_INLINE DEVICE int32_t get_group_value_columnar_slot (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width)
 
NEVER_INLINE DEVICE int32_t get_group_value_columnar_slot_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_count, const uint32_t key_width)
 
NEVER_INLINE DEVICE int64_t * get_group_value_columnar (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)
 
NEVER_INLINE DEVICE int64_t * get_group_value_columnar_with_watchdog (int64_t *groups_buffer, const uint32_t groups_buffer_entry_count, const int64_t *key, const uint32_t key_qw_count)
 
ALWAYS_INLINE DEVICE int64_t * get_group_value_fast (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t bucket, const uint32_t row_size_quad)
 
ALWAYS_INLINE DEVICE int64_t * get_group_value_fast_with_original_key (int64_t *groups_buffer, const int64_t key, const int64_t orig_key, const int64_t min_key, const int64_t bucket, const uint32_t row_size_quad)
 
ALWAYS_INLINE DEVICE uint32_t get_columnar_group_bin_offset (int64_t *key_base_ptr, const int64_t key, const int64_t min_key, const int64_t bucket)
 
ALWAYS_INLINE DEVICE int64_t * get_scan_output_slot (int64_t *output_buffer, const uint32_t output_buffer_entry_count, const uint32_t pos, const int64_t offset_in_fragment, const uint32_t row_size_quad)
 
ALWAYS_INLINE DEVICE int32_t get_columnar_scan_output_offset (int64_t *output_buffer, const uint32_t output_buffer_entry_count, const uint32_t pos, const int64_t offset_in_fragment)
 
ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx (int64_t hash_buff, int64_t const key, int64_t const min_key, int64_t const max_key, int64_t bucket_normalization)
 
ALWAYS_INLINE DEVICE int64_t hash_join_idx (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key)
 
ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx_nullable (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const int64_t null_val, const int64_t bucket_normalization)
 
ALWAYS_INLINE DEVICE int64_t hash_join_idx_nullable (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const int64_t null_val)
 
ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx_bitwise (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const int64_t null_val, const int64_t translated_val, const int64_t bucket_normalization)
 
ALWAYS_INLINE DEVICE int64_t hash_join_idx_bitwise (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const int64_t null_val, const int64_t translated_val)
 
ALWAYS_INLINE DEVICE int64_t hash_join_idx_sharded (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count)
 
ALWAYS_INLINE DEVICE int64_t hash_join_idx_sharded_nullable (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count, const int64_t null_val)
 
ALWAYS_INLINE DEVICE int64_t hash_join_idx_bitwise_sharded (int64_t hash_buff, const int64_t key, const int64_t min_key, const int64_t max_key, const uint32_t entry_count_per_shard, const uint32_t num_shards, const uint32_t device_count, const int64_t null_val, const int64_t translated_val)
 

Macro Definition Documentation

#define DEF_TRANSLATE_NULL_KEY(key_type)
Value:
extern "C" NEVER_INLINE DEVICE int64_t translate_null_key_##key_type( \
const key_type key, const key_type null_val, const int64_t translated_val) { \
if (key == null_val) { \
return translated_val; \
} \
return key; \
}

Definition at line 405 of file GroupByRuntime.cpp.
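
As an illustration, expanding the macro for a hypothetical float key type yields a type-suffixed translation helper along these lines (a sketch of the expansion only; which key types the file actually instantiates is not shown here):

#include <cstdint>

extern "C" NEVER_INLINE DEVICE int64_t translate_null_key_float(
    const float key, const float null_val, const int64_t translated_val) {
  // The type's null sentinel is rewritten to a value that can participate in
  // hashing like any ordinary key; every other key passes through unchanged.
  if (key == null_val) {
    return translated_val;
  }
  return key;
}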

Function Documentation

ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx ( int64_t  hash_buff,
int64_t const  key,
int64_t const  min_key,
int64_t const  max_key,
int64_t  bucket_normalization 
)

Definition at line 270 of file GroupByRuntime.cpp.

References get_bucketized_hash_slot(), and SUFFIX.

Referenced by bucketized_hash_join_idx_bitwise(), and bucketized_hash_join_idx_nullable().

{
  if (key >= min_key && key <= max_key) {
    return *SUFFIX(get_bucketized_hash_slot)(
        reinterpret_cast<int32_t*>(hash_buff), key, min_key, bucket_normalization);
  }
  return -1;
}

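The range check guards a direct slot computation. Assuming get_bucketized_hash_slot() (defined in JoinHashImpl.h, referenced above) resolves to buff + (key - min_key) / bucket_normalization, a minimal host-side model of the lookup is:

#include <cstdint>
#include <vector>

// Simplified model of the bucketized perfect-hash join probe. Slot values are
// row indices and -1 marks "no matching row" (conventions assumed for
// illustration, not taken from the runtime).
int64_t bucketized_lookup(const std::vector<int32_t>& buff,
                          int64_t key,
                          int64_t min_key,
                          int64_t max_key,
                          int64_t bucket_normalization) {
  if (key < min_key || key > max_key) {
    return -1;  // key outside the table's range: no match possible
  }
  // Keys are collapsed into buckets of width bucket_normalization.
  return buff[(key - min_key) / bucket_normalization];
}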

ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx_bitwise ( int64_t  hash_buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  max_key,
const int64_t  null_val,
const int64_t  translated_val,
const int64_t  bucket_normalization 
)

Definition at line 313 of file GroupByRuntime.cpp.

References bucketized_hash_join_idx().

{
  return key != null_val
             ? bucketized_hash_join_idx(
                   hash_buff, key, min_key, max_key, bucket_normalization)
             : bucketized_hash_join_idx(
                   hash_buff, translated_val, min_key, translated_val, bucket_normalization);
}

ALWAYS_INLINE DEVICE int64_t bucketized_hash_join_idx_nullable ( int64_t  hash_buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  max_key,
const int64_t  null_val,
const int64_t  bucket_normalization 
)

Definition at line 293 of file GroupByRuntime.cpp.

References bucketized_hash_join_idx().

{
  return key != null_val
             ? bucketized_hash_join_idx(
                   hash_buff, key, min_key, max_key, bucket_normalization)
             : -1;
}

NEVER_INLINE DEVICE bool dynamic_watchdog ( )

Definition at line 116 of file cuda_mapd_rt.cu.

References dw_abort, dw_cycle_budget, DW_DEADLINE, dw_sm_cycle_start, dynamic_watchdog_init(), get_smid(), logger::INFO, LOG, and read_cycle_counter().

Referenced by anonymous_namespace{ResultSetReduction.cpp}::check_watchdog(), check_watchdog_rt(), get_group_value_columnar_slot_with_watchdog(), get_group_value_columnar_with_watchdog(), and get_group_value_with_watchdog().

{
  // check for dynamic watchdog, if triggered all threads return true
  if (dw_cycle_budget == 0LL) {
    return false;  // Uninitialized watchdog can't check time
  }
  if (dw_abort == 1) {
    return true;  // Received host request to abort
  }
  uint32_t smid = get_smid();
  if (smid >= 128) {
    return false;
  }
  __shared__ volatile int64_t dw_block_cycle_start;  // Thread block shared cycle start
  __shared__ volatile bool
      dw_should_terminate;  // all threads within a block should return together if
                            // watchdog criteria is met

  // thread 0 either initializes or reads the initial clock cycle; the result is stored
  // into shared memory. Since all threads within a block share the same SM, there's no
  // point in using more threads here.
  if (threadIdx.x == 0) {
    dw_block_cycle_start = 0LL;
    int64_t cycle_count = static_cast<int64_t>(clock64());
    // Make sure the block hasn't switched SMs
    if (smid == get_smid()) {
      dw_block_cycle_start = static_cast<int64_t>(
          atomicCAS(reinterpret_cast<unsigned long long*>(&dw_sm_cycle_start[smid]),
                    0ULL,
                    static_cast<unsigned long long>(cycle_count)));
    }

    int64_t cycles = cycle_count - dw_block_cycle_start;
    if ((smid == get_smid()) && (dw_block_cycle_start > 0LL) &&
        (cycles > dw_cycle_budget)) {
      // Check if we're out of time on this particular SM
      dw_should_terminate = true;
    } else {
      dw_should_terminate = false;
    }
  }
  __syncthreads();
  return dw_should_terminate;
}

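The atomicCAS above is a publish-once handshake: the first block on an SM stores its cycle count into dw_sm_cycle_start[smid], and every later block reads that stored value back (atomicCAS returns the old value, so the initializing block sees 0 and never terminates on its first check). A host-side sketch of the same pattern with std::atomic, names hypothetical:

#include <atomic>
#include <cstdint>

std::atomic<int64_t> sm_cycle_start{0};  // stand-in for dw_sm_cycle_start[smid]

bool over_budget(int64_t now, int64_t cycle_budget) {
  int64_t expected = 0;
  // On success we published our own start time, so there is nothing to measure
  // yet. On failure `expected` now holds the previously published start time,
  // mirroring atomicCAS returning a non-zero old value.
  if (sm_cycle_start.compare_exchange_strong(expected, now)) {
    return false;
  }
  return (now - expected) > cycle_budget;
}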

ALWAYS_INLINE DEVICE uint32_t get_columnar_group_bin_offset ( int64_t *  key_base_ptr,
const int64_t  key,
const int64_t  min_key,
const int64_t  bucket 
)

Definition at line 229 of file GroupByRuntime.cpp.

References EMPTY_KEY_64.

{
  int64_t off = key - min_key;
  if (bucket) {
    off /= bucket;
  }
  if (key_base_ptr[off] == EMPTY_KEY_64) {
    key_base_ptr[off] = key;
  }
  return off;
}
ALWAYS_INLINE DEVICE int32_t get_columnar_scan_output_offset ( int64_t *  output_buffer,
const uint32_t  output_buffer_entry_count,
const uint32_t  pos,
const int64_t  offset_in_fragment 
)

Definition at line 258 of file GroupByRuntime.cpp.

{
  if (pos < output_buffer_entry_count) {
    output_buffer[pos] = offset_in_fragment;
    return pos;
  }
  return -1;
}
NEVER_INLINE DEVICE int64_t* get_group_value ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width,
const uint32_t  row_size_quad 
)

Definition at line 26 of file GroupByRuntime.cpp.

References get_matching_group_value(), and key_hash().

Referenced by ResultSetStorage::moveOneEntryToBuffer().

{
  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value(
      groups_buffer, h, key, key_count, key_width, row_size_quad);
  if (matching_group) {
    return matching_group;
  }
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value(
        groups_buffer, h_probe, key, key_count, key_width, row_size_quad);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
  }
  return NULL;
}

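get_group_value() is open addressing with linear probing: start at the hashed slot, walk forward with wraparound, and give up only after visiting every entry. A self-contained sketch of just the probe sequence (one key per slot, payload omitted; kEmptyKey stands in for the runtime's empty-slot sentinel):

#include <cstdint>
#include <vector>

constexpr int64_t kEmptyKey = INT64_MIN;  // placeholder for EMPTY_KEY_64

int64_t* probe(std::vector<int64_t>& slots, int64_t key) {
  const uint32_t n = static_cast<uint32_t>(slots.size());
  uint32_t h = static_cast<uint32_t>(key) % n;  // stand-in for key_hash()
  for (uint32_t i = 0; i < n; ++i) {
    const uint32_t pos = (h + i) % n;
    if (slots[pos] == kEmptyKey) {
      slots[pos] = key;  // claim the empty slot for this new group
      return &slots[pos];
    }
    if (slots[pos] == key) {
      return &slots[pos];  // group already present
    }
  }
  return nullptr;  // table full, matching the NULL return above
}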

NEVER_INLINE DEVICE int64_t* get_group_value_columnar ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_qw_count 
)

Definition at line 140 of file GroupByRuntime.cpp.

References get_matching_group_value_columnar(), and key_hash().

Referenced by ResultSetStorage::moveOneEntryToBuffer().

{
  uint32_t h = key_hash(key, key_qw_count, sizeof(int64_t)) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value_columnar(
      groups_buffer, h, key, key_qw_count, groups_buffer_entry_count);
  if (matching_group) {
    return matching_group;
  }
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value_columnar(
        groups_buffer, h_probe, key, key_qw_count, groups_buffer_entry_count);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
  }
  return NULL;
}


NEVER_INLINE DEVICE int32_t get_group_value_columnar_slot ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width 
)

Definition at line 86 of file GroupByRuntime.cpp.

References get_matching_group_value_columnar_slot(), and key_hash().

{
  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
  int32_t matching_slot = get_matching_group_value_columnar_slot(
      groups_buffer, groups_buffer_entry_count, h, key, key_count, key_width);
  if (matching_slot != -1) {
    return h;
  }
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_slot = get_matching_group_value_columnar_slot(
        groups_buffer, groups_buffer_entry_count, h_probe, key, key_count, key_width);
    if (matching_slot != -1) {
      return h_probe;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
  }
  return -1;
}

NEVER_INLINE DEVICE int32_t get_group_value_columnar_slot_with_watchdog ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width 
)

Definition at line 110 of file GroupByRuntime.cpp.

References dynamic_watchdog(), get_matching_group_value_columnar_slot(), and key_hash().

{
  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
  int32_t matching_slot = get_matching_group_value_columnar_slot(
      groups_buffer, groups_buffer_entry_count, h, key, key_count, key_width);
  if (matching_slot != -1) {
    return h;
  }
  uint32_t watchdog_countdown = 100;
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_slot = get_matching_group_value_columnar_slot(
        groups_buffer, groups_buffer_entry_count, h_probe, key, key_count, key_width);
    if (matching_slot != -1) {
      return h_probe;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
    if (--watchdog_countdown == 0) {
      if (dynamic_watchdog()) {
        return -1;
      }
      watchdog_countdown = 100;
    }
  }
  return -1;
}

NEVER_INLINE DEVICE int64_t* get_group_value_columnar_with_watchdog ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_qw_count 
)

Definition at line 163 of file GroupByRuntime.cpp.

References dynamic_watchdog(), get_matching_group_value_columnar(), and key_hash().

{
  uint32_t h = key_hash(key, key_qw_count, sizeof(int64_t)) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value_columnar(
      groups_buffer, h, key, key_qw_count, groups_buffer_entry_count);
  if (matching_group) {
    return matching_group;
  }
  uint32_t watchdog_countdown = 100;
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value_columnar(
        groups_buffer, h_probe, key, key_qw_count, groups_buffer_entry_count);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
    if (--watchdog_countdown == 0) {
      if (dynamic_watchdog()) {
        return NULL;
      }
      watchdog_countdown = 100;
    }
  }
  return NULL;
}

ALWAYS_INLINE DEVICE int64_t* get_group_value_fast ( int64_t *  groups_buffer,
const int64_t  key,
const int64_t  min_key,
const int64_t  bucket,
const uint32_t  row_size_quad 
)

Definition at line 193 of file GroupByRuntime.cpp.

References EMPTY_KEY_64.

{
  int64_t key_diff = key - min_key;
  if (bucket) {
    key_diff /= bucket;
  }
  int64_t off = key_diff * row_size_quad;
  if (groups_buffer[off] == EMPTY_KEY_64) {
    groups_buffer[off] = key;
  }
  return groups_buffer + off + 1;
}
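For example, with min_key = 100, bucket = 10, and row_size_quad = 4, key 137 falls in bucket (137 - 100) / 10 = 3, so its row begins at quad-word offset 3 * 4 = 12: groups_buffer[12] holds the key and the aggregate slots start at groups_buffer + 13.
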
ALWAYS_INLINE DEVICE int64_t* get_group_value_fast_with_original_key ( int64_t *  groups_buffer,
const int64_t  key,
const int64_t  orig_key,
const int64_t  min_key,
const int64_t  bucket,
const uint32_t  row_size_quad 
)

Definition at line 210 of file GroupByRuntime.cpp.

References EMPTY_KEY_64.

{
  int64_t key_diff = key - min_key;
  if (bucket) {
    key_diff /= bucket;
  }
  int64_t off = key_diff * row_size_quad;
  if (groups_buffer[off] == EMPTY_KEY_64) {
    groups_buffer[off] = orig_key;
  }
  return groups_buffer + off + 1;
}
NEVER_INLINE DEVICE int64_t* get_group_value_with_watchdog ( int64_t *  groups_buffer,
const uint32_t  groups_buffer_entry_count,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width,
const uint32_t  row_size_quad 
)

Definition at line 53 of file GroupByRuntime.cpp.

References dynamic_watchdog(), get_matching_group_value(), and key_hash().

{
  uint32_t h = key_hash(key, key_count, key_width) % groups_buffer_entry_count;
  int64_t* matching_group = get_matching_group_value(
      groups_buffer, h, key, key_count, key_width, row_size_quad);
  if (matching_group) {
    return matching_group;
  }
  uint32_t watchdog_countdown = 100;
  uint32_t h_probe = (h + 1) % groups_buffer_entry_count;
  while (h_probe != h) {
    matching_group = get_matching_group_value(
        groups_buffer, h_probe, key, key_count, key_width, row_size_quad);
    if (matching_group) {
      return matching_group;
    }
    h_probe = (h_probe + 1) % groups_buffer_entry_count;
    if (--watchdog_countdown == 0) {
      if (dynamic_watchdog()) {
        return NULL;
      }
      watchdog_countdown = 100;
    }
  }
  return NULL;
}

ALWAYS_INLINE DEVICE int64_t* get_scan_output_slot ( int64_t *  output_buffer,
const uint32_t  output_buffer_entry_count,
const uint32_t  pos,
const int64_t  offset_in_fragment,
const uint32_t  row_size_quad 
)

Definition at line 243 of file GroupByRuntime.cpp.

Referenced by SqliteMemDatabase::runSelect().

{
  uint64_t off = static_cast<uint64_t>(pos) * static_cast<uint64_t>(row_size_quad);
  if (pos < output_buffer_entry_count) {
    output_buffer[off] = offset_in_fragment;
    return output_buffer + off + 1;
  }
  return NULL;
}


ALWAYS_INLINE DEVICE int64_t hash_join_idx ( int64_t  hash_buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  max_key 
)

Definition at line 282 of file GroupByRuntime.cpp.

References get_hash_slot(), and SUFFIX.

Referenced by hash_join_idx_bitwise(), and hash_join_idx_nullable().

{
  if (key >= min_key && key <= max_key) {
    return *SUFFIX(get_hash_slot)(reinterpret_cast<int32_t*>(hash_buff), key, min_key);
  }
  return -1;
}
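Putting the pieces together, here is a toy host-side model of a perfect-hash join table over a dense key range (one int32_t payload per key and -1 for "no matching row"; layout assumed for illustration only):

#include <cstdint>
#include <vector>

struct ToyJoinTable {
  int64_t min_key;
  int64_t max_key;
  std::vector<int32_t> slots;  // max_key - min_key + 1 entries
};

int64_t toy_hash_join_idx(const ToyJoinTable& t, int64_t key) {
  if (key >= t.min_key && key <= t.max_key) {
    return t.slots[key - t.min_key];  // stand-in for *get_hash_slot(...)
  }
  return -1;  // out of range, same contract as above
}

hash_join_idx_nullable() and hash_join_idx_bitwise() below layer null handling on top of this core lookup.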

ALWAYS_INLINE DEVICE int64_t hash_join_idx_bitwise ( int64_t  hash_buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  max_key,
const int64_t  null_val,
const int64_t  translated_val 
)

Definition at line 330 of file GroupByRuntime.cpp.

References hash_join_idx().

{
  return key != null_val
             ? hash_join_idx(hash_buff, key, min_key, max_key)
             : hash_join_idx(hash_buff, translated_val, min_key, translated_val);
}

ALWAYS_INLINE DEVICE int64_t hash_join_idx_bitwise_sharded ( int64_t  hash_buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  max_key,
const uint32_t  entry_count_per_shard,
const uint32_t  num_shards,
const uint32_t  device_count,
const int64_t  null_val,
const int64_t  translated_val 
)

Definition at line 380 of file GroupByRuntime.cpp.

References hash_join_idx_sharded().

{
  return key != null_val ? hash_join_idx_sharded(hash_buff,
                                                 key,
                                                 min_key,
                                                 max_key,
                                                 entry_count_per_shard,
                                                 num_shards,
                                                 device_count)
                         : hash_join_idx_sharded(hash_buff,
                                                 translated_val,
                                                 min_key,
                                                 translated_val,
                                                 entry_count_per_shard,
                                                 num_shards,
                                                 device_count);
}

ALWAYS_INLINE DEVICE int64_t hash_join_idx_nullable ( int64_t  hash_buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  max_key,
const int64_t  null_val 
)

Definition at line 304 of file GroupByRuntime.cpp.

References hash_join_idx().

{
  return key != null_val ? hash_join_idx(hash_buff, key, min_key, max_key) : -1;
}

ALWAYS_INLINE DEVICE int64_t hash_join_idx_sharded ( int64_t  hash_buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  max_key,
const uint32_t  entry_count_per_shard,
const uint32_t  num_shards,
const uint32_t  device_count 
)

Definition at line 342 of file GroupByRuntime.cpp.

References get_hash_slot_sharded(), and SUFFIX.

Referenced by hash_join_idx_bitwise_sharded(), and hash_join_idx_sharded_nullable().

{
  if (key >= min_key && key <= max_key) {
    return *SUFFIX(get_hash_slot_sharded)(reinterpret_cast<int32_t*>(hash_buff),
                                          key,
                                          min_key,
                                          entry_count_per_shard,
                                          num_shards,
                                          device_count);
  }
  return -1;
}
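A rough model of the sharded variant, under the assumption that keys are assigned to shards by key % num_shards and that each device stores its shards back to back, entry_count_per_shard slots apiece (the authoritative layout lives in get_hash_slot_sharded() in JoinHashImpl.h):

#include <cstdint>
#include <vector>

int64_t toy_sharded_lookup(const std::vector<int32_t>& buff,
                           int64_t key,
                           int64_t min_key,
                           uint32_t entry_count_per_shard,
                           uint32_t num_shards,
                           uint32_t device_count) {
  const uint32_t shard = static_cast<uint32_t>(key % num_shards);
  const uint32_t shard_index_on_device = shard / device_count;  // assumed mapping
  const int32_t* shard_buff =
      buff.data() + shard_index_on_device * entry_count_per_shard;
  // Within one shard, consecutive resident keys are num_shards apart,
  // hence the division when computing the slot.
  return shard_buff[(key - min_key) / num_shards];
}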

ALWAYS_INLINE DEVICE int64_t hash_join_idx_sharded_nullable ( int64_t  hash_buff,
const int64_t  key,
const int64_t  min_key,
const int64_t  max_key,
const uint32_t  entry_count_per_shard,
const uint32_t  num_shards,
const uint32_t  device_count,
const int64_t  null_val 
)

Definition at line 361 of file GroupByRuntime.cpp.

References hash_join_idx_sharded().

{
  return key != null_val ? hash_join_idx_sharded(hash_buff,
                                                 key,
                                                 min_key,
                                                 max_key,
                                                 entry_count_per_shard,
                                                 num_shards,
                                                 device_count)
                         : -1;
}

ALWAYS_INLINE DEVICE uint32_t key_hash ( const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_byte_width 
)

Definition at line 20 of file GroupByRuntime.cpp.

References MurmurHash1().

Referenced by get_group_value(), get_group_value_columnar(), anonymous_namespace{ResultSetReduction.cpp}::get_group_value_columnar_reduction(), get_group_value_columnar_slot(), get_group_value_columnar_slot_with_watchdog(), get_group_value_columnar_with_watchdog(), result_set::get_group_value_reduction(), and get_group_value_with_watchdog().

{
  return MurmurHash1(key, key_byte_width * key_count, 0);
}
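The callers listed above turn this hash into a bucket index by reducing it modulo the table's entry count; key_count keys of key_byte_width bytes each are hashed as one contiguous byte span. A minimal sketch, with a deliberately simple stand-in for MurmurHash1 (the runtime's real hash lives in MurmurHash.cpp):

#include <cstdint>

// Toy mixing function, NOT the real MurmurHash1; any 32-bit hash of the raw
// key bytes would serve for this illustration.
uint32_t murmur_stub(const void* key, int len, uint32_t seed) {
  uint32_t h = seed;
  const unsigned char* p = static_cast<const unsigned char*>(key);
  for (int i = 0; i < len; ++i) {
    h = h * 31u + p[i];
  }
  return h;
}

uint32_t slot_for_key(const int64_t* key,
                      uint32_t key_count,
                      uint32_t key_byte_width,
                      uint32_t entry_count) {
  // Same composition as the group-by callers: hash the key bytes, then
  // reduce modulo the number of hash-table entries.
  return murmur_stub(key, key_byte_width * key_count, 0) % entry_count;
}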