OmniSciDB  04ee39c94c
RuntimeFunctions.cpp File Reference
#include "RuntimeFunctions.h"
#include "../Shared/funcannotations.h"
#include "BufferCompaction.h"
#include "HyperLogLogRank.h"
#include "MurmurHash.h"
#include "TypePunning.h"
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstring>
#include <thread>
#include <tuple>
#include "DecodersImpl.h"
#include "GroupByRuntime.cpp"
#include "JoinHashTableQueryRuntime.cpp"
#include "TopKRuntime.cpp"

Macros

#define DEF_ARITH_NULLABLE(type, null_type, opname, opsym)
 
#define DEF_ARITH_NULLABLE_LHS(type, null_type, opname, opsym)
 
#define DEF_ARITH_NULLABLE_RHS(type, null_type, opname, opsym)
 
#define DEF_CMP_NULLABLE(type, null_type, opname, opsym)
 
#define DEF_CMP_NULLABLE_LHS(type, null_type, opname, opsym)
 
#define DEF_CMP_NULLABLE_RHS(type, null_type, opname, opsym)
 
#define DEF_SAFE_DIV_NULLABLE(type, null_type, opname)
 
#define DEF_BINARY_NULLABLE_ALL_OPS(type, null_type)
 
#define DEF_UMINUS_NULLABLE(type, null_type)
 
#define DEF_CAST_NULLABLE(from_type, to_type)
 
#define DEF_CAST_NULLABLE_BIDIR(type1, type2)
 
#define GPU_RT_STUB   NEVER_INLINE __attribute__((optnone))
 
#define DEF_AGG_MAX_INT(n)
 
#define DEF_AGG_MIN_INT(n)
 
#define DEF_AGG_ID_INT(n)
 
#define DEF_WRITE_PROJECTION_INT(n)
 
#define DEF_SKIP_AGG_ADD(base_agg_func)
 
#define DEF_SKIP_AGG(base_agg_func)
 
#define DATA_T   int64_t
 
#define DATA_T   int32_t
 
#define DATA_T   int16_t
 
#define DATA_T   int8_t
 
#define DEF_SKIP_AGG_ADD(base_agg_func)
 
#define DEF_SKIP_AGG(base_agg_func)
 
#define DATA_T   double
 
#define ADDR_T   int64_t
 
#define DATA_T   float
 
#define ADDR_T   int32_t
 
#define DEF_SHARED_AGG_RET_STUBS(base_agg_func)
 
#define DEF_SHARED_AGG_STUBS(base_agg_func)
 

Functions

ALWAYS_INLINE int64_t scale_decimal_up (const int64_t operand, const uint64_t scale, const int64_t operand_null_val, const int64_t result_null_val)
 
ALWAYS_INLINE int64_t scale_decimal_down_nullable (const int64_t operand, const int64_t scale, const int64_t null_val)
 
ALWAYS_INLINE int64_t scale_decimal_down_not_nullable (const int64_t operand, const int64_t scale, const int64_t null_val)
 
ALWAYS_INLINE int8_t logical_not (const int8_t operand, const int8_t null_val)
 
ALWAYS_INLINE int8_t logical_and (const int8_t lhs, const int8_t rhs, const int8_t null_val)
 
ALWAYS_INLINE int8_t logical_or (const int8_t lhs, const int8_t rhs, const int8_t null_val)
 
ALWAYS_INLINE uint64_t agg_count (uint64_t *agg, const int64_t)
 
ALWAYS_INLINE void agg_count_distinct_bitmap (int64_t *agg, const int64_t val, const int64_t min_val)
 
GPU_RT_STUB void agg_count_distinct_bitmap_gpu (int64_t *, const int64_t, const int64_t, const int64_t, const int64_t, const uint64_t, const uint64_t)
 
NEVER_INLINE void agg_approximate_count_distinct (int64_t *agg, const int64_t key, const uint32_t b)
 
GPU_RT_STUB void agg_approximate_count_distinct_gpu (int64_t *, const int64_t, const uint32_t, const int64_t, const int64_t)
 
ALWAYS_INLINE int8_t bit_is_set (const int64_t bitset, const int64_t val, const int64_t min_val, const int64_t max_val, const int64_t null_val, const int8_t null_bool_val)
 
ALWAYS_INLINE int64_t agg_sum (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE void agg_max (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE void agg_min (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE void agg_id (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE void agg_count_distinct_bitmap_skip_val (int64_t *agg, const int64_t val, const int64_t min_val, const int64_t skip_val)
 
GPU_RT_STUB void agg_count_distinct_bitmap_skip_val_gpu (int64_t *, const int64_t, const int64_t, const int64_t, const int64_t, const int64_t, const uint64_t, const uint64_t)
 
ALWAYS_INLINE uint32_t agg_count_int32 (uint32_t *agg, const int32_t)
 
ALWAYS_INLINE int32_t agg_sum_int32 (int32_t *agg, const int32_t val)
 
ALWAYS_INLINE int64_t agg_sum_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)
 
ALWAYS_INLINE int32_t agg_sum_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)
 
ALWAYS_INLINE uint64_t agg_count_skip_val (uint64_t *agg, const int64_t val, const int64_t skip_val)
 
ALWAYS_INLINE uint32_t agg_count_int32_skip_val (uint32_t *agg, const int32_t val, const int32_t skip_val)
 
ALWAYS_INLINE uint64_t agg_count_double (uint64_t *agg, const double val)
 
ALWAYS_INLINE void agg_sum_double (int64_t *agg, const double val)
 
ALWAYS_INLINE void agg_max_double (int64_t *agg, const double val)
 
ALWAYS_INLINE void agg_min_double (int64_t *agg, const double val)
 
ALWAYS_INLINE void agg_id_double (int64_t *agg, const double val)
 
ALWAYS_INLINE uint32_t agg_count_float (uint32_t *agg, const float val)
 
ALWAYS_INLINE void agg_sum_float (int32_t *agg, const float val)
 
ALWAYS_INLINE void agg_max_float (int32_t *agg, const float val)
 
ALWAYS_INLINE void agg_min_float (int32_t *agg, const float val)
 
ALWAYS_INLINE void agg_id_float (int32_t *agg, const float val)
 
ALWAYS_INLINE uint64_t agg_count_double_skip_val (uint64_t *agg, const double val, const double skip_val)
 
ALWAYS_INLINE uint32_t agg_count_float_skip_val (uint32_t *agg, const float val, const float skip_val)
 
ALWAYS_INLINE int64_t decimal_floor (const int64_t x, const int64_t scale)
 
ALWAYS_INLINE int64_t decimal_ceil (const int64_t x, const int64_t scale)
 
GPU_RT_STUB void agg_max_int16_skip_val_shared (int16_t *agg, const int16_t val, const int16_t skip_val)
 
GPU_RT_STUB void agg_max_int8_skip_val_shared (int8_t *agg, const int8_t val, const int8_t skip_val)
 
GPU_RT_STUB void agg_min_int16_skip_val_shared (int16_t *agg, const int16_t val, const int16_t skip_val)
 
GPU_RT_STUB void agg_min_int8_skip_val_shared (int8_t *agg, const int8_t val, const int8_t skip_val)
 
GPU_RT_STUB void agg_id_double_shared_slow (int64_t *agg, const double *val)
 
GPU_RT_STUB int64_t agg_sum_shared (int64_t *agg, const int64_t val)
 
GPU_RT_STUB int64_t agg_sum_skip_val_shared (int64_t *agg, const int64_t val, const int64_t skip_val)
 
GPU_RT_STUB int32_t agg_sum_int32_shared (int32_t *agg, const int32_t val)
 
GPU_RT_STUB int32_t agg_sum_int32_skip_val_shared (int32_t *agg, const int32_t val, const int32_t skip_val)
 
GPU_RT_STUB void agg_sum_double_shared (int64_t *agg, const double val)
 
GPU_RT_STUB void agg_sum_double_skip_val_shared (int64_t *agg, const double val, const double skip_val)
 
GPU_RT_STUB void agg_sum_float_shared (int32_t *agg, const float val)
 
GPU_RT_STUB void agg_sum_float_skip_val_shared (int32_t *agg, const float val, const float skip_val)
 
GPU_RT_STUB void force_sync ()
 
GPU_RT_STUB void sync_warp ()
 
GPU_RT_STUB void sync_warp_protected (int64_t thread_pos, int64_t row_count)
 
 __attribute__ ((noinline)) int32_t pos_start_impl(int32_t *error_code)
 
GPU_RT_STUB int8_t thread_warp_idx (const int8_t warp_sz)
 
ALWAYS_INLINE int32_t record_error_code (const int32_t err_code, int32_t *error_codes)
 
template<typename T >
ALWAYS_INLINE int64_t * get_matching_group_value (int64_t *groups_buffer, const uint32_t h, const T *key, const uint32_t key_count, const uint32_t row_size_quad)
 
ALWAYS_INLINE int64_t * get_matching_group_value (int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t *init_vals)
 
template<typename T >
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot (int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const T *key, const uint32_t key_count)
 
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot (int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const int64_t *key, const uint32_t key_count, const uint32_t key_width)
 
ALWAYS_INLINE int64_t * get_matching_group_value_columnar (int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_qw_count, const size_t entry_count)
 
ALWAYS_INLINE int64_t * get_matching_group_value_perfect_hash (int64_t *groups_buffer, const uint32_t hashed_index, const int64_t *key, const uint32_t key_count, const uint32_t row_size_quad)
 
ALWAYS_INLINE void set_matching_group_value_perfect_hash_columnar (int64_t *groups_buffer, const uint32_t hashed_index, const int64_t *key, const uint32_t key_count, const uint32_t entry_count)
 
ALWAYS_INLINE int64_t * get_group_value_fast_keyless (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t, const uint32_t row_size_quad)
 
ALWAYS_INLINE int64_t * get_group_value_fast_keyless_semiprivate (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t, const uint32_t row_size_quad, const uint8_t thread_warp_idx, const uint8_t warp_size)
 
ALWAYS_INLINE int8_t * extract_str_ptr (const uint64_t str_and_len)
 
ALWAYS_INLINE int32_t extract_str_len (const uint64_t str_and_len)
 
ALWAYS_INLINE uint64_t string_pack (const int8_t *ptr, const int32_t len)
 
ALWAYS_INLINE DEVICE int32_t char_length (const char *str, const int32_t str_len)
 
ALWAYS_INLINE DEVICE int32_t char_length_nullable (const char *str, const int32_t str_len, const int32_t int_null)
 
ALWAYS_INLINE DEVICE int32_t key_for_string_encoded (const int32_t str_id)
 
ALWAYS_INLINE int64_t row_number_window_func (const int64_t output_buff, const int64_t pos)
 
ALWAYS_INLINE double percent_window_func (const int64_t output_buff, const int64_t pos)
 
ALWAYS_INLINE double load_double (const int64_t *agg)
 
ALWAYS_INLINE float load_float (const int32_t *agg)
 
ALWAYS_INLINE double load_avg_int (const int64_t *sum, const int64_t *count, const double null_val)
 
ALWAYS_INLINE double load_avg_decimal (const int64_t *sum, const int64_t *count, const double null_val, const uint32_t scale)
 
ALWAYS_INLINE double load_avg_double (const int64_t *agg, const int64_t *count, const double null_val)
 
ALWAYS_INLINE double load_avg_float (const int32_t *agg, const int32_t *count, const double null_val)
 
NEVER_INLINE void linear_probabilistic_count (uint8_t *bitmap, const uint32_t bitmap_bytes, const uint8_t *key_bytes, const uint32_t key_len)
 
void multifrag_query_hoisted_literals (const int8_t ***col_buffers, const uint64_t *num_fragments, const int8_t *literals, const int64_t *num_rows, const uint64_t *frag_row_offsets, const int32_t *max_matched, int32_t *total_matched, const int64_t *init_agg_value, int64_t **out, int32_t *error_code, const uint32_t *num_tables_ptr, const int64_t *join_hash_tables)
 
void multifrag_query (const int8_t ***col_buffers, const uint64_t *num_fragments, const int64_t *num_rows, const uint64_t *frag_row_offsets, const int32_t *max_matched, int32_t *total_matched, const int64_t *init_agg_value, int64_t **out, int32_t *error_code, const uint32_t *num_tables_ptr, const int64_t *join_hash_tables)
 

Variables

const int32_t groups_buffer_size
 
int64_t * src
 
int64_t const int32_t sz
 
const int64_t * init_vals
 
const int64_t const uint32_t groups_buffer_entry_count
 
const int64_t const uint32_t const uint32_t key_qw_count
 
const int64_t const uint32_t const uint32_t const uint32_t agg_col_count
 
const int64_t const uint32_t const uint32_t const uint32_t const bool keyless
 
const int64_t const uint32_t const uint32_t const uint32_t const bool const int8_t warp_size
 
const int64_t const uint32_t const uint32_t const uint32_t const bool const bool blocks_share_memory
 
const int64_t const uint32_t const uint32_t const uint32_t const bool const bool const int32_t frag_idx
 
const int8_t * literals
 
const int8_t const int64_t * num_rows
 
const int8_t const int64_t const uint64_t * frag_row_offsets
 
const int8_t const int64_t const uint64_t const int32_t * max_matched
 
const int8_t const int64_t const uint64_t const int32_t const int64_t * init_agg_value
 
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t ** out
 
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t * join_hash_tables
 
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t * error_code
 
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t int32_t * total_matched
 

Macro Definition Documentation

◆ ADDR_T [1/2]

#define ADDR_T   int64_t

Definition at line 614 of file RuntimeFunctions.cpp.

◆ ADDR_T [2/2]

#define ADDR_T   int32_t

Definition at line 614 of file RuntimeFunctions.cpp.

◆ DATA_T [1/6]

#define DATA_T   int64_t

Definition at line 613 of file RuntimeFunctions.cpp.

◆ DATA_T [2/6]

#define DATA_T   int32_t

Definition at line 613 of file RuntimeFunctions.cpp.

◆ DATA_T [3/6]

#define DATA_T   int16_t

Definition at line 613 of file RuntimeFunctions.cpp.

◆ DATA_T [4/6]

#define DATA_T   int8_t

Definition at line 613 of file RuntimeFunctions.cpp.

◆ DATA_T [5/6]

#define DATA_T   double

Definition at line 613 of file RuntimeFunctions.cpp.

◆ DATA_T [6/6]

#define DATA_T   float

Definition at line 613 of file RuntimeFunctions.cpp.

◆ DEF_AGG_ID_INT

#define DEF_AGG_ID_INT (   n)
Value:
extern "C" ALWAYS_INLINE void agg_id_int##n(int##n##_t* agg, const int##n##_t val) { \
*agg = val; \
}
#define ALWAYS_INLINE

Definition at line 406 of file RuntimeFunctions.cpp.

◆ DEF_AGG_MAX_INT

#define DEF_AGG_MAX_INT (   n)
Value:
extern "C" ALWAYS_INLINE void agg_max_int##n(int##n##_t* agg, const int##n##_t val) { \
*agg = std::max(*agg, val); \
}
#define ALWAYS_INLINE

Definition at line 386 of file RuntimeFunctions.cpp.

◆ DEF_AGG_MIN_INT

#define DEF_AGG_MIN_INT (   n)
Value:
extern "C" ALWAYS_INLINE void agg_min_int##n(int##n##_t* agg, const int##n##_t val) { \
*agg = std::min(*agg, val); \
}
#define ALWAYS_INLINE

Definition at line 396 of file RuntimeFunctions.cpp.

◆ DEF_ARITH_NULLABLE

#define DEF_ARITH_NULLABLE (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE type opname##_##type##_nullable( \
const type lhs, const type rhs, const null_type null_val) { \
if (lhs != null_val && rhs != null_val) { \
return lhs opsym rhs; \
} \
return null_val; \
}
#define ALWAYS_INLINE

Definition at line 41 of file RuntimeFunctions.cpp.
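For orientation, the expansion below shows roughly what one instantiation generates; the int32_t/int64_t pairing is only an assumed example (the concrete instantiations appear further down in RuntimeFunctions.cpp), and the ALWAYS_INLINE attribute from funcannotations.h is omitted.

#include <cstdint>
// Hypothetical expansion of DEF_ARITH_NULLABLE(int32_t, int64_t, add, +):
extern "C" int32_t add_int32_t_nullable(const int32_t lhs,
                                        const int32_t rhs,
                                        const int64_t null_val) {
  if (lhs != null_val && rhs != null_val) {
    return lhs + rhs;  // both operands non-NULL: do the arithmetic
  }
  return null_val;     // NULL in, NULL out
}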

◆ DEF_ARITH_NULLABLE_LHS

#define DEF_ARITH_NULLABLE_LHS (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE type opname##_##type##_nullable_lhs( \
const type lhs, const type rhs, const null_type null_val) { \
if (lhs != null_val) { \
return lhs opsym rhs; \
} \
return null_val; \
}
#define ALWAYS_INLINE

Definition at line 50 of file RuntimeFunctions.cpp.

◆ DEF_ARITH_NULLABLE_RHS

#define DEF_ARITH_NULLABLE_RHS (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE type opname##_##type##_nullable_rhs( \
const type lhs, const type rhs, const null_type null_val) { \
if (rhs != null_val) { \
return lhs opsym rhs; \
} \
return null_val; \
}
#define ALWAYS_INLINE

Definition at line 59 of file RuntimeFunctions.cpp.

◆ DEF_BINARY_NULLABLE_ALL_OPS

#define DEF_BINARY_NULLABLE_ALL_OPS (   type,
  null_type 
)

Definition at line 113 of file RuntimeFunctions.cpp.

◆ DEF_CAST_NULLABLE

#define DEF_CAST_NULLABLE (   from_type,
  to_type 
)
Value:
extern "C" ALWAYS_INLINE to_type cast_##from_type##_to_##to_type##_nullable( \
const from_type operand, \
const from_type from_null_val, \
const to_type to_null_val) { \
return operand == from_null_val ? to_null_val : operand; \
}
#define ALWAYS_INLINE

Definition at line 221 of file RuntimeFunctions.cpp.

◆ DEF_CAST_NULLABLE_BIDIR

#define DEF_CAST_NULLABLE_BIDIR (   type1,
  type2 
)
Value:
DEF_CAST_NULLABLE(type1, type2) \
DEF_CAST_NULLABLE(type2, type1)
#define DEF_CAST_NULLABLE(from_type, to_type)

Definition at line 229 of file RuntimeFunctions.cpp.

◆ DEF_CMP_NULLABLE

#define DEF_CMP_NULLABLE (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE int8_t opname##_##type##_nullable( \
const type lhs, \
const type rhs, \
const null_type null_val, \
const int8_t null_bool_val) { \
if (lhs != null_val && rhs != null_val) { \
return lhs opsym rhs; \
} \
return null_bool_val; \
}
#define ALWAYS_INLINE

Definition at line 68 of file RuntimeFunctions.cpp.

◆ DEF_CMP_NULLABLE_LHS

#define DEF_CMP_NULLABLE_LHS (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE int8_t opname##_##type##_nullable_lhs( \
const type lhs, \
const type rhs, \
const null_type null_val, \
const int8_t null_bool_val) { \
if (lhs != null_val) { \
return lhs opsym rhs; \
} \
return null_bool_val; \
}
#define ALWAYS_INLINE

Definition at line 80 of file RuntimeFunctions.cpp.

◆ DEF_CMP_NULLABLE_RHS

#define DEF_CMP_NULLABLE_RHS (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE int8_t opname##_##type##_nullable_rhs( \
const type lhs, \
const type rhs, \
const null_type null_val, \
const int8_t null_bool_val) { \
if (rhs != null_val) { \
return lhs opsym rhs; \
} \
return null_bool_val; \
}
#define ALWAYS_INLINE

Definition at line 92 of file RuntimeFunctions.cpp.

◆ DEF_SAFE_DIV_NULLABLE

#define DEF_SAFE_DIV_NULLABLE (   type,
  null_type,
  opname 
)
Value:
extern "C" ALWAYS_INLINE type safe_div_##type( \
const type lhs, const type rhs, const null_type null_val) { \
if (lhs != null_val && rhs != null_val && rhs != 0) { \
return lhs / rhs; \
} \
return null_val; \
}
#define ALWAYS_INLINE

Definition at line 104 of file RuntimeFunctions.cpp.
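The generated safe_div helpers additionally map a zero divisor to NULL instead of faulting. A minimal sketch of the function this macro would generate for an assumed int64_t instantiation (attribute macros omitted):

#include <cstdint>
// Sketch of the function DEF_SAFE_DIV_NULLABLE generates for type = int64_t,
// null_type = int64_t:
extern "C" int64_t safe_div_int64_t(const int64_t lhs,
                                    const int64_t rhs,
                                    const int64_t null_val) {
  if (lhs != null_val && rhs != null_val && rhs != 0) {
    return lhs / rhs;
  }
  return null_val;  // NULL operand or division by zero yields NULL
}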

◆ DEF_SHARED_AGG_RET_STUBS

#define DEF_SHARED_AGG_RET_STUBS (   base_agg_func)

Definition at line 640 of file RuntimeFunctions.cpp.

◆ DEF_SHARED_AGG_STUBS

#define DEF_SHARED_AGG_STUBS (   base_agg_func)
Value:
extern "C" GPU_RT_STUB void base_agg_func##_shared(int64_t* agg, const int64_t val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_skip_val_shared( \
int64_t* agg, const int64_t val, const int64_t skip_val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_int32_shared(int32_t* agg, \
const int32_t val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_int16_shared(int16_t* agg, \
const int16_t val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_int8_shared(int8_t* agg, \
const int8_t val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_int32_skip_val_shared( \
int32_t* agg, const int32_t val, const int32_t skip_val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_double_shared(int64_t* agg, \
const double val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_double_skip_val_shared( \
int64_t* agg, const double val, const double skip_val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_float_shared(int32_t* agg, \
const float val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_float_skip_val_shared( \
int32_t* agg, const float val, const float skip_val) {}
#define GPU_RT_STUB

Definition at line 679 of file RuntimeFunctions.cpp.

◆ DEF_SKIP_AGG [1/2]

#define DEF_SKIP_AGG (   base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
DATA_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
const DATA_T old_agg = *agg; \
if (old_agg != skip_val) { \
base_agg_func(agg, val); \
} else { \
*agg = val; \
} \
} \
}
#define DATA_T
#define ALWAYS_INLINE

Definition at line 592 of file RuntimeFunctions.cpp.

Referenced by agg_count_int32_skip_val().

◆ DEF_SKIP_AGG [2/2]

#define DEF_SKIP_AGG (   base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
const ADDR_T old_agg = *agg; \
if (old_agg != *reinterpret_cast<const ADDR_T*>(may_alias_ptr(&skip_val))) { \
base_agg_func(agg, val); \
} else { \
*agg = *reinterpret_cast<const ADDR_T*>(may_alias_ptr(&val)); \
} \
} \
}
#define ADDR_T
#define DATA_T
#define ALWAYS_INLINE

Definition at line 592 of file RuntimeFunctions.cpp.
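This second form is used for the floating-point aggregates, where the slot (ADDR_T) is an integer word holding the value's bit pattern, so the "slot still empty?" test compares bit patterns via may_alias_ptr rather than comparing floating-point values directly. A hedged sketch of what a hypothetical DEF_SKIP_AGG(agg_max_double) instantiation (DATA_T = double, ADDR_T = int64_t) would expand to; the real instantiations are made inside RuntimeFunctions.cpp:

#include <cstdint>
#include "TypePunning.h"  // may_alias_ptr

extern "C" void agg_max_double(int64_t* agg, const double val);  // base aggregate (see above)

// Illustrative expansion only.
extern "C" void agg_max_double_skip_val(int64_t* agg,
                                        const double val,
                                        const double skip_val) {
  if (val != skip_val) {
    const int64_t old_agg = *agg;
    if (old_agg != *reinterpret_cast<const int64_t*>(may_alias_ptr(&skip_val))) {
      agg_max_double(agg, val);  // slot already holds a real value: merge
    } else {
      // first non-NULL value: overwrite the sentinel's bit pattern
      *agg = *reinterpret_cast<const int64_t*>(may_alias_ptr(&val));
    }
  }
}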

◆ DEF_SKIP_AGG_ADD [1/2]

#define DEF_SKIP_AGG_ADD (   base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
DATA_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
base_agg_func(agg, val); \
} \
}
#define DATA_T
#define ALWAYS_INLINE

Definition at line 584 of file RuntimeFunctions.cpp.

◆ DEF_SKIP_AGG_ADD [2/2]

#define DEF_SKIP_AGG_ADD (   base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
base_agg_func(agg, val); \
} \
}
#define ADDR_T
#define DATA_T
#define ALWAYS_INLINE

Definition at line 584 of file RuntimeFunctions.cpp.

◆ DEF_UMINUS_NULLABLE

#define DEF_UMINUS_NULLABLE (   type,
  null_type 
)
Value:
extern "C" ALWAYS_INLINE type uminus_##type##_nullable(const type operand, \
const null_type null_val) { \
return operand == null_val ? null_val : -operand; \
}
#define ALWAYS_INLINE

Definition at line 207 of file RuntimeFunctions.cpp.

◆ DEF_WRITE_PROJECTION_INT

#define DEF_WRITE_PROJECTION_INT (   n)
Value:
extern "C" ALWAYS_INLINE void write_projection_int##n( \
int8_t* slot_ptr, const int##n##_t val, const int64_t init_val) { \
if (val != init_val) { \
*reinterpret_cast<int##n##_t*>(slot_ptr) = val; \
} \
}
#define ALWAYS_INLINE

Definition at line 416 of file RuntimeFunctions.cpp.

◆ GPU_RT_STUB

#define GPU_RT_STUB   NEVER_INLINE __attribute__((optnone))

Definition at line 295 of file RuntimeFunctions.cpp.

Function Documentation

◆ __attribute__()

__attribute__ ((noinline)) int32_t pos_start_impl ( int32_t *  error_code )

Definition at line 765 of file RuntimeFunctions.cpp.

Referenced by extract_str_len(), linear_probabilistic_count(), multifrag_query_hoisted_literals(), and record_error_code().

765  {
766  int32_t row_index_resume{0};
767  if (error_code) {
768  row_index_resume = error_code[0];
769  error_code[0] = 0;
770  }
771  return row_index_resume;
772 }
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t * error_code

◆ agg_approximate_count_distinct()

NEVER_INLINE void agg_approximate_count_distinct ( int64_t *  agg,
const int64_t  key,
const uint32_t  b 
)

Definition at line 305 of file RuntimeFunctions.cpp.

References get_rank(), and MurmurHash64A().

307  {
308  const uint64_t hash = MurmurHash64A(&key, sizeof(key), 0);
309  const uint32_t index = hash >> (64 - b);
310  const uint8_t rank = get_rank(hash << b, 64 - b);
311  uint8_t* M = reinterpret_cast<uint8_t*>(*agg);
312  M[index] = std::max(M[index], rank);
313 }
FORCE_INLINE uint8_t get_rank(uint64_t x, uint32_t b)
NEVER_INLINE DEVICE uint64_t MurmurHash64A(const void *key, int len, uint64_t seed)
Definition: MurmurHash.cpp:26
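Here *agg is the address of a HyperLogLog register array of 2^b one-byte buckets: the top b bits of the 64-bit Murmur hash select the bucket, and the rank of the remaining bits is max-merged into it. A minimal driver sketch, assuming a caller-owned register buffer (in the engine the buffers come from the query memory initializer) and b = 11 chosen only for illustration:

#include <cstdint>
#include <vector>

extern "C" void agg_approximate_count_distinct(int64_t* agg, const int64_t key, const uint32_t b);

int main() {
  const uint32_t b = 11;                       // 2^11 = 2048 registers (assumed precision)
  std::vector<uint8_t> registers(1u << b, 0);  // the "M" array the function updates
  int64_t agg = reinterpret_cast<int64_t>(registers.data());
  for (int64_t key = 0; key < 1000; ++key) {
    agg_approximate_count_distinct(&agg, key, b);
  }
  // The registers are later reduced to a cardinality estimate by the
  // HyperLogLog code (see HyperLogLogRank.h); that step is not shown here.
}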

◆ agg_approximate_count_distinct_gpu()

GPU_RT_STUB void agg_approximate_count_distinct_gpu ( int64_t *  ,
const int64_t  ,
const uint32_t  ,
const int64_t  ,
const int64_t   
)

Definition at line 315 of file RuntimeFunctions.cpp.

319  {}

◆ agg_count()

ALWAYS_INLINE uint64_t agg_count ( uint64_t *  agg,
const int64_t   
)

Definition at line 284 of file RuntimeFunctions.cpp.

Referenced by agg_count_skip_val(), and anonymous_namespace{GroupByAndAggregate.cpp}::get_agg_count().

284  {
285  return (*agg)++;
286 }

◆ agg_count_distinct_bitmap()

ALWAYS_INLINE void agg_count_distinct_bitmap ( int64_t *  agg,
const int64_t  val,
const int64_t  min_val 
)

Definition at line 288 of file RuntimeFunctions.cpp.

Referenced by agg_count_distinct_bitmap_skip_val(), WindowFunctionContext::fillPartitionEnd(), WindowFunctionContext::fillPartitionStart(), anonymous_namespace{WindowContext.cpp}::index_to_partition_end(), and InValuesBitmap::InValuesBitmap().

290  {
291  const uint64_t bitmap_idx = val - min_val;
292  reinterpret_cast<int8_t*>(*agg)[bitmap_idx >> 3] |= (1 << (bitmap_idx & 7));
293 }

◆ agg_count_distinct_bitmap_gpu()

GPU_RT_STUB void agg_count_distinct_bitmap_gpu ( int64_t *  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const uint64_t  ,
const uint64_t   
)

Definition at line 297 of file RuntimeFunctions.cpp.

303  {}

◆ agg_count_distinct_bitmap_skip_val()

ALWAYS_INLINE void agg_count_distinct_bitmap_skip_val ( int64_t *  agg,
const int64_t  val,
const int64_t  min_val,
const int64_t  skip_val 
)

Definition at line 358 of file RuntimeFunctions.cpp.

References agg_count_distinct_bitmap().

361  {
362  if (val != skip_val) {
363  agg_count_distinct_bitmap(agg, val, min_val);
364  }
365 }
ALWAYS_INLINE void agg_count_distinct_bitmap(int64_t *agg, const int64_t val, const int64_t min_val)

◆ agg_count_distinct_bitmap_skip_val_gpu()

GPU_RT_STUB void agg_count_distinct_bitmap_skip_val_gpu ( int64_t *  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const uint64_t  ,
const uint64_t   
)

Definition at line 367 of file RuntimeFunctions.cpp.

374  {}

◆ agg_count_double()

ALWAYS_INLINE uint64_t agg_count_double ( uint64_t *  agg,
const double  val 
)

Definition at line 520 of file RuntimeFunctions.cpp.

Referenced by agg_count_double_skip_val().

520  {
521  return (*agg)++;
522 }

◆ agg_count_double_skip_val()

ALWAYS_INLINE uint64_t agg_count_double_skip_val ( uint64_t *  agg,
const double  val,
const double  skip_val 
)

Definition at line 566 of file RuntimeFunctions.cpp.

References agg_count_double().

568  {
569  if (val != skip_val) {
570  return agg_count_double(agg, val);
571  }
572  return *agg;
573 }
ALWAYS_INLINE uint64_t agg_count_double(uint64_t *agg, const double val)

◆ agg_count_float()

ALWAYS_INLINE uint32_t agg_count_float ( uint32_t *  agg,
const float  val 
)

Definition at line 543 of file RuntimeFunctions.cpp.

Referenced by agg_count_float_skip_val().

543  {
544  return (*agg)++;
545 }

◆ agg_count_float_skip_val()

ALWAYS_INLINE uint32_t agg_count_float_skip_val ( uint32_t *  agg,
const float  val,
const float  skip_val 
)

Definition at line 575 of file RuntimeFunctions.cpp.

References agg_count_float().

577  {
578  if (val != skip_val) {
579  return agg_count_float(agg, val);
580  }
581  return *agg;
582 }
ALWAYS_INLINE uint32_t agg_count_float(uint32_t *agg, const float val)

◆ agg_count_int32()

ALWAYS_INLINE uint32_t agg_count_int32 ( uint32_t *  agg,
const int32_t   
)

Definition at line 376 of file RuntimeFunctions.cpp.

Referenced by agg_count_int32_skip_val().

376  {
377  return (*agg)++;
378 }

◆ agg_count_int32_skip_val()

ALWAYS_INLINE uint32_t agg_count_int32_skip_val ( uint32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)

Definition at line 465 of file RuntimeFunctions.cpp.

References agg_count_int32(), agg_max(), agg_max_int16(), agg_max_int32(), agg_max_int8(), agg_min(), agg_min_int16(), agg_min_int32(), agg_min_int8(), and DEF_SKIP_AGG.

467  {
468  if (val != skip_val) {
469  return agg_count_int32(agg, val);
470  }
471  return *agg;
472 }
ALWAYS_INLINE uint32_t agg_count_int32(uint32_t *agg, const int32_t)

◆ agg_count_skip_val()

ALWAYS_INLINE uint64_t agg_count_skip_val ( uint64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Definition at line 456 of file RuntimeFunctions.cpp.

References agg_count().

458  {
459  if (val != skip_val) {
460  return agg_count(agg, val);
461  }
462  return *agg;
463 }
ALWAYS_INLINE uint64_t agg_count(uint64_t *agg, const int64_t)

◆ agg_id()

ALWAYS_INLINE void agg_id ( int64_t *  agg,
const int64_t  val 
)

Definition at line 354 of file RuntimeFunctions.cpp.

354  {
355  *agg = val;
356 }

◆ agg_id_double()

ALWAYS_INLINE void agg_id_double ( int64_t *  agg,
const double  val 
)

Definition at line 539 of file RuntimeFunctions.cpp.

539  {
540  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&val)));
541 }

◆ agg_id_double_shared_slow()

GPU_RT_STUB void agg_id_double_shared_slow ( int64_t *  agg,
const double *  val 
)

Definition at line 726 of file RuntimeFunctions.cpp.

726 {}

◆ agg_id_float()

ALWAYS_INLINE void agg_id_float ( int32_t *  agg,
const float  val 
)

Definition at line 562 of file RuntimeFunctions.cpp.

562  {
563  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&val)));
564 }

◆ agg_max()

ALWAYS_INLINE void agg_max ( int64_t *  agg,
const int64_t  val 
)

Definition at line 346 of file RuntimeFunctions.cpp.

Referenced by agg_count_int32_skip_val().

346  {
347  *agg = std::max(*agg, val);
348 }

◆ agg_max_double()

ALWAYS_INLINE void agg_max_double ( int64_t *  agg,
const double  val 
)

Definition at line 529 of file RuntimeFunctions.cpp.

529  {
530  const auto r = std::max(*reinterpret_cast<const double*>(agg), val);
531  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&r)));
532 }

◆ agg_max_float()

ALWAYS_INLINE void agg_max_float ( int32_t *  agg,
const float  val 
)

Definition at line 552 of file RuntimeFunctions.cpp.

552  {
553  const auto r = std::max(*reinterpret_cast<const float*>(agg), val);
554  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&r)));
555 }

◆ agg_max_int16_skip_val_shared()

GPU_RT_STUB void agg_max_int16_skip_val_shared ( int16_t *  agg,
const int16_t  val,
const int16_t  skip_val 
)

Definition at line 710 of file RuntimeFunctions.cpp.

712  {}

◆ agg_max_int8_skip_val_shared()

GPU_RT_STUB void agg_max_int8_skip_val_shared ( int8_t *  agg,
const int8_t  val,
const int8_t  skip_val 
)

Definition at line 714 of file RuntimeFunctions.cpp.

716  {}

◆ agg_min()

ALWAYS_INLINE void agg_min ( int64_t *  agg,
const int64_t  val 
)

Definition at line 350 of file RuntimeFunctions.cpp.

Referenced by agg_count_int32_skip_val().

350  {
351  *agg = std::min(*agg, val);
352 }

◆ agg_min_double()

ALWAYS_INLINE void agg_min_double ( int64_t *  agg,
const double  val 
)

Definition at line 534 of file RuntimeFunctions.cpp.

534  {
535  const auto r = std::min(*reinterpret_cast<const double*>(agg), val);
536  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&r)));
537 }

◆ agg_min_float()

ALWAYS_INLINE void agg_min_float ( int32_t *  agg,
const float  val 
)

Definition at line 557 of file RuntimeFunctions.cpp.

557  {
558  const auto r = std::min(*reinterpret_cast<const float*>(agg), val);
559  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&r)));
560 }

◆ agg_min_int16_skip_val_shared()

GPU_RT_STUB void agg_min_int16_skip_val_shared ( int16_t *  agg,
const int16_t  val,
const int16_t  skip_val 
)

Definition at line 718 of file RuntimeFunctions.cpp.

720  {}

◆ agg_min_int8_skip_val_shared()

GPU_RT_STUB void agg_min_int8_skip_val_shared ( int8_t *  agg,
const int8_t  val,
const int8_t  skip_val 
)

Definition at line 722 of file RuntimeFunctions.cpp.

724  {}

◆ agg_sum()

ALWAYS_INLINE int64_t agg_sum ( int64_t *  agg,
const int64_t  val 
)

Definition at line 340 of file RuntimeFunctions.cpp.

Referenced by agg_sum_skip_val().

340  {
341  const auto old = *agg;
342  *agg += val;
343  return old;
344 }

◆ agg_sum_double()

ALWAYS_INLINE void agg_sum_double ( int64_t *  agg,
const double  val 
)

Definition at line 524 of file RuntimeFunctions.cpp.

524  {
525  const auto r = *reinterpret_cast<const double*>(agg) + val;
526  *agg = *reinterpret_cast<const int64_t*>(may_alias_ptr(&r));
527 }

◆ agg_sum_double_shared()

GPU_RT_STUB void agg_sum_double_shared ( int64_t *  agg,
const double  val 
)

Definition at line 747 of file RuntimeFunctions.cpp.

747 {}

◆ agg_sum_double_skip_val_shared()

GPU_RT_STUB void agg_sum_double_skip_val_shared ( int64_t *  agg,
const double  val,
const double  skip_val 
)

Definition at line 749 of file RuntimeFunctions.cpp.

751  {}

◆ agg_sum_float()

ALWAYS_INLINE void agg_sum_float ( int32_t *  agg,
const float  val 
)

Definition at line 547 of file RuntimeFunctions.cpp.

547  {
548  const auto r = *reinterpret_cast<const float*>(agg) + val;
549  *agg = *reinterpret_cast<const int32_t*>(may_alias_ptr(&r));
550 }

◆ agg_sum_float_shared()

GPU_RT_STUB void agg_sum_float_shared ( int32_t *  agg,
const float  val 
)

Definition at line 752 of file RuntimeFunctions.cpp.

752 {}

◆ agg_sum_float_skip_val_shared()

GPU_RT_STUB void agg_sum_float_skip_val_shared ( int32_t *  agg,
const float  val,
const float  skip_val 
)

Definition at line 754 of file RuntimeFunctions.cpp.

756  {}

◆ agg_sum_int32()

ALWAYS_INLINE int32_t agg_sum_int32 ( int32_t *  agg,
const int32_t  val 
)

Definition at line 380 of file RuntimeFunctions.cpp.

Referenced by agg_sum_int32_skip_val().

380  {
381  const auto old = *agg;
382  *agg += val;
383  return old;
384 }

◆ agg_sum_int32_shared()

GPU_RT_STUB int32_t agg_sum_int32_shared ( int32_t *  agg,
const int32_t  val 
)

Definition at line 737 of file RuntimeFunctions.cpp.

737  {
738  return 0;
739 }

◆ agg_sum_int32_skip_val()

ALWAYS_INLINE int32_t agg_sum_int32_skip_val ( int32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)

Definition at line 442 of file RuntimeFunctions.cpp.

References agg_sum_int32().

444  {
445  const auto old = *agg;
446  if (val != skip_val) {
447  if (old != skip_val) {
448  return agg_sum_int32(agg, val);
449  } else {
450  *agg = val;
451  }
452  }
453  return old;
454 }
ALWAYS_INLINE int32_t agg_sum_int32(int32_t *agg, const int32_t val)

◆ agg_sum_int32_skip_val_shared()

GPU_RT_STUB int32_t agg_sum_int32_skip_val_shared ( int32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)

Definition at line 741 of file RuntimeFunctions.cpp.

743  {
744  return 0;
745 }

◆ agg_sum_shared()

GPU_RT_STUB int64_t agg_sum_shared ( int64_t *  agg,
const int64_t  val 
)

Definition at line 728 of file RuntimeFunctions.cpp.

728  {
729  return 0;
730 }

◆ agg_sum_skip_val()

ALWAYS_INLINE int64_t agg_sum_skip_val ( int64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Definition at line 428 of file RuntimeFunctions.cpp.

References agg_sum().

Referenced by Executor::reduceResults().

430  {
431  const auto old = *agg;
432  if (val != skip_val) {
433  if (old != skip_val) {
434  return agg_sum(agg, val);
435  } else {
436  *agg = val;
437  }
438  }
439  return old;
440 }
ALWAYS_INLINE int64_t agg_sum(int64_t *agg, const int64_t val)
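The aggregate slot is expected to start out holding skip_val (the column's NULL sentinel): the first non-NULL value replaces the sentinel, later values are summed, and NULL inputs are ignored. A small usage sketch, assuming the translation unit is linked against the runtime and using an assumed sentinel:

#include <cassert>
#include <cstdint>
#include <limits>

extern "C" int64_t agg_sum_skip_val(int64_t* agg, const int64_t val, const int64_t skip_val);

int main() {
  const int64_t skip_val = std::numeric_limits<int64_t>::min();  // assumed NULL sentinel
  int64_t agg = skip_val;                      // "empty" slot
  agg_sum_skip_val(&agg, 3, skip_val);         // slot becomes 3
  agg_sum_skip_val(&agg, skip_val, skip_val);  // NULL input: ignored
  agg_sum_skip_val(&agg, 4, skip_val);         // slot becomes 7
  assert(agg == 7);
}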

◆ agg_sum_skip_val_shared()

GPU_RT_STUB int64_t agg_sum_skip_val_shared ( int64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Definition at line 732 of file RuntimeFunctions.cpp.

734  {
735  return 0;
736 }

◆ bit_is_set()

ALWAYS_INLINE int8_t bit_is_set ( const int64_t  bitset,
const int64_t  val,
const int64_t  min_val,
const int64_t  max_val,
const int64_t  null_val,
const int8_t  null_bool_val 
)

Definition at line 321 of file RuntimeFunctions.cpp.

326  {
327  if (val == null_val) {
328  return null_bool_val;
329  }
330  if (val < min_val || val > max_val) {
331  return false;
332  }
333  const uint64_t bitmap_idx = val - min_val;
334  return (reinterpret_cast<const int8_t*>(bitset))[bitmap_idx >> 3] &
335  (1 << (bitmap_idx & 7))
336  ? 1
337  : 0;
338 }
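bit_is_set is the read side of the bitmap that agg_count_distinct_bitmap fills: one bit per value in [min_val, max_val], with the bitmap's address passed around as an int64_t. A minimal round trip with a caller-owned bitmap (the sentinel values below are assumed for illustration):

#include <cassert>
#include <cstdint>
#include <vector>

extern "C" void agg_count_distinct_bitmap(int64_t* agg, const int64_t val, const int64_t min_val);
extern "C" int8_t bit_is_set(const int64_t bitset, const int64_t val, const int64_t min_val,
                             const int64_t max_val, const int64_t null_val,
                             const int8_t null_bool_val);

int main() {
  const int64_t min_val = 0, max_val = 63, null_val = -1;
  const int8_t null_bool = -128;  // assumed boolean NULL sentinel
  std::vector<int8_t> bitmap((max_val - min_val) / 8 + 1, 0);  // 64 bits
  int64_t agg = reinterpret_cast<int64_t>(bitmap.data());
  agg_count_distinct_bitmap(&agg, 5, min_val);  // mark value 5 as seen
  assert(bit_is_set(agg, 5, min_val, max_val, null_val, null_bool) == 1);
  assert(bit_is_set(agg, 6, min_val, max_val, null_val, null_bool) == 0);
  assert(bit_is_set(agg, null_val, min_val, max_val, null_val, null_bool) == null_bool);
}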

◆ char_length()

ALWAYS_INLINE DEVICE int32_t char_length ( const char *  str,
const int32_t  str_len 
)

Definition at line 1104 of file RuntimeFunctions.cpp.

Referenced by ScalarExprVisitor< std::unordered_set< InputColDescriptor > >::visit().

1105  {
1106  return str_len;
1107 }

◆ char_length_nullable()

ALWAYS_INLINE DEVICE int32_t char_length_nullable ( const char *  str,
const int32_t  str_len,
const int32_t  int_null 
)

Definition at line 1109 of file RuntimeFunctions.cpp.

1111  {
1112  if (!str) {
1113  return int_null;
1114  }
1115  return str_len;
1116 }

◆ decimal_ceil()

ALWAYS_INLINE int64_t decimal_ceil ( const int64_t  x,
const int64_t  scale 
)

Definition at line 634 of file RuntimeFunctions.cpp.

References decimal_floor().

634  {
635  return decimal_floor(x, scale) + (x % scale ? scale : 0);
636 }
ALWAYS_INLINE int64_t decimal_floor(const int64_t x, const int64_t scale)

◆ decimal_floor()

ALWAYS_INLINE int64_t decimal_floor ( const int64_t  x,
const int64_t  scale 
)

Definition at line 624 of file RuntimeFunctions.cpp.

Referenced by decimal_ceil().

624  {
625  if (x >= 0) {
626  return x / scale * scale;
627  }
628  if (!(x % scale)) {
629  return x;
630  }
631  return x / scale * scale - scale;
632 }
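Both functions work on the scaled integer representation of a DECIMAL value: with scale = 100 (two fractional digits), 1234 stands for 12.34. A few worked values, assuming linkage against the runtime:

#include <cassert>
#include <cstdint>

extern "C" int64_t decimal_floor(const int64_t x, const int64_t scale);
extern "C" int64_t decimal_ceil(const int64_t x, const int64_t scale);

int main() {
  // scale = 100, i.e. two decimal digits: 1234 represents 12.34
  assert(decimal_floor(1234, 100) == 1200);    // floor(12.34)  -> 12.00
  assert(decimal_floor(-1234, 100) == -1300);  // floor(-12.34) -> -13.00
  assert(decimal_ceil(1234, 100) == 1300);     // ceil(12.34)   -> 13.00
  assert(decimal_ceil(-1234, 100) == -1200);   // ceil(-12.34)  -> -12.00
  assert(decimal_floor(1200, 100) == 1200);    // already integral: unchanged
}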

◆ extract_str_len()

ALWAYS_INLINE int32_t extract_str_len ( const uint64_t  str_and_len)

Definition at line 1077 of file RuntimeFunctions.cpp.

References __attribute__(), extract_str_len_noinline(), extract_str_ptr(), and extract_str_ptr_noinline().

1077  {
1078  return static_cast<int64_t>(str_and_len) >> 48;
1079 }

◆ extract_str_ptr()

ALWAYS_INLINE int8_t* extract_str_ptr ( const uint64_t  str_and_len)

Definition at line 1073 of file RuntimeFunctions.cpp.

Referenced by extract_str_len().

1073  {
1074  return reinterpret_cast<int8_t*>(str_and_len & 0xffffffffffff);
1075 }

◆ force_sync()

GPU_RT_STUB void force_sync ( )

Definition at line 758 of file RuntimeFunctions.cpp.

758 {}

◆ get_group_value_fast_keyless()

ALWAYS_INLINE int64_t* get_group_value_fast_keyless ( int64_t *  groups_buffer,
const int64_t  key,
const int64_t  min_key,
const int64_t  ,
const uint32_t  row_size_quad 
)

Definition at line 1053 of file RuntimeFunctions.cpp.

1058  {
1059  return groups_buffer + row_size_quad * (key - min_key);
1060 }

◆ get_group_value_fast_keyless_semiprivate()

ALWAYS_INLINE int64_t* get_group_value_fast_keyless_semiprivate ( int64_t *  groups_buffer,
const int64_t  key,
const int64_t  min_key,
const int64_t  ,
const uint32_t  row_size_quad,
const uint8_t  thread_warp_idx,
const uint8_t  warp_size 
)

Definition at line 1062 of file RuntimeFunctions.cpp.

1069  {
1070  return groups_buffer + row_size_quad * (warp_size * (key - min_key) + thread_warp_idx);
1071 }
const int64_t const uint32_t const uint32_t const uint32_t const bool const int8_t warp_size
GPU_RT_STUB int8_t thread_warp_idx(const int8_t warp_sz)

◆ get_matching_group_value() [1/2]

template<typename T >
ALWAYS_INLINE int64_t* get_matching_group_value ( int64_t *  groups_buffer,
const uint32_t  h,
const T *  key,
const uint32_t  key_count,
const uint32_t  row_size_quad 
)

Definition at line 895 of file RuntimeFunctions.cpp.

References align_to_int64().

Referenced by get_group_value(), get_group_value_with_watchdog(), and get_matching_group_value().

899  {
900  auto off = h * row_size_quad;
901  auto row_ptr = reinterpret_cast<T*>(groups_buffer + off);
902  if (*row_ptr == get_empty_key<T>()) {
903  memcpy(row_ptr, key, key_count * sizeof(T));
904  auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count);
905  return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8));
906  }
907  if (memcmp(row_ptr, key, key_count * sizeof(T)) == 0) {
908  auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count);
909  return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8));
910  }
911  return nullptr;
912 }
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)
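This is the per-slot primitive of the baseline (open-addressing) group-by hash table: it claims an empty row for the key, returns the payload pointer when the stored key already matches, or returns nullptr so the caller probes the next slot. The sketch below shows the kind of linear-probing loop that drives it; it is illustrative only (the real probing loops live in GroupByRuntime.cpp, not shown here) and assumes the template definition is visible, e.g. when compiled inside RuntimeFunctions.cpp:

// Illustrative linear-probing caller; key_hash stands in for the runtime's key hash.
static int64_t* probe_group_slot(int64_t* groups_buffer,
                                 const uint32_t entry_count,
                                 const int64_t* key,
                                 const uint32_t key_count,
                                 const uint32_t row_size_quad,
                                 const uint32_t key_hash) {
  const uint32_t start = key_hash % entry_count;
  uint32_t h = start;
  do {
    int64_t* slot = get_matching_group_value(groups_buffer, h, key, key_count, row_size_quad);
    if (slot) {
      return slot;  // row claimed for this key, or key already present
    }
    h = (h + 1) % entry_count;  // collision with a different key: keep probing
  } while (h != start);
  return nullptr;  // table is full
}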

◆ get_matching_group_value() [2/2]

ALWAYS_INLINE int64_t* get_matching_group_value ( int64_t *  groups_buffer,
const uint32_t  h,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width,
const uint32_t  row_size_quad,
const int64_t *  init_vals 
)

Definition at line 914 of file RuntimeFunctions.cpp.

References get_matching_group_value().

920  {
921  switch (key_width) {
922  case 4:
923  return get_matching_group_value(groups_buffer,
924  h,
925  reinterpret_cast<const int32_t*>(key),
926  key_count,
927  row_size_quad);
928  case 8:
929  return get_matching_group_value(groups_buffer, h, key, key_count, row_size_quad);
930  default:;
931  }
932  return nullptr;
933 }
ALWAYS_INLINE int64_t * get_matching_group_value(int64_t *groups_buffer, const uint32_t h, const T *key, const uint32_t key_count, const uint32_t row_size_quad)

◆ get_matching_group_value_columnar()

ALWAYS_INLINE int64_t* get_matching_group_value_columnar ( int64_t *  groups_buffer,
const uint32_t  h,
const int64_t *  key,
const uint32_t  key_qw_count,
const size_t  entry_count 
)

Definition at line 983 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64, and key_qw_count.

Referenced by get_group_value_columnar(), and get_group_value_columnar_with_watchdog().

988  {
989  auto off = h;
990  if (groups_buffer[off] == EMPTY_KEY_64) {
991  for (size_t i = 0; i < key_qw_count; ++i) {
992  groups_buffer[off] = key[i];
993  off += entry_count;
994  }
995  return &groups_buffer[off];
996  }
997  off = h;
998  for (size_t i = 0; i < key_qw_count; ++i) {
999  if (groups_buffer[off] != key[i]) {
1000  return nullptr;
1001  }
1002  off += entry_count;
1003  }
1004  return &groups_buffer[off];
1005 }
#define EMPTY_KEY_64
const int64_t const uint32_t const uint32_t key_qw_count

◆ get_matching_group_value_columnar_slot() [1/2]

template<typename T >
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot ( int64_t *  groups_buffer,
const uint32_t  entry_count,
const uint32_t  h,
const T *  key,
const uint32_t  key_count 
)

Definition at line 936 of file RuntimeFunctions.cpp.

References ALWAYS_INLINE.

Referenced by get_group_value_columnar_slot(), get_group_value_columnar_slot_with_watchdog(), and get_matching_group_value_columnar_slot().

940  {
941  auto off = h;
942  auto key_buffer = reinterpret_cast<T*>(groups_buffer);
943  if (key_buffer[off] == get_empty_key<T>()) {
944  for (size_t i = 0; i < key_count; ++i) {
945  key_buffer[off] = key[i];
946  off += entry_count;
947  }
948  return h;
949  }
950  off = h;
951  for (size_t i = 0; i < key_count; ++i) {
952  if (key_buffer[off] != key[i]) {
953  return -1;
954  }
955  off += entry_count;
956  }
957  return h;
958 }

◆ get_matching_group_value_columnar_slot() [2/2]

ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot ( int64_t *  groups_buffer,
const uint32_t  entry_count,
const uint32_t  h,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width 
)

Definition at line 961 of file RuntimeFunctions.cpp.

References get_matching_group_value_columnar_slot().

966  {
967  switch (key_width) {
968  case 4:
969  return get_matching_group_value_columnar_slot(groups_buffer,
970  entry_count,
971  h,
972  reinterpret_cast<const int32_t*>(key),
973  key_count);
974  case 8:
975  return get_matching_group_value_columnar_slot(
976  groups_buffer, entry_count, h, key, key_count);
977  default:
978  return -1;
979  }
980  return -1;
981 }
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot(int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const T *key, const uint32_t key_count)

◆ get_matching_group_value_perfect_hash()

ALWAYS_INLINE int64_t* get_matching_group_value_perfect_hash ( int64_t *  groups_buffer,
const uint32_t  hashed_index,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  row_size_quad 
)

Definition at line 1018 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64.

1023  {
1024  uint32_t off = hashed_index * row_size_quad;
1025  if (groups_buffer[off] == EMPTY_KEY_64) {
1026  for (uint32_t i = 0; i < key_count; ++i) {
1027  groups_buffer[off + i] = key[i];
1028  }
1029  }
1030  return groups_buffer + off + key_count;
1031 }
#define EMPTY_KEY_64

◆ key_for_string_encoded()

ALWAYS_INLINE DEVICE int32_t key_for_string_encoded ( const int32_t  str_id)

Definition at line 1118 of file RuntimeFunctions.cpp.

1118  {
1119  return str_id;
1120 }

◆ linear_probabilistic_count()

NEVER_INLINE void linear_probabilistic_count ( uint8_t *  bitmap,
const uint32_t  bitmap_bytes,
const uint8_t *  key_bytes,
const uint32_t  key_len 
)

Definition at line 1167 of file RuntimeFunctions.cpp.

References __attribute__(), and MurmurHash1().

1170  {
1171  const uint32_t bit_pos = MurmurHash1(key_bytes, key_len, 0) % (bitmap_bytes * 8);
1172  const uint32_t word_idx = bit_pos / 32;
1173  const uint32_t bit_idx = bit_pos % 32;
1174  reinterpret_cast<uint32_t*>(bitmap)[word_idx] |= 1 << bit_idx;
1175 }
NEVER_INLINE DEVICE uint32_t MurmurHash1(const void *key, int len, const uint32_t seed)
Definition: MurmurHash.cpp:20
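This is the insert side of a linear-counting sketch: one Murmur hash picks a single bit in a caller-provided bitmap. The sketch below fills a bitmap and then applies the textbook linear-counting estimator, n is approximately -m * ln(V), where m is the number of bits and V the fraction of bits still zero; the estimator is shown only for illustration and is not necessarily the exact reduction used elsewhere in the engine:

#include <bitset>
#include <cmath>
#include <cstdint>
#include <string>
#include <vector>

extern "C" void linear_probabilistic_count(uint8_t* bitmap, const uint32_t bitmap_bytes,
                                           const uint8_t* key_bytes, const uint32_t key_len);

int main() {
  const uint32_t bitmap_bytes = 1u << 12;  // 4 KiB -> m = 32768 bits (assumed size)
  std::vector<uint8_t> bitmap(bitmap_bytes, 0);
  for (int i = 0; i < 1000; ++i) {
    const std::string key = "key_" + std::to_string(i);
    linear_probabilistic_count(bitmap.data(), bitmap_bytes,
                               reinterpret_cast<const uint8_t*>(key.data()),
                               static_cast<uint32_t>(key.size()));
  }
  const double m = bitmap_bytes * 8.0;
  double zero_bits = 0;
  for (const uint8_t byte : bitmap) {
    zero_bits += 8 - std::bitset<8>(byte).count();
  }
  const double estimate = -m * std::log(zero_bits / m);  // roughly 1000 for this input
  (void)estimate;
}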

◆ load_avg_decimal()

ALWAYS_INLINE double load_avg_decimal ( const int64_t *  sum,
const int64_t *  count,
const double  null_val,
const uint32_t  scale 
)

Definition at line 1146 of file RuntimeFunctions.cpp.

1149  {
1150  return *count != 0 ? (static_cast<double>(*sum) / pow(10, scale)) / *count : null_val;
1151 }
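AVG over a DECIMAL column is kept as a (sum, count) pair of int64_t slots, and this helper removes the decimal scaling before dividing: with scale = 2, a stored sum of 12345 stands for 123.45, so ten rows average to 12.345. A tiny check, assuming linkage against the runtime and an assumed NULL sentinel:

#include <cassert>
#include <cmath>
#include <cstdint>

extern "C" double load_avg_decimal(const int64_t* sum, const int64_t* count,
                                   const double null_val, const uint32_t scale);

int main() {
  const int64_t sum = 12345;     // DECIMAL with scale 2: represents 123.45
  const int64_t count = 10;
  const double null_val = -1.0;  // assumed NULL sentinel
  assert(std::fabs(load_avg_decimal(&sum, &count, null_val, 2) - 12.345) < 1e-9);
  const int64_t empty = 0;
  assert(load_avg_decimal(&sum, &empty, null_val, 2) == null_val);  // count == 0 -> NULL
}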

◆ load_avg_double()

ALWAYS_INLINE double load_avg_double ( const int64_t *  agg,
const int64_t *  count,
const double  null_val 
)

Definition at line 1153 of file RuntimeFunctions.cpp.

1155  {
1156  return *count != 0 ? *reinterpret_cast<const double*>(may_alias_ptr(agg)) / *count
1157  : null_val;
1158 }

◆ load_avg_float()

ALWAYS_INLINE double load_avg_float ( const int32_t *  agg,
const int32_t *  count,
const double  null_val 
)

Definition at line 1160 of file RuntimeFunctions.cpp.

1162  {
1163  return *count != 0 ? *reinterpret_cast<const float*>(may_alias_ptr(agg)) / *count
1164  : null_val;
1165 }

◆ load_avg_int()

ALWAYS_INLINE double load_avg_int ( const int64_t *  sum,
const int64_t *  count,
const double  null_val 
)

Definition at line 1140 of file RuntimeFunctions.cpp.

1142  {
1143  return *count != 0 ? static_cast<double>(*sum) / *count : null_val;
1144 }

◆ load_double()

ALWAYS_INLINE double load_double ( const int64_t *  agg)

Definition at line 1132 of file RuntimeFunctions.cpp.

1132  {
1133  return *reinterpret_cast<const double*>(may_alias_ptr(agg));
1134 }

◆ load_float()

ALWAYS_INLINE float load_float ( const int32_t *  agg)

Definition at line 1136 of file RuntimeFunctions.cpp.

1136  {
1137  return *reinterpret_cast<const float*>(may_alias_ptr(agg));
1138 }

◆ logical_and()

ALWAYS_INLINE int8_t logical_and ( const int8_t  lhs,
const int8_t  rhs,
const int8_t  null_val 
)

Definition at line 258 of file RuntimeFunctions.cpp.

260  {
261  if (lhs == null_val) {
262  return rhs == 0 ? rhs : null_val;
263  }
264  if (rhs == null_val) {
265  return lhs == 0 ? lhs : null_val;
266  }
267  return (lhs && rhs) ? 1 : 0;
268 }

◆ logical_not()

ALWAYS_INLINE int8_t logical_not ( const int8_t  operand,
const int8_t  null_val 
)

Definition at line 254 of file RuntimeFunctions.cpp.

254  {
255  return operand == null_val ? operand : (operand ? 0 : 1);
256 }

◆ logical_or()

ALWAYS_INLINE int8_t logical_or ( const int8_t  lhs,
const int8_t  rhs,
const int8_t  null_val 
)

Definition at line 270 of file RuntimeFunctions.cpp.

272  {
273  if (lhs == null_val) {
274  return rhs == 0 ? null_val : rhs;
275  }
276  if (rhs == null_val) {
277  return lhs == 0 ? null_val : lhs;
278  }
279  return (lhs || rhs) ? 1 : 0;
280 }
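Together with logical_not these implement SQL three-valued logic over int8_t with an explicit NULL sentinel: FALSE AND NULL is FALSE, TRUE AND NULL is NULL, TRUE OR NULL is TRUE, and FALSE OR NULL is NULL. A quick check of that truth table, assuming linkage against the runtime and an assumed sentinel value:

#include <cassert>
#include <cstdint>

extern "C" int8_t logical_and(const int8_t lhs, const int8_t rhs, const int8_t null_val);
extern "C" int8_t logical_or(const int8_t lhs, const int8_t rhs, const int8_t null_val);

int main() {
  const int8_t kNull = -128;  // assumed boolean NULL sentinel
  assert(logical_and(kNull, 0, kNull) == 0);      // NULL AND FALSE -> FALSE
  assert(logical_and(kNull, 1, kNull) == kNull);  // NULL AND TRUE  -> NULL
  assert(logical_or(kNull, 1, kNull) == 1);       // NULL OR TRUE   -> TRUE
  assert(logical_or(kNull, 0, kNull) == kNull);   // NULL OR FALSE  -> NULL
  assert(logical_and(1, 1, kNull) == 1);          // plain TRUE AND TRUE
  assert(logical_or(0, 0, kNull) == 0);           // plain FALSE OR FALSE
}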

◆ multifrag_query()

void multifrag_query ( const int8_t ***  col_buffers,
const uint64_t *  num_fragments,
const int64_t *  num_rows,
const uint64_t *  frag_row_offsets,
const int32_t *  max_matched,
int32_t *  total_matched,
const int64_t *  init_agg_value,
int64_t **  out,
int32_t *  error_code,
const uint32_t *  num_tables_ptr,
const int64_t *  join_hash_tables 
)

Definition at line 1235 of file RuntimeFunctions.cpp.

1245  {
1246  for (uint32_t i = 0; i < *num_fragments; ++i) {
1247  query_stub(col_buffers ? col_buffers[i] : nullptr,
1248  &num_rows[i * (*num_tables_ptr)],
1249  &frag_row_offsets[i * (*num_tables_ptr)],
1250  max_matched,
1251  init_agg_value,
1252  out,
1253  i,
1254  join_hash_tables,
1255  total_matched,
1256  error_code);
1257  }
1258 }
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t * join_hash_tables
const int8_t const int64_t * num_rows
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t int32_t * total_matched
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t * error_code
const int8_t const int64_t const uint64_t const int32_t * max_matched
const int8_t const int64_t const uint64_t const int32_t const int64_t * init_agg_value
const int8_t const int64_t const uint64_t * frag_row_offsets
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t ** out

◆ multifrag_query_hoisted_literals()

void multifrag_query_hoisted_literals ( const int8_t ***  col_buffers,
const uint64_t *  num_fragments,
const int8_t *  literals,
const int64_t *  num_rows,
const uint64_t *  frag_row_offsets,
const int32_t *  max_matched,
int32_t *  total_matched,
const int64_t *  init_agg_value,
int64_t **  out,
int32_t *  error_code,
const uint32_t *  num_tables_ptr,
const int64_t *  join_hash_tables 
)

Definition at line 1194 of file RuntimeFunctions.cpp.

References __attribute__(), error_code, frag_idx, frag_row_offsets, init_agg_value, join_hash_tables, max_matched, num_rows, out, and total_matched.

1205  {
1206  for (uint32_t i = 0; i < *num_fragments; ++i) {
1207  query_stub_hoisted_literals(col_buffers ? col_buffers[i] : nullptr,
1208  literals,
1209  &num_rows[i * (*num_tables_ptr)],
1210  &frag_row_offsets[i * (*num_tables_ptr)],
1211  max_matched,
1212  init_agg_value,
1213  out,
1214  i,
1215  join_hash_tables,
1216  total_matched,
1217  error_code);
1218  }
1219 }
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t * join_hash_tables
const int8_t const int64_t * num_rows
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t int32_t * total_matched
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t * error_code
const int8_t const int64_t const uint64_t const int32_t * max_matched
const int8_t const int64_t const uint64_t const int32_t const int64_t * init_agg_value
const int8_t * literals
const int8_t const int64_t const uint64_t * frag_row_offsets
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t ** out

◆ percent_window_func()

ALWAYS_INLINE double percent_window_func ( const int64_t  output_buff,
const int64_t  pos 
)

Definition at line 1127 of file RuntimeFunctions.cpp.

1128  {
1129  return reinterpret_cast<const double*>(output_buff)[pos];
1130 }

◆ record_error_code()

ALWAYS_INLINE int32_t record_error_code ( const int32_t  err_code,
int32_t *  error_codes 
)

Definition at line 788 of file RuntimeFunctions.cpp.

References __attribute__().

789  {
790  // NB: never override persistent error codes (with code greater than zero).
791  // On GPU, a projection query with a limit can run out of slots without it
792  // being an actual error if the limit has been hit. If a persistent error
793  // (division by zero, for example) occurs before running out of slots, we
794  // have to avoid overriding it, because there's a risk that the query would
795  // go through if we override with a potentially benign out-of-slots code.
796  if (err_code && error_codes[pos_start_impl(nullptr)] <= 0) {
797  error_codes[pos_start_impl(nullptr)] = err_code;
798  }
799  return err_code;
800 }

◆ row_number_window_func()

ALWAYS_INLINE int64_t row_number_window_func ( const int64_t  output_buff,
const int64_t  pos 
)

Definition at line 1122 of file RuntimeFunctions.cpp.

1123  {
1124  return reinterpret_cast<const int64_t*>(output_buff)[pos];
1125 }

◆ scale_decimal_down_not_nullable()

ALWAYS_INLINE int64_t scale_decimal_down_not_nullable ( const int64_t  operand,
const int64_t  scale,
const int64_t  null_val 
)

Definition at line 199 of file RuntimeFunctions.cpp.

201  {
202  int64_t tmp = scale >> 1;
203  tmp = operand >= 0 ? operand + tmp : operand - tmp;
204  return tmp / scale;
205 }

◆ scale_decimal_down_nullable()

ALWAYS_INLINE int64_t scale_decimal_down_nullable ( const int64_t  operand,
const int64_t  scale,
const int64_t  null_val 
)

Definition at line 186 of file RuntimeFunctions.cpp.

188  {
189  // rounded scale down of a decimal
190  if (operand == null_val) {
191  return null_val;
192  }
193 
194  int64_t tmp = scale >> 1;
195  tmp = operand >= 0 ? operand + tmp : operand - tmp;
196  return tmp / scale;
197 }

◆ scale_decimal_up()

ALWAYS_INLINE int64_t scale_decimal_up ( const int64_t  operand,
const uint64_t  scale,
const int64_t  operand_null_val,
const int64_t  result_null_val 
)

Definition at line 179 of file RuntimeFunctions.cpp.

182  {
183  return operand != operand_null_val ? operand * scale : result_null_val;
184 }
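These helpers convert between DECIMAL representations of different scales: scale_decimal_up multiplies a non-NULL operand by the scale factor, and the scale_decimal_down variants divide with rounding half away from zero. For example, going from scale 2 to scale 4 and back, assuming linkage against the runtime and an assumed NULL sentinel:

#include <cassert>
#include <cstdint>

extern "C" int64_t scale_decimal_up(const int64_t operand, const uint64_t scale,
                                    const int64_t operand_null_val, const int64_t result_null_val);
extern "C" int64_t scale_decimal_down_nullable(const int64_t operand, const int64_t scale,
                                               const int64_t null_val);

int main() {
  const int64_t null_val = -1;  // assumed NULL sentinel
  // 12.34 at scale 2 (1234) widened to scale 4 -> 123400
  assert(scale_decimal_up(1234, 100, null_val, null_val) == 123400);
  // 12.3456 at scale 4 (123456) narrowed to scale 2 -> 1235 (rounded)
  assert(scale_decimal_down_nullable(123456, 100, null_val) == 1235);
  // NULL propagates in both directions.
  assert(scale_decimal_up(null_val, 100, null_val, null_val) == null_val);
  assert(scale_decimal_down_nullable(null_val, 100, null_val) == null_val);
}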

◆ set_matching_group_value_perfect_hash_columnar()

ALWAYS_INLINE void set_matching_group_value_perfect_hash_columnar ( int64_t *  groups_buffer,
const uint32_t  hashed_index,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  entry_count 
)

Definition at line 1037 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64.

1042  {
1043  if (groups_buffer[hashed_index] == EMPTY_KEY_64) {
1044  for (uint32_t i = 0; i < key_count; i++) {
1045  groups_buffer[i * entry_count + hashed_index] = key[i];
1046  }
1047  }
1048 }
#define EMPTY_KEY_64

◆ string_pack()

ALWAYS_INLINE uint64_t string_pack ( const int8_t *  ptr,
const int32_t  len 
)

Definition at line 1091 of file RuntimeFunctions.cpp.

1091  {
1092  return (reinterpret_cast<const uint64_t>(ptr) & 0xffffffffffff) |
1093  (static_cast<const uint64_t>(len) << 48);
1094 }
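Strings are passed through the generated code as a single 64-bit word: the low 48 bits hold the buffer address and the high 16 bits the length (see extract_str_ptr and extract_str_len above). A round-trip sketch, assuming linkage against the runtime and a pointer that fits in 48 bits (true for typical x86-64 user-space addresses):

#include <cassert>
#include <cstdint>

extern "C" uint64_t string_pack(const int8_t* ptr, const int32_t len);
extern "C" int8_t* extract_str_ptr(const uint64_t str_and_len);
extern "C" int32_t extract_str_len(const uint64_t str_and_len);

int main() {
  const char* payload = "omnisci";
  const auto* ptr = reinterpret_cast<const int8_t*>(payload);
  const uint64_t packed = string_pack(ptr, 7);
  assert(extract_str_ptr(packed) == ptr);  // low 48 bits: buffer address
  assert(extract_str_len(packed) == 7);    // high 16 bits: length
}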

◆ sync_warp()

GPU_RT_STUB void sync_warp ( )

Definition at line 760 of file RuntimeFunctions.cpp.

760 {}

◆ sync_warp_protected()

GPU_RT_STUB void sync_warp_protected ( int64_t  thread_pos,
int64_t  row_count 
)

Definition at line 761 of file RuntimeFunctions.cpp.

761 {}

◆ thread_warp_idx()

GPU_RT_STUB int8_t thread_warp_idx ( const int8_t  warp_sz)

Definition at line 782 of file RuntimeFunctions.cpp.

782  {
783  return 0;
784 }

Variable Documentation

◆ agg_col_count

◆ blocks_share_memory

const int64_t const uint32_t const uint32_t const uint32_t const bool const bool blocks_share_memory

Definition at line 871 of file RuntimeFunctions.cpp.

◆ error_code

◆ frag_idx

const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t frag_idx

Definition at line 877 of file RuntimeFunctions.cpp.

Referenced by multifrag_query_hoisted_literals(), query_group_by_template_impl(), query_template_impl(), and Executor::skipFragment().

◆ frag_row_offsets

const int64_t const uint64_t * frag_row_offsets

Definition at line 1179 of file RuntimeFunctions.cpp.

Referenced by multifrag_query_hoisted_literals().

◆ groups_buffer_entry_count

◆ groups_buffer_size

const int32_t groups_buffer_size

Definition at line 806 of file RuntimeFunctions.cpp.

Referenced by QueryMemoryInitializer::allocateCountDistinctSet(), copy_group_by_buffers_from_gpu(), and create_dev_group_by_buffers().

◆ init_agg_value

const int64_t const uint64_t const int32_t const int64_t * init_agg_value

Definition at line 1179 of file RuntimeFunctions.cpp.

Referenced by multifrag_query_hoisted_literals().

◆ init_vals

const int64_t * init_vals

Definition at line 859 of file RuntimeFunctions.cpp.

Referenced by QueryMemoryInitializer::getNumBuffers(), and TEST().

◆ join_hash_tables

const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t * join_hash_tables

◆ key_qw_count

◆ keyless

const int64_t const uint32_t const uint32_t const uint32_t const bool keyless

Definition at line 859 of file RuntimeFunctions.cpp.

Referenced by GroupByAndAggregate::getKeylessInfo().

◆ literals

const int8_t* literals

◆ max_matched

const int64_t const uint64_t const int32_t * max_matched

◆ num_rows

const int64_t * num_rows

Definition at line 1179 of file RuntimeFunctions.cpp.

Referenced by DictionaryValueConverter< TARGET_TYPE >::allocateColumnarBuffer(), StringValueConverter::allocateColumnarData(), ArrayValueConverter< ELEMENT_CONVERTER >::allocateColumnarData(), GeoPointValueConverter::allocateColumnarData(), GeoLinestringValueConverter::allocateColumnarData(), GeoPolygonValueConverter::allocateColumnarData(), GeoMultiPolygonValueConverter::allocateColumnarData(), Importer_NS::DataStreamSink::archivePlumber(), SpeculativeTopNMap::asRows(), ColumnarResults::ColumnarResults(), anonymous_namespace{TopKTest.cpp}::SQLiteComparator::compare_impl(), anonymous_namespace{ExecuteTest.cpp}::SQLiteComparator::compare_impl(), anonymous_namespace{ExecuteTest.cpp}::create_sharded_join_table(), anonymous_namespace{DataGen.cpp}::data_gen(), Executor::executePlanWithGroupBy(), Executor::executePlanWithoutGroupBy(), Fragmenter_Namespace::FixedLenArrayChunkConverter::FixedLenArrayChunkConverter(), get_num_allocated_rows_from_gpu(), Catalog_Namespace::SysCatalog::getGranteesOfSharedDashboards(), Executor::getRowCountAndOffsetForAllFrags(), itasTestBody(), QueryExecutionContext::launchCpuCode(), QueryExecutionContext::launchGpuCode(), anonymous_namespace{StoragePerfTest.cpp}::load_data_for_thread_test_2(), main(), multifrag_query_hoisted_literals(), populate_table_random(), Parser::InsertIntoTableAsSelectStmt::populateData(), QueryExecutionContext::QueryExecutionContext(), com.mapd.tests.UpdateDeleteInsertConcurrencyTest::runTest(), Fragmenter_Namespace::StringChunkConverter::StringChunkConverter(), TEST(), TEST_P(), updateColumnByLiteralTest(), Fragmenter_Namespace::InsertOrderFragmenter::updateColumns(), and TargetValueConverter::~TargetValueConverter().

◆ out

◆ src

◆ sz

◆ total_matched

const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t int32_t * total_matched

Definition at line 1188 of file RuntimeFunctions.cpp.

Referenced by QueryExecutionContext::launchCpuCode(), multifrag_query_hoisted_literals(), query_group_by_template_impl(), and query_template_impl().

◆ warp_size

const int64_t const uint32_t const uint32_t const uint32_t const bool const int8_t warp_size