OmniSciDB  0bd2ec9cf4
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
RuntimeFunctions.cpp File Reference
#include "RuntimeFunctions.h"
#include "../Shared/funcannotations.h"
#include "BufferCompaction.h"
#include "HyperLogLogRank.h"
#include "MurmurHash.h"
#include "TypePunning.h"
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstring>
#include <thread>
#include <tuple>
#include "DecodersImpl.h"
#include "GroupByRuntime.cpp"
#include "JoinHashTableQueryRuntime.cpp"
#include "TopKRuntime.cpp"
+ Include dependency graph for RuntimeFunctions.cpp:

Go to the source code of this file.

Macros

#define DEF_ARITH_NULLABLE(type, null_type, opname, opsym)
 
#define DEF_ARITH_NULLABLE_LHS(type, null_type, opname, opsym)
 
#define DEF_ARITH_NULLABLE_RHS(type, null_type, opname, opsym)
 
#define DEF_CMP_NULLABLE(type, null_type, opname, opsym)
 
#define DEF_CMP_NULLABLE_LHS(type, null_type, opname, opsym)
 
#define DEF_CMP_NULLABLE_RHS(type, null_type, opname, opsym)
 
#define DEF_SAFE_DIV_NULLABLE(type, null_type, opname)
 
#define DEF_BINARY_NULLABLE_ALL_OPS(type, null_type)
 
#define DEF_UMINUS_NULLABLE(type, null_type)
 
#define DEF_CAST_NULLABLE(from_type, to_type)
 
#define DEF_CAST_NULLABLE_BIDIR(type1, type2)
 
#define GPU_RT_STUB   NEVER_INLINE __attribute__((optnone))
 
#define DEF_AGG_MAX_INT(n)
 
#define DEF_AGG_MIN_INT(n)
 
#define DEF_AGG_ID_INT(n)
 
#define DEF_CHECKED_SINGLE_AGG_ID_INT(n)
 
#define DEF_WRITE_PROJECTION_INT(n)
 
#define DEF_SKIP_AGG_ADD(base_agg_func)
 
#define DEF_SKIP_AGG(base_agg_func)
 
#define DATA_T   int64_t
 
#define DATA_T   int32_t
 
#define DATA_T   int16_t
 
#define DATA_T   int8_t
 
#define DEF_SKIP_AGG_ADD(base_agg_func)
 
#define DEF_SKIP_AGG(base_agg_func)
 
#define DATA_T   double
 
#define ADDR_T   int64_t
 
#define DATA_T   float
 
#define ADDR_T   int32_t
 
#define DEF_SHARED_AGG_RET_STUBS(base_agg_func)
 
#define DEF_SHARED_AGG_STUBS(base_agg_func)
 

Functions

ALWAYS_INLINE int64_t scale_decimal_up (const int64_t operand, const uint64_t scale, const int64_t operand_null_val, const int64_t result_null_val)
 
ALWAYS_INLINE int64_t scale_decimal_down_nullable (const int64_t operand, const int64_t scale, const int64_t null_val)
 
ALWAYS_INLINE int64_t scale_decimal_down_not_nullable (const int64_t operand, const int64_t scale, const int64_t null_val)
 
ALWAYS_INLINE int8_t logical_not (const int8_t operand, const int8_t null_val)
 
ALWAYS_INLINE int8_t logical_and (const int8_t lhs, const int8_t rhs, const int8_t null_val)
 
ALWAYS_INLINE int8_t logical_or (const int8_t lhs, const int8_t rhs, const int8_t null_val)
 
ALWAYS_INLINE uint64_t agg_count (uint64_t *agg, const int64_t)
 
ALWAYS_INLINE void agg_count_distinct_bitmap (int64_t *agg, const int64_t val, const int64_t min_val)
 
GPU_RT_STUB void agg_count_distinct_bitmap_gpu (int64_t *, const int64_t, const int64_t, const int64_t, const int64_t, const uint64_t, const uint64_t)
 
NEVER_INLINE void agg_approximate_count_distinct (int64_t *agg, const int64_t key, const uint32_t b)
 
GPU_RT_STUB void agg_approximate_count_distinct_gpu (int64_t *, const int64_t, const uint32_t, const int64_t, const int64_t)
 
ALWAYS_INLINE int8_t bit_is_set (const int64_t bitset, const int64_t val, const int64_t min_val, const int64_t max_val, const int64_t null_val, const int8_t null_bool_val)
 
ALWAYS_INLINE int64_t agg_sum (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE void agg_max (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE void agg_min (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE void agg_id (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE int32_t checked_single_agg_id (int64_t *agg, const int64_t val, const int64_t null_val)
 
ALWAYS_INLINE void agg_count_distinct_bitmap_skip_val (int64_t *agg, const int64_t val, const int64_t min_val, const int64_t skip_val)
 
GPU_RT_STUB void agg_count_distinct_bitmap_skip_val_gpu (int64_t *, const int64_t, const int64_t, const int64_t, const int64_t, const int64_t, const uint64_t, const uint64_t)
 
ALWAYS_INLINE uint32_t agg_count_int32 (uint32_t *agg, const int32_t)
 
ALWAYS_INLINE int32_t agg_sum_int32 (int32_t *agg, const int32_t val)
 
ALWAYS_INLINE int64_t agg_sum_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)
 
ALWAYS_INLINE int32_t agg_sum_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)
 
ALWAYS_INLINE uint64_t agg_count_skip_val (uint64_t *agg, const int64_t val, const int64_t skip_val)
 
ALWAYS_INLINE uint32_t agg_count_int32_skip_val (uint32_t *agg, const int32_t val, const int32_t skip_val)
 
ALWAYS_INLINE uint64_t agg_count_double (uint64_t *agg, const double val)
 
ALWAYS_INLINE void agg_sum_double (int64_t *agg, const double val)
 
ALWAYS_INLINE void agg_max_double (int64_t *agg, const double val)
 
ALWAYS_INLINE void agg_min_double (int64_t *agg, const double val)
 
ALWAYS_INLINE void agg_id_double (int64_t *agg, const double val)
 
ALWAYS_INLINE int32_t checked_single_agg_id_double (int64_t *agg, const double val, const double null_val)
 
ALWAYS_INLINE uint32_t agg_count_float (uint32_t *agg, const float val)
 
ALWAYS_INLINE void agg_sum_float (int32_t *agg, const float val)
 
ALWAYS_INLINE void agg_max_float (int32_t *agg, const float val)
 
ALWAYS_INLINE void agg_min_float (int32_t *agg, const float val)
 
ALWAYS_INLINE void agg_id_float (int32_t *agg, const float val)
 
ALWAYS_INLINE int32_t checked_single_agg_id_float (int32_t *agg, const float val, const float null_val)
 
ALWAYS_INLINE uint64_t agg_count_double_skip_val (uint64_t *agg, const double val, const double skip_val)
 
ALWAYS_INLINE uint32_t agg_count_float_skip_val (uint32_t *agg, const float val, const float skip_val)
 
ALWAYS_INLINE int64_t decimal_floor (const int64_t x, const int64_t scale)
 
ALWAYS_INLINE int64_t decimal_ceil (const int64_t x, const int64_t scale)
 
GPU_RT_STUB int32_t checked_single_agg_id_shared (int64_t *agg, const int64_t val, const int64_t null_val)
 
GPU_RT_STUB int32_t checked_single_agg_id_int32_shared (int32_t *agg, const int32_t val, const int32_t null_val)
 
GPU_RT_STUB int32_t checked_single_agg_id_int16_shared (int16_t *agg, const int16_t val, const int16_t null_val)
 
GPU_RT_STUB int32_t checked_single_agg_id_int8_shared (int8_t *agg, const int8_t val, const int8_t null_val)
 
GPU_RT_STUB int32_t checked_single_agg_id_double_shared (int64_t *agg, const double val, const double null_val)
 
GPU_RT_STUB int32_t checked_single_agg_id_float_shared (int32_t *agg, const float val, const float null_val)
 
GPU_RT_STUB void agg_max_int16_skip_val_shared (int16_t *agg, const int16_t val, const int16_t skip_val)
 
GPU_RT_STUB void agg_max_int8_skip_val_shared (int8_t *agg, const int8_t val, const int8_t skip_val)
 
GPU_RT_STUB void agg_min_int16_skip_val_shared (int16_t *agg, const int16_t val, const int16_t skip_val)
 
GPU_RT_STUB void agg_min_int8_skip_val_shared (int8_t *agg, const int8_t val, const int8_t skip_val)
 
GPU_RT_STUB void agg_id_double_shared_slow (int64_t *agg, const double *val)
 
GPU_RT_STUB int64_t agg_sum_shared (int64_t *agg, const int64_t val)
 
GPU_RT_STUB int64_t agg_sum_skip_val_shared (int64_t *agg, const int64_t val, const int64_t skip_val)
 
GPU_RT_STUB int32_t agg_sum_int32_shared (int32_t *agg, const int32_t val)
 
GPU_RT_STUB int32_t agg_sum_int32_skip_val_shared (int32_t *agg, const int32_t val, const int32_t skip_val)
 
GPU_RT_STUB void agg_sum_double_shared (int64_t *agg, const double val)
 
GPU_RT_STUB void agg_sum_double_skip_val_shared (int64_t *agg, const double val, const double skip_val)
 
GPU_RT_STUB void agg_sum_float_shared (int32_t *agg, const float val)
 
GPU_RT_STUB void agg_sum_float_skip_val_shared (int32_t *agg, const float val, const float skip_val)
 
GPU_RT_STUB void force_sync ()
 
GPU_RT_STUB void sync_warp ()
 
GPU_RT_STUB void sync_warp_protected (int64_t thread_pos, int64_t row_count)
 
 __attribute__ ((noinline)) int32_t pos_start_impl(int32_t *error_code)
 
GPU_RT_STUB int8_t thread_warp_idx (const int8_t warp_sz)
 
ALWAYS_INLINE int32_t record_error_code (const int32_t err_code, int32_t *error_codes)
 
int64_t * init_shared_mem_nop (int64_t *groups_buffer, const int32_t groups_buffer_size)
 
void write_back_nop (int64_t *dest, int64_t *src, const int32_t sz)
 
void agg_from_smem_to_gmem_nop (int64_t *dest, int64_t *src, const int32_t sz)
 
[signature mangled by the documentation generator; parameters, per the Variables list below: int64_t *groups_buffer, const int64_t *init_vals, const uint32_t groups_buffer_entry_count, const uint32_t key_qw_count, const uint32_t agg_col_count, const bool keyless, const int8_t warp_size]
assert (groups_buffer)
 
template<typename T >
ALWAYS_INLINE int64_t * get_matching_group_value (int64_t *groups_buffer, const uint32_t h, const T *key, const uint32_t key_count, const uint32_t row_size_quad)
 
ALWAYS_INLINE int64_t * get_matching_group_value (int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t *init_vals)
 
template<typename T >
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot (int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const T *key, const uint32_t key_count)
 
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot (int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const int64_t *key, const uint32_t key_count, const uint32_t key_width)
 
ALWAYS_INLINE int64_t * get_matching_group_value_columnar (int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_qw_count, const size_t entry_count)
 
ALWAYS_INLINE int64_t * get_matching_group_value_perfect_hash (int64_t *groups_buffer, const uint32_t hashed_index, const int64_t *key, const uint32_t key_count, const uint32_t row_size_quad)
 
ALWAYS_INLINE void set_matching_group_value_perfect_hash_columnar (int64_t *groups_buffer, const uint32_t hashed_index, const int64_t *key, const uint32_t key_count, const uint32_t entry_count)
 
ALWAYS_INLINE int64_t * get_group_value_fast_keyless (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t, const uint32_t row_size_quad)
 
ALWAYS_INLINE int64_t * get_group_value_fast_keyless_semiprivate (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t, const uint32_t row_size_quad, const uint8_t thread_warp_idx, const uint8_t warp_size)
 
ALWAYS_INLINE int8_t * extract_str_ptr (const uint64_t str_and_len)
 
ALWAYS_INLINE int32_t extract_str_len (const uint64_t str_and_len)
 
ALWAYS_INLINE uint64_t string_pack (const int8_t *ptr, const int32_t len)
 
ALWAYS_INLINE DEVICE int32_t char_length (const char *str, const int32_t str_len)
 
ALWAYS_INLINE DEVICE int32_t char_length_nullable (const char *str, const int32_t str_len, const int32_t int_null)
 
ALWAYS_INLINE DEVICE int32_t key_for_string_encoded (const int32_t str_id)
 
ALWAYS_INLINE int64_t row_number_window_func (const int64_t output_buff, const int64_t pos)
 
ALWAYS_INLINE double percent_window_func (const int64_t output_buff, const int64_t pos)
 
ALWAYS_INLINE double load_double (const int64_t *agg)
 
ALWAYS_INLINE float load_float (const int32_t *agg)
 
ALWAYS_INLINE double load_avg_int (const int64_t *sum, const int64_t *count, const double null_val)
 
ALWAYS_INLINE double load_avg_decimal (const int64_t *sum, const int64_t *count, const double null_val, const uint32_t scale)
 
ALWAYS_INLINE double load_avg_double (const int64_t *agg, const int64_t *count, const double null_val)
 
ALWAYS_INLINE double load_avg_float (const int32_t *agg, const int32_t *count, const double null_val)
 
NEVER_INLINE void linear_probabilistic_count (uint8_t *bitmap, const uint32_t bitmap_bytes, const uint8_t *key_bytes, const uint32_t key_len)
 
int32_t *total_matched 
assert (col_buffers||literals||num_rows||frag_row_offsets||max_matched||init_agg_value||out||frag_idx||error_code||join_hash_tables||total_matched)
 
void multifrag_query_hoisted_literals (const int8_t ***col_buffers, const uint64_t *num_fragments, const int8_t *literals, const int64_t *num_rows, const uint64_t *frag_row_offsets, const int32_t *max_matched, int32_t *total_matched, const int64_t *init_agg_value, int64_t **out, int32_t *error_code, const uint32_t *num_tables_ptr, const int64_t *join_hash_tables)
 
int32_t *total_matched 
assert (col_buffers||num_rows||frag_row_offsets||max_matched||init_agg_value||out||frag_idx||error_code||join_hash_tables||total_matched)
 
void multifrag_query (const int8_t ***col_buffers, const uint64_t *num_fragments, const int64_t *num_rows, const uint64_t *frag_row_offsets, const int32_t *max_matched, int32_t *total_matched, const int64_t *init_agg_value, int64_t **out, int32_t *error_code, const uint32_t *num_tables_ptr, const int64_t *join_hash_tables)
 

Variables

const int32_t
groups_buffer_size return 
groups_buffer
 
int64_t * src
 
const int32_t
groups_buffer_size return 
nullptr
 
const int64_t * init_vals
 
const int64_t const uint32_t groups_buffer_entry_count
 
const int64_t const uint32_t
const uint32_t 
key_qw_count
 
const int64_t const uint32_t
const uint32_t const uint32_t 
agg_col_count
 
const int64_t const uint32_t
const uint32_t const uint32_t
const bool 
keyless
 
const int64_t const uint32_t
const uint32_t const uint32_t
const bool const bool 
blocks_share_memory
 
const int8_t * literals
 
const int8_t const int64_t * num_rows
 
const int8_t const int64_t
const uint64_t * 
frag_row_offsets
 
const int8_t const int64_t
const uint64_t const int32_t * 
max_matched
 
const int8_t const int64_t
const uint64_t const int32_t
const int64_t * 
init_agg_value
 
const int8_t const int64_t
const uint64_t const int32_t
const int64_t int64_t ** 
out
 
const int8_t const int64_t
const uint64_t const int32_t
const int64_t int64_t uint32_t 
frag_idx
 
const int8_t const int64_t
const uint64_t const int32_t
const int64_t int64_t uint32_t
const int64_t * 
join_hash_tables
 
const int8_t const int64_t
const uint64_t const int32_t
const int64_t int64_t uint32_t
const int64_t int32_t * 
error_code
 

Macro Definition Documentation

#define ADDR_T   int64_t

Definition at line 690 of file RuntimeFunctions.cpp.

#define ADDR_T   int32_t

Definition at line 690 of file RuntimeFunctions.cpp.

#define DATA_T   int64_t

Definition at line 689 of file RuntimeFunctions.cpp.

#define DATA_T   int32_t

Definition at line 689 of file RuntimeFunctions.cpp.

#define DATA_T   int16_t

Definition at line 689 of file RuntimeFunctions.cpp.

#define DATA_T   int8_t

Definition at line 689 of file RuntimeFunctions.cpp.

#define DATA_T   double

Definition at line 689 of file RuntimeFunctions.cpp.

#define DATA_T   float

Definition at line 689 of file RuntimeFunctions.cpp.

#define DEF_AGG_ID_INT (   n)
Value:
extern "C" ALWAYS_INLINE void agg_id_int##n(int##n##_t* agg, const int##n##_t val) { \
*agg = val; \
}
#define ALWAYS_INLINE

Definition at line 423 of file RuntimeFunctions.cpp.

#define DEF_AGG_MAX_INT (   n)
Value:
extern "C" ALWAYS_INLINE void agg_max_int##n(int##n##_t* agg, const int##n##_t val) { \
*agg = std::max(*agg, val); \
}
#define ALWAYS_INLINE

Definition at line 403 of file RuntimeFunctions.cpp.

#define DEF_AGG_MIN_INT (   n)
Value:
extern "C" ALWAYS_INLINE void agg_min_int##n(int##n##_t* agg, const int##n##_t val) { \
*agg = std::min(*agg, val); \
}
#define ALWAYS_INLINE

Definition at line 413 of file RuntimeFunctions.cpp.

#define DEF_ARITH_NULLABLE (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE type opname##_##type##_nullable( \
const type lhs, const type rhs, const null_type null_val) { \
if (lhs != null_val && rhs != null_val) { \
return lhs opsym rhs; \
} \
return null_val; \
}
#define ALWAYS_INLINE

Definition at line 41 of file RuntimeFunctions.cpp.

#define DEF_ARITH_NULLABLE_LHS (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE type opname##_##type##_nullable_lhs( \
const type lhs, const type rhs, const null_type null_val) { \
if (lhs != null_val) { \
return lhs opsym rhs; \
} \
return null_val; \
}
#define ALWAYS_INLINE

Definition at line 50 of file RuntimeFunctions.cpp.

#define DEF_ARITH_NULLABLE_RHS (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE type opname##_##type##_nullable_rhs( \
const type lhs, const type rhs, const null_type null_val) { \
if (rhs != null_val) { \
return lhs opsym rhs; \
} \
return null_val; \
}
#define ALWAYS_INLINE

Definition at line 59 of file RuntimeFunctions.cpp.

#define DEF_BINARY_NULLABLE_ALL_OPS (   type,
  null_type 
)

Definition at line 113 of file RuntimeFunctions.cpp.

#define DEF_CAST_NULLABLE (   from_type,
  to_type 
)
Value:
extern "C" ALWAYS_INLINE to_type cast_##from_type##_to_##to_type##_nullable( \
const from_type operand, \
const from_type from_null_val, \
const to_type to_null_val) { \
return operand == from_null_val ? to_null_val : operand; \
}
#define ALWAYS_INLINE

Definition at line 217 of file RuntimeFunctions.cpp.

#define DEF_CAST_NULLABLE_BIDIR (   type1,
  type2 
)
Value:
DEF_CAST_NULLABLE(type1, type2) \
DEF_CAST_NULLABLE(type2, type1)
#define DEF_CAST_NULLABLE(from_type, to_type)

Definition at line 225 of file RuntimeFunctions.cpp.

#define DEF_CHECKED_SINGLE_AGG_ID_INT (   n)
Value:
extern "C" ALWAYS_INLINE int32_t checked_single_agg_id_int##n( \
int##n##_t* agg, const int##n##_t val, const int##n##_t null_val) { \
if (val == null_val) { \
return 0; \
} \
if (*agg == val) { \
return 0; \
} else if (*agg == null_val) { \
*agg = val; \
return 0; \
} else { \
/* see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES*/ \
return 15; \
} \
}
#define ALWAYS_INLINE

Definition at line 428 of file RuntimeFunctions.cpp.

#define DEF_CMP_NULLABLE (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE int8_t opname##_##type##_nullable( \
const type lhs, \
const type rhs, \
const null_type null_val, \
const int8_t null_bool_val) { \
if (lhs != null_val && rhs != null_val) { \
return lhs opsym rhs; \
} \
return null_bool_val; \
}
#define ALWAYS_INLINE

Definition at line 68 of file RuntimeFunctions.cpp.

#define DEF_CMP_NULLABLE_LHS (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE int8_t opname##_##type##_nullable_lhs( \
const type lhs, \
const type rhs, \
const null_type null_val, \
const int8_t null_bool_val) { \
if (lhs != null_val) { \
return lhs opsym rhs; \
} \
return null_bool_val; \
}
#define ALWAYS_INLINE

Definition at line 80 of file RuntimeFunctions.cpp.

#define DEF_CMP_NULLABLE_RHS (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE int8_t opname##_##type##_nullable_rhs( \
const type lhs, \
const type rhs, \
const null_type null_val, \
const int8_t null_bool_val) { \
if (rhs != null_val) { \
return lhs opsym rhs; \
} \
return null_bool_val; \
}
#define ALWAYS_INLINE

Definition at line 92 of file RuntimeFunctions.cpp.

#define DEF_SAFE_DIV_NULLABLE (   type,
  null_type,
  opname 
)
Value:
extern "C" ALWAYS_INLINE type safe_div_##type( \
const type lhs, const type rhs, const null_type null_val) { \
if (lhs != null_val && rhs != null_val && rhs != 0) { \
return lhs / rhs; \
} \
return null_val; \
}
#define ALWAYS_INLINE

Definition at line 104 of file RuntimeFunctions.cpp.

#define DEF_SHARED_AGG_RET_STUBS (   base_agg_func)

Definition at line 716 of file RuntimeFunctions.cpp.

#define DEF_SHARED_AGG_STUBS (   base_agg_func)
Value:
extern "C" GPU_RT_STUB void base_agg_func##_shared(int64_t* agg, const int64_t val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_skip_val_shared( \
int64_t* agg, const int64_t val, const int64_t skip_val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_int32_shared(int32_t* agg, \
const int32_t val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_int16_shared(int16_t* agg, \
const int16_t val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_int8_shared(int8_t* agg, \
const int8_t val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_int32_skip_val_shared( \
int32_t* agg, const int32_t val, const int32_t skip_val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_double_shared(int64_t* agg, \
const double val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_double_skip_val_shared( \
int64_t* agg, const double val, const double skip_val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_float_shared(int32_t* agg, \
const float val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_float_skip_val_shared( \
int32_t* agg, const float val, const float skip_val) {}
#define GPU_RT_STUB

Definition at line 755 of file RuntimeFunctions.cpp.

#define DEF_SKIP_AGG (   base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
DATA_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
const DATA_T old_agg = *agg; \
if (old_agg != skip_val) { \
base_agg_func(agg, val); \
} else { \
*agg = val; \
} \
} \
}
#define DATA_T
#define ALWAYS_INLINE

Definition at line 668 of file RuntimeFunctions.cpp.

#define DEF_SKIP_AGG (   base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
const ADDR_T old_agg = *agg; \
if (old_agg != *reinterpret_cast<const ADDR_T*>(may_alias_ptr(&skip_val))) { \
base_agg_func(agg, val); \
} else { \
*agg = *reinterpret_cast<const ADDR_T*>(may_alias_ptr(&val)); \
} \
} \
}
#define DATA_T
#define ADDR_T
#define ALWAYS_INLINE

Definition at line 668 of file RuntimeFunctions.cpp.

#define DEF_SKIP_AGG_ADD (   base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
DATA_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
base_agg_func(agg, val); \
} \
}
#define DATA_T
#define ALWAYS_INLINE

Definition at line 660 of file RuntimeFunctions.cpp.

#define DEF_SKIP_AGG_ADD (   base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
base_agg_func(agg, val); \
} \
}
#define DATA_T
#define ADDR_T
#define ALWAYS_INLINE

Definition at line 660 of file RuntimeFunctions.cpp.

#define DEF_UMINUS_NULLABLE (   type,
  null_type 
)
Value:
extern "C" ALWAYS_INLINE type uminus_##type##_nullable(const type operand, \
const null_type null_val) { \
return operand == null_val ? null_val : -operand; \
}
#define ALWAYS_INLINE

Definition at line 202 of file RuntimeFunctions.cpp.

#define DEF_WRITE_PROJECTION_INT (   n)
Value:
extern "C" ALWAYS_INLINE void write_projection_int##n( \
int8_t* slot_ptr, const int##n##_t val, const int64_t init_val) { \
if (val != init_val) { \
*reinterpret_cast<int##n##_t*>(slot_ptr) = val; \
} \
}
#define ALWAYS_INLINE

Definition at line 456 of file RuntimeFunctions.cpp.

#define GPU_RT_STUB   NEVER_INLINE __attribute__((optnone))

Definition at line 291 of file RuntimeFunctions.cpp.

Function Documentation

__attribute__ ((noinline)) int32_t pos_start_impl (int32_t *error_code)

Definition at line 878 of file RuntimeFunctions.cpp.

878  {
879  int32_t row_index_resume{0};
880  if (error_code) {
881  row_index_resume = error_code[0];
882  error_code[0] = 0;
883  }
884  return row_index_resume;
885 }
int32_t * error_code
NEVER_INLINE void agg_approximate_count_distinct ( int64_t *  agg,
const int64_t  key,
const uint32_t  b 
)

Definition at line 301 of file RuntimeFunctions.cpp.

References get_rank(), and MurmurHash64A().

303  {
304  const uint64_t hash = MurmurHash64A(&key, sizeof(key), 0);
305  const uint32_t index = hash >> (64 - b);
306  const uint8_t rank = get_rank(hash << b, 64 - b);
307  uint8_t* M = reinterpret_cast<uint8_t*>(*agg);
308  M[index] = std::max(M[index], rank);
309 }
FORCE_INLINE uint8_t get_rank(uint64_t x, uint32_t b)
NEVER_INLINE DEVICE uint64_t MurmurHash64A(const void *key, int len, uint64_t seed)
Definition: MurmurHash.cpp:26

+ Here is the call graph for this function:

GPU_RT_STUB void agg_approximate_count_distinct_gpu ( int64_t *  ,
const int64_t  ,
const uint32_t  ,
const int64_t  ,
const int64_t   
)

Definition at line 311 of file RuntimeFunctions.cpp.

315  {}
ALWAYS_INLINE uint64_t agg_count ( uint64_t *  agg,
const int64_t   
)

Definition at line 280 of file RuntimeFunctions.cpp.

Referenced by agg_count_skip_val(), and anonymous_namespace{GroupByAndAggregate.cpp}::get_agg_count().

280  {
281  return (*agg)++;
282 }

+ Here is the caller graph for this function:

ALWAYS_INLINE void agg_count_distinct_bitmap ( int64_t *  agg,
const int64_t  val,
const int64_t  min_val 
)

Definition at line 284 of file RuntimeFunctions.cpp.

Referenced by agg_count_distinct_bitmap_skip_val(), WindowFunctionContext::fillPartitionEnd(), WindowFunctionContext::fillPartitionStart(), anonymous_namespace{WindowContext.cpp}::index_to_partition_end(), and InValuesBitmap::InValuesBitmap().

286  {
287  const uint64_t bitmap_idx = val - min_val;
288  reinterpret_cast<int8_t*>(*agg)[bitmap_idx >> 3] |= (1 << (bitmap_idx & 7));
289 }

+ Here is the caller graph for this function:

GPU_RT_STUB void agg_count_distinct_bitmap_gpu ( int64_t *  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const uint64_t  ,
const uint64_t   
)

Definition at line 293 of file RuntimeFunctions.cpp.

299  {}
ALWAYS_INLINE void agg_count_distinct_bitmap_skip_val ( int64_t *  agg,
const int64_t  val,
const int64_t  min_val,
const int64_t  skip_val 
)

Definition at line 375 of file RuntimeFunctions.cpp.

References agg_count_distinct_bitmap().

378  {
379  if (val != skip_val) {
380  agg_count_distinct_bitmap(agg, val, min_val);
381  }
382 }
ALWAYS_INLINE void agg_count_distinct_bitmap(int64_t *agg, const int64_t val, const int64_t min_val)

+ Here is the call graph for this function:

GPU_RT_STUB void agg_count_distinct_bitmap_skip_val_gpu ( int64_t *  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const uint64_t  ,
const uint64_t   
)

Definition at line 384 of file RuntimeFunctions.cpp.

391  {}
ALWAYS_INLINE uint64_t agg_count_double ( uint64_t *  agg,
const double  val 
)

Definition at line 560 of file RuntimeFunctions.cpp.

Referenced by agg_count_double_skip_val().

560  {
561  return (*agg)++;
562 }

+ Here is the caller graph for this function:

ALWAYS_INLINE uint64_t agg_count_double_skip_val ( uint64_t *  agg,
const double  val,
const double  skip_val 
)

Definition at line 642 of file RuntimeFunctions.cpp.

References agg_count_double().

644  {
645  if (val != skip_val) {
646  return agg_count_double(agg, val);
647  }
648  return *agg;
649 }
ALWAYS_INLINE uint64_t agg_count_double(uint64_t *agg, const double val)

+ Here is the call graph for this function:

ALWAYS_INLINE uint32_t agg_count_float ( uint32_t *  agg,
const float  val 
)

Definition at line 601 of file RuntimeFunctions.cpp.

Referenced by agg_count_float_skip_val().

601  {
602  return (*agg)++;
603 }

+ Here is the caller graph for this function:

ALWAYS_INLINE uint32_t agg_count_float_skip_val ( uint32_t *  agg,
const float  val,
const float  skip_val 
)

Definition at line 651 of file RuntimeFunctions.cpp.

References agg_count_float().

653  {
654  if (val != skip_val) {
655  return agg_count_float(agg, val);
656  }
657  return *agg;
658 }
ALWAYS_INLINE uint32_t agg_count_float(uint32_t *agg, const float val)

+ Here is the call graph for this function:

ALWAYS_INLINE uint32_t agg_count_int32 ( uint32_t *  agg,
const int32_t   
)

Definition at line 393 of file RuntimeFunctions.cpp.

Referenced by agg_count_int32_skip_val().

393  {
394  return (*agg)++;
395 }

+ Here is the caller graph for this function:

ALWAYS_INLINE uint32_t agg_count_int32_skip_val ( uint32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)

Definition at line 505 of file RuntimeFunctions.cpp.

References agg_count_int32().

507  {
508  if (val != skip_val) {
509  return agg_count_int32(agg, val);
510  }
511  return *agg;
512 }
ALWAYS_INLINE uint32_t agg_count_int32(uint32_t *agg, const int32_t)

+ Here is the call graph for this function:

ALWAYS_INLINE uint64_t agg_count_skip_val ( uint64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Definition at line 496 of file RuntimeFunctions.cpp.

References agg_count().

498  {
499  if (val != skip_val) {
500  return agg_count(agg, val);
501  }
502  return *agg;
503 }
ALWAYS_INLINE uint64_t agg_count(uint64_t *agg, const int64_t)

+ Here is the call graph for this function:

agg_from_smem_to_gmem_nop (int64_t *dest, int64_t *src, const int32_t sz)
ALWAYS_INLINE void agg_id ( int64_t *  agg,
const int64_t  val 
)

Definition at line 353 of file RuntimeFunctions.cpp.

353  {
354  *agg = val;
355 }
ALWAYS_INLINE void agg_id_double ( int64_t *  agg,
const double  val 
)

Definition at line 579 of file RuntimeFunctions.cpp.

579  {
580  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&val)));
581 }
GPU_RT_STUB void agg_id_double_shared_slow ( int64_t *  agg,
const double *  val 
)

Definition at line 839 of file RuntimeFunctions.cpp.

839 {}
ALWAYS_INLINE void agg_id_float ( int32_t *  agg,
const float  val 
)

Definition at line 620 of file RuntimeFunctions.cpp.

620  {
621  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&val)));
622 }
ALWAYS_INLINE void agg_max ( int64_t *  agg,
const int64_t  val 
)

Definition at line 345 of file RuntimeFunctions.cpp.

345  {
346  *agg = std::max(*agg, val);
347 }
ALWAYS_INLINE void agg_max_double ( int64_t *  agg,
const double  val 
)

Definition at line 569 of file RuntimeFunctions.cpp.

569  {
570  const auto r = std::max(*reinterpret_cast<const double*>(agg), val);
571  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&r)));
572 }
ALWAYS_INLINE void agg_max_float ( int32_t *  agg,
const float  val 
)

Definition at line 610 of file RuntimeFunctions.cpp.

610  {
611  const auto r = std::max(*reinterpret_cast<const float*>(agg), val);
612  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&r)));
613 }
GPU_RT_STUB void agg_max_int16_skip_val_shared ( int16_t *  agg,
const int16_t  val,
const int16_t  skip_val 
)

Definition at line 823 of file RuntimeFunctions.cpp.

825  {}
GPU_RT_STUB void agg_max_int8_skip_val_shared ( int8_t *  agg,
const int8_t  val,
const int8_t  skip_val 
)

Definition at line 827 of file RuntimeFunctions.cpp.

829  {}
ALWAYS_INLINE void agg_min ( int64_t *  agg,
const int64_t  val 
)

Definition at line 349 of file RuntimeFunctions.cpp.

349  {
350  *agg = std::min(*agg, val);
351 }
ALWAYS_INLINE void agg_min_double ( int64_t *  agg,
const double  val 
)

Definition at line 574 of file RuntimeFunctions.cpp.

574  {
575  const auto r = std::min(*reinterpret_cast<const double*>(agg), val);
576  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&r)));
577 }
ALWAYS_INLINE void agg_min_float ( int32_t *  agg,
const float  val 
)

Definition at line 615 of file RuntimeFunctions.cpp.

615  {
616  const auto r = std::min(*reinterpret_cast<const float*>(agg), val);
617  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&r)));
618 }
GPU_RT_STUB void agg_min_int16_skip_val_shared ( int16_t *  agg,
const int16_t  val,
const int16_t  skip_val 
)

Definition at line 831 of file RuntimeFunctions.cpp.

833  {}
GPU_RT_STUB void agg_min_int8_skip_val_shared ( int8_t *  agg,
const int8_t  val,
const int8_t  skip_val 
)

Definition at line 835 of file RuntimeFunctions.cpp.

837  {}
ALWAYS_INLINE int64_t agg_sum ( int64_t *  agg,
const int64_t  val 
)

Definition at line 339 of file RuntimeFunctions.cpp.

Referenced by agg_sum_skip_val().

339  {
340  const auto old = *agg;
341  *agg += val;
342  return old;
343 }

+ Here is the caller graph for this function:

ALWAYS_INLINE void agg_sum_double ( int64_t *  agg,
const double  val 
)

Definition at line 564 of file RuntimeFunctions.cpp.

564  {
565  const auto r = *reinterpret_cast<const double*>(agg) + val;
566  *agg = *reinterpret_cast<const int64_t*>(may_alias_ptr(&r));
567 }
GPU_RT_STUB void agg_sum_double_shared ( int64_t *  agg,
const double  val 
)

Definition at line 860 of file RuntimeFunctions.cpp.

860 {}
GPU_RT_STUB void agg_sum_double_skip_val_shared ( int64_t *  agg,
const double  val,
const double  skip_val 
)

Definition at line 862 of file RuntimeFunctions.cpp.

864  {}
ALWAYS_INLINE void agg_sum_float ( int32_t *  agg,
const float  val 
)

Definition at line 605 of file RuntimeFunctions.cpp.

605  {
606  const auto r = *reinterpret_cast<const float*>(agg) + val;
607  *agg = *reinterpret_cast<const int32_t*>(may_alias_ptr(&r));
608 }
GPU_RT_STUB void agg_sum_float_shared ( int32_t *  agg,
const float  val 
)

Definition at line 865 of file RuntimeFunctions.cpp.

865 {}
GPU_RT_STUB void agg_sum_float_skip_val_shared ( int32_t *  agg,
const float  val,
const float  skip_val 
)

Definition at line 867 of file RuntimeFunctions.cpp.

869  {}
ALWAYS_INLINE int32_t agg_sum_int32 ( int32_t *  agg,
const int32_t  val 
)

Definition at line 397 of file RuntimeFunctions.cpp.

Referenced by agg_sum_int32_skip_val().

397  {
398  const auto old = *agg;
399  *agg += val;
400  return old;
401 }

+ Here is the caller graph for this function:

GPU_RT_STUB int32_t agg_sum_int32_shared ( int32_t *  agg,
const int32_t  val 
)

Definition at line 850 of file RuntimeFunctions.cpp.

850  {
851  return 0;
852 }
ALWAYS_INLINE int32_t agg_sum_int32_skip_val ( int32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)

Definition at line 482 of file RuntimeFunctions.cpp.

References agg_sum_int32().

484  {
485  const auto old = *agg;
486  if (val != skip_val) {
487  if (old != skip_val) {
488  return agg_sum_int32(agg, val);
489  } else {
490  *agg = val;
491  }
492  }
493  return old;
494 }
ALWAYS_INLINE int32_t agg_sum_int32(int32_t *agg, const int32_t val)

+ Here is the call graph for this function:

GPU_RT_STUB int32_t agg_sum_int32_skip_val_shared ( int32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)

Definition at line 854 of file RuntimeFunctions.cpp.

856  {
857  return 0;
858 }
GPU_RT_STUB int64_t agg_sum_shared ( int64_t *  agg,
const int64_t  val 
)

Definition at line 841 of file RuntimeFunctions.cpp.

841  {
842  return 0;
843 }
ALWAYS_INLINE int64_t agg_sum_skip_val ( int64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Definition at line 468 of file RuntimeFunctions.cpp.

References agg_sum().

Referenced by Executor::reduceResults().

470  {
471  const auto old = *agg;
472  if (val != skip_val) {
473  if (old != skip_val) {
474  return agg_sum(agg, val);
475  } else {
476  *agg = val;
477  }
478  }
479  return old;
480 }
ALWAYS_INLINE int64_t agg_sum(int64_t *agg, const int64_t val)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

GPU_RT_STUB int64_t agg_sum_skip_val_shared ( int64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Definition at line 845 of file RuntimeFunctions.cpp.

847  {
848  return 0;
849 }
int64_t const int32_t sz assert ( dest  )

Referenced by org.apache.calcite.prepare.MapDSqlAdvisor::applyPermissionsToTableHints(), array_append(), array_append__(), array_append__1(), array_append__2(), array_append__3(), array_append__4(), checkCudaErrors(), org.apache.calcite.sql2rel.SqlToRelConverter::collectInsertTargets(), org.apache.calcite.sql2rel.SqlToRelConverter::convertAgg(), org.apache.calcite.sql2rel.SqlToRelConverter::convertColumnList(), org.apache.calcite.sql2rel.SqlToRelConverter::convertCursor(), org.apache.calcite.sql2rel.SqlToRelConverter.Blackboard::convertExpression(), org.apache.calcite.sql2rel.SqlToRelConverter::convertFrom(), org.apache.calcite.sql2rel.SqlToRelConverter::convertIdentifier(), org.apache.calcite.sql2rel.SqlToRelConverter::convertInsert(), org.apache.calcite.sql2rel.SqlToRelConverter::convertInToOr(), org.apache.calcite.sql2rel.SqlToRelConverter::convertLiteralInValuesList(), org.apache.calcite.sql2rel.SqlToRelConverter::convertMatchRecognize(), org.apache.calcite.sql2rel.SqlToRelConverter::convertMerge(), org.apache.calcite.sql2rel.SqlToRelConverter::convertOrder(), org.apache.calcite.sql2rel.SqlToRelConverter::convertOrderItem(), org.apache.calcite.sql2rel.SqlToRelConverter::convertSelectList(), org.apache.calcite.sql2rel.SqlToRelConverter::convertUpdate(), org.apache.calcite.sql2rel.SqlToRelConverter::convertValues(), org.apache.calcite.sql2rel.SqlToRelConverter::convertWhere(), count_matches_baseline(), org.apache.calcite.sql2rel.SqlToRelConverter::createAggImpl(), org.apache.calcite.sql2rel.SqlToRelConverter::createJoin(), org.apache.calcite.sql2rel.SqlToRelConverter::createSource(), decompress(), com.mapd.calcite.parser.MapDSqlOperatorTable::dropSuffix(), fill_row_ids_baseline(), fixed_width_double_decode(), fixed_width_float_decode(), fixed_width_int_decode(), fixed_width_unsigned_decode(), org.apache.calcite.sql2rel.SqlToRelConverter::gatherOrderExprs(), get_join_column_element_value(), get_matching_baseline_hash_slot_readonly(), SQLTypeInfoCore< 
ArrayContextTypeSizer, ExecutorTypePackaging, DateTimeFacilities >::get_storage_size(), com.mapd.metadata.MetaConnect::get_table_detail_JSON(), org.apache.calcite.sql2rel.SqlToRelConverter::getCorrelationUse(), SqliteConnector::getData(), org.apache.calcite.sql2rel.SqlToRelConverter.Blackboard::getSubQueryExpr(), com.mapd.calcite.parser.MapDSqlOperatorTable.ExtFunction::getValueType(), CartesianProductIterator< T >::increment(), com.mapd.calcite.parser.MapDSqlOperatorTable.RowCopier::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.PgUnnest::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.Any::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.All::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.Now::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.Datetime::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.Truncate::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Contains::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Intersects::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Disjoint::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Within::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_DWithin::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_DFullyWithin::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Distance::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_MaxDistance::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_GeogFromText::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_GeomFromText::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Transform::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_X::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Y::inferReturnType(), 
com.mapd.calcite.parser.MapDSqlOperatorTable.ST_XMin::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_XMax::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_YMin::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_YMax::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_PointN::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_EndPoint::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_StartPoint::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Length::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Perimeter::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Area::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_NPoints::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_NRings::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_SRID::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_SetSRID::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Point::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.CastToGeography::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.OffsetInFragment::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ExtFunction::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.MapD_GeoPolyBoundsPtr::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.MapD_GeoPolyRenderGroup::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.OmniSci_Geo_PolyBoundsPtr::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.OmniSci_Geo_PolyRenderGroup::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.convert_meters_to_pixel_width::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.convert_meters_to_pixel_height::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.is_point_in_view::inferReturnType(), 
com.mapd.calcite.parser.MapDSqlOperatorTable.is_point_size_in_view::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.usTimestamp::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.nsTimestamp::inferReturnType(), SqliteConnector::isNull(), org.apache.calcite.sql2rel.SqlToRelConverter.AggConverter::lookupAggregates(), com.mapd.tests.SelectCopyFromDeleteConcurrencyTest::main(), org.apache.calcite.sql2rel.SqlToRelConverter::negate(), parse_numeric(), com.mapd.parser.server.ExtensionFunctionSignatureParser::pointerType(), org.apache.calcite.sql2rel.SqlToRelConverter::pushDownNotForIn(), SqliteConnector::query_with_text_params(), org.apache.calcite.sql2rel.SqlToRelConverter.Blackboard::register(), org.apache.calcite.sql2rel.SqlToRelConverter::setDynamicParamCountInExplain(), org.apache.calcite.sql2rel.SqlToRelConverter::substituteSubQuery(), Parser::SQLType::to_string(), com.mapd.parser.server.ExtensionFunctionSignatureParser::toSignature(), com.mapd.calcite.parser.MapDSqlOperatorTable.ExtFunction::toSqlTypeName(), org.apache.calcite.sql2rel.SqlToRelConverter.AggConverter::translateAgg(), org.apache.calcite.sql2rel.SqlToRelConverter::translateIn(), com.mapd.parser.server.ExtensionFunction::typeName(), and org.apache.calcite.sql2rel.SqlToRelConverter.AggConverter::visit().

const int64_t const uint32_t const uint32_t const uint32_t const bool const int8_t warp_size assert ( groups_buffer  )
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t int32_t* total_matched assert ( col_buffers||literals||num_rows||frag_row_offsets||max_matched||init_agg_value||out||frag_idx||error_code||join_hash_tables||  total_matched)
const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t int32_t* total_matched assert ( col_buffers||num_rows||frag_row_offsets||max_matched||init_agg_value||out||frag_idx||error_code||join_hash_tables||  total_matched)
ALWAYS_INLINE int8_t bit_is_set ( const int64_t  bitset,
const int64_t  val,
const int64_t  min_val,
const int64_t  max_val,
const int64_t  null_val,
const int8_t  null_bool_val 
)

Definition at line 317 of file RuntimeFunctions.cpp.

322  {
323  if (val == null_val) {
324  return null_bool_val;
325  }
326  if (val < min_val || val > max_val) {
327  return 0;
328  }
329  if (!bitset) {
330  return 0;
331  }
332  const uint64_t bitmap_idx = val - min_val;
333  return (reinterpret_cast<const int8_t*>(bitset))[bitmap_idx >> 3] &
334  (1 << (bitmap_idx & 7))
335  ? 1
336  : 0;
337 }
ALWAYS_INLINE DEVICE int32_t char_length ( const char *  str,
const int32_t  str_len 
)

Definition at line 1217 of file RuntimeFunctions.cpp.

Referenced by ScalarExprVisitor< std::unordered_set< InputColDescriptor > >::visit().

1218  {
1219  return str_len;
1220 }

+ Here is the caller graph for this function:

ALWAYS_INLINE DEVICE int32_t char_length_nullable ( const char *  str,
const int32_t  str_len,
const int32_t  int_null 
)

Definition at line 1222 of file RuntimeFunctions.cpp.

1224  {
1225  if (!str) {
1226  return int_null;
1227  }
1228  return str_len;
1229 }
ALWAYS_INLINE int32_t checked_single_agg_id ( int64_t *  agg,
const int64_t  val,
const int64_t  null_val 
)

Definition at line 357 of file RuntimeFunctions.cpp.

359  {
360  if (val == null_val) {
361  return 0;
362  }
363 
364  if (*agg == val) {
365  return 0;
366  } else if (*agg == null_val) {
367  *agg = val;
368  return 0;
369  } else {
370  // see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
371  return 15;
372  }
373 }
ALWAYS_INLINE int32_t checked_single_agg_id_double ( int64_t *  agg,
const double  val,
const double  null_val 
)

Definition at line 583 of file RuntimeFunctions.cpp.

585  {
586  if (val == null_val) {
587  return 0;
588  }
589 
590  if (*agg == *(reinterpret_cast<const int64_t*>(may_alias_ptr(&val)))) {
591  return 0;
592  } else if (*agg == *(reinterpret_cast<const int64_t*>(may_alias_ptr(&null_val)))) {
593  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&val)));
594  return 0;
595  } else {
596  // see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
597  return 15;
598  }
599 }
GPU_RT_STUB int32_t checked_single_agg_id_double_shared ( int64_t *  agg,
const double  val,
const double  null_val 
)

Definition at line 811 of file RuntimeFunctions.cpp.

813  {
814  return 0;
815 }
ALWAYS_INLINE int32_t checked_single_agg_id_float ( int32_t *  agg,
const float  val,
const float  null_val 
)

Definition at line 624 of file RuntimeFunctions.cpp.

626  {
627  if (val == null_val) {
628  return 0;
629  }
630 
631  if (*agg == *(reinterpret_cast<const int32_t*>(may_alias_ptr(&val)))) {
632  return 0;
633  } else if (*agg == *(reinterpret_cast<const int32_t*>(may_alias_ptr(&null_val)))) {
634  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&val)));
635  return 0;
636  } else {
637  // see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
638  return 15;
639  }
640 }
GPU_RT_STUB int32_t checked_single_agg_id_float_shared ( int32_t *  agg,
const float  val,
const float  null_val 
)

Definition at line 817 of file RuntimeFunctions.cpp.

819  {
820  return 0;
821 }
GPU_RT_STUB int32_t checked_single_agg_id_int16_shared ( int16_t *  agg,
const int16_t  val,
const int16_t  null_val 
)

Definition at line 799 of file RuntimeFunctions.cpp.

801  {
802  return 0;
803 }
GPU_RT_STUB int32_t checked_single_agg_id_int32_shared ( int32_t *  agg,
const int32_t  val,
const int32_t  null_val 
)

Definition at line 793 of file RuntimeFunctions.cpp.

795  {
796  return 0;
797 }
GPU_RT_STUB int32_t checked_single_agg_id_int8_shared ( int8_t *  agg,
const int8_t  val,
const int8_t  null_val 
)

Definition at line 804 of file RuntimeFunctions.cpp.

806  {
807  return 0;
808 }
GPU_RT_STUB int32_t checked_single_agg_id_shared ( int64_t *  agg,
const int64_t  val,
const int64_t  null_val 
)

Definition at line 786 of file RuntimeFunctions.cpp.

788  {
789  return 0;
790 }
ALWAYS_INLINE int64_t decimal_ceil ( const int64_t  x,
const int64_t  scale 
)

Definition at line 710 of file RuntimeFunctions.cpp.

References decimal_floor().

710  {
711  return decimal_floor(x, scale) + (x % scale ? scale : 0);
712 }
ALWAYS_INLINE int64_t decimal_floor(const int64_t x, const int64_t scale)

+ Here is the call graph for this function:

ALWAYS_INLINE int64_t decimal_floor ( const int64_t  x,
const int64_t  scale 
)

Definition at line 700 of file RuntimeFunctions.cpp.

Referenced by decimal_ceil().

700  {
701  if (x >= 0) {
702  return x / scale * scale;
703  }
704  if (!(x % scale)) {
705  return x;
706  }
707  return x / scale * scale - scale;
708 }

+ Here is the caller graph for this function:

ALWAYS_INLINE int32_t extract_str_len ( const uint64_t  str_and_len)

Definition at line 1190 of file RuntimeFunctions.cpp.

1190  {
1191  return static_cast<int64_t>(str_and_len) >> 48;
1192 }
ALWAYS_INLINE int8_t* extract_str_ptr ( const uint64_t  str_and_len)

Definition at line 1186 of file RuntimeFunctions.cpp.

1186  {
1187  return reinterpret_cast<int8_t*>(str_and_len & 0xffffffffffff);
1188 }
GPU_RT_STUB void force_sync ( )

Definition at line 871 of file RuntimeFunctions.cpp.

871 {}
ALWAYS_INLINE int64_t* get_group_value_fast_keyless ( int64_t *  groups_buffer,
const int64_t  key,
const int64_t  min_key,
const int64_t  ,
const uint32_t  row_size_quad 
)

Definition at line 1166 of file RuntimeFunctions.cpp.

1171  {
1172  return groups_buffer + row_size_quad * (key - min_key);
1173 }
const int32_t groups_buffer_size return groups_buffer
ALWAYS_INLINE int64_t* get_group_value_fast_keyless_semiprivate ( int64_t *  groups_buffer,
const int64_t  key,
const int64_t  min_key,
const int64_t  ,
const uint32_t  row_size_quad,
const uint8_t  thread_warp_idx,
const uint8_t  warp_size 
)

Definition at line 1175 of file RuntimeFunctions.cpp.

1182  {
1183  return groups_buffer + row_size_quad * (warp_size * (key - min_key) + thread_warp_idx);
1184 }
const int32_t groups_buffer_size return groups_buffer
__device__ int8_t thread_warp_idx(const int8_t warp_sz)
Definition: cuda_mapd_rt.cu:23
template<typename T >
ALWAYS_INLINE int64_t* get_matching_group_value ( int64_t *  groups_buffer,
const uint32_t  h,
const T *  key,
const uint32_t  key_count,
const uint32_t  row_size_quad 
)

Definition at line 1008 of file RuntimeFunctions.cpp.

References align_to_int64().

1012  {
1013  auto off = h * row_size_quad;
1014  auto row_ptr = reinterpret_cast<T*>(groups_buffer + off);
1015  if (*row_ptr == get_empty_key<T>()) {
1016  memcpy(row_ptr, key, key_count * sizeof(T));
1017  auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count);
1018  return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8));
1019  }
1020  if (memcmp(row_ptr, key, key_count * sizeof(T)) == 0) {
1021  auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count);
1022  return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8));
1023  }
1024  return nullptr;
1025 }
const int32_t groups_buffer_size return groups_buffer
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)

+ Here is the call graph for this function:

ALWAYS_INLINE int64_t* get_matching_group_value ( int64_t *  groups_buffer,
const uint32_t  h,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width,
const uint32_t  row_size_quad,
const int64_t *  init_vals 
)

Definition at line 1027 of file RuntimeFunctions.cpp.

References get_matching_group_value().

1033  {
1034  switch (key_width) {
1035  case 4:
1036  return get_matching_group_value(groups_buffer,
1037  h,
1038  reinterpret_cast<const int32_t*>(key),
1039  key_count,
1040  row_size_quad);
1041  case 8:
1042  return get_matching_group_value(groups_buffer, h, key, key_count, row_size_quad);
1043  default:;
1044  }
1045  return nullptr;
1046 }
const int32_t groups_buffer_size return groups_buffer
__device__ int64_t * get_matching_group_value(int64_t *groups_buffer, const uint32_t h, const T *key, const uint32_t key_count, const uint32_t row_size_quad)

+ Here is the call graph for this function:

ALWAYS_INLINE int64_t* get_matching_group_value_columnar ( int64_t *  groups_buffer,
const uint32_t  h,
const int64_t *  key,
const uint32_t  key_qw_count,
const size_t  entry_count 
)

Definition at line 1096 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64, and key_qw_count.

1101  {
1102  auto off = h;
1103  if (groups_buffer[off] == EMPTY_KEY_64) {
1104  for (size_t i = 0; i < key_qw_count; ++i) {
1105  groups_buffer[off] = key[i];
1106  off += entry_count;
1107  }
1108  return &groups_buffer[off];
1109  }
1110  off = h;
1111  for (size_t i = 0; i < key_qw_count; ++i) {
1112  if (groups_buffer[off] != key[i]) {
1113  return nullptr;
1114  }
1115  off += entry_count;
1116  }
1117  return &groups_buffer[off];
1118 }
const int32_t groups_buffer_size return groups_buffer
#define EMPTY_KEY_64
const int64_t const uint32_t const uint32_t key_qw_count
template<typename T >
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot ( int64_t *  groups_buffer,
const uint32_t  entry_count,
const uint32_t  h,
const T *  key,
const uint32_t  key_count 
)

Definition at line 1049 of file RuntimeFunctions.cpp.

References groups_buffer.

1053  {
1054  auto off = h;
1055  auto key_buffer = reinterpret_cast<T*>(groups_buffer);
1056  if (key_buffer[off] == get_empty_key<T>()) {
1057  for (size_t i = 0; i < key_count; ++i) {
1058  key_buffer[off] = key[i];
1059  off += entry_count;
1060  }
1061  return h;
1062  }
1063  off = h;
1064  for (size_t i = 0; i < key_count; ++i) {
1065  if (key_buffer[off] != key[i]) {
1066  return -1;
1067  }
1068  off += entry_count;
1069  }
1070  return h;
1071 }
const int32_t groups_buffer_size return groups_buffer
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot ( int64_t *  groups_buffer,
const uint32_t  entry_count,
const uint32_t  h,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width 
)

Definition at line 1074 of file RuntimeFunctions.cpp.

References get_matching_group_value_columnar_slot().

1079  {
1080  switch (key_width) {
1081  case 4:
1082  return get_matching_group_value_columnar_slot(groups_buffer,
1083  entry_count,
1084  h,
1085  reinterpret_cast<const int32_t*>(key),
1086  key_count);
1087  case 8:
1088  return get_matching_group_value_columnar_slot(
1089  groups_buffer, entry_count, h, key, key_count);
1090  default:
1091  return -1;
1092  }
1093  return -1;
1094 }
const int32_t groups_buffer_size return groups_buffer
__device__ int32_t get_matching_group_value_columnar_slot(int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const T *key, const uint32_t key_count)

+ Here is the call graph for this function:

ALWAYS_INLINE int64_t* get_matching_group_value_perfect_hash ( int64_t *  groups_buffer,
const uint32_t  hashed_index,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  row_size_quad 
)

Definition at line 1131 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64.

1136  {
1137  uint32_t off = hashed_index * row_size_quad;
1138  if (groups_buffer[off] == EMPTY_KEY_64) {
1139  for (uint32_t i = 0; i < key_count; ++i) {
1140  groups_buffer[off + i] = key[i];
1141  }
1142  }
1143  return groups_buffer + off + key_count;
1144 }
const int32_t groups_buffer_size return groups_buffer
#define EMPTY_KEY_64
const int32_t groups_buffer_size return init_shared_mem_nop ( groups_buffer  ,
groups_buffer_size   
)
ALWAYS_INLINE DEVICE int32_t key_for_string_encoded ( const int32_t  str_id)

Definition at line 1231 of file RuntimeFunctions.cpp.

1231  {
1232  return str_id;
1233 }
NEVER_INLINE void linear_probabilistic_count ( uint8_t *  bitmap,
const uint32_t  bitmap_bytes,
const uint8_t *  key_bytes,
const uint32_t  key_len 
)

Definition at line 1280 of file RuntimeFunctions.cpp.

References MurmurHash1().

1283  {
1284  const uint32_t bit_pos = MurmurHash1(key_bytes, key_len, 0) % (bitmap_bytes * 8);
1285  const uint32_t word_idx = bit_pos / 32;
1286  const uint32_t bit_idx = bit_pos % 32;
1287  reinterpret_cast<uint32_t*>(bitmap)[word_idx] |= 1 << bit_idx;
1288 }
NEVER_INLINE DEVICE uint32_t MurmurHash1(const void *key, int len, const uint32_t seed)
Definition: MurmurHash.cpp:20

+ Here is the call graph for this function:

ALWAYS_INLINE double load_avg_decimal ( const int64_t *  sum,
const int64_t *  count,
const double  null_val,
const uint32_t  scale 
)

Definition at line 1259 of file RuntimeFunctions.cpp.

1262  {
1263  return *count != 0 ? (static_cast<double>(*sum) / pow(10, scale)) / *count : null_val;
1264 }
ALWAYS_INLINE double load_avg_double ( const int64_t *  agg,
const int64_t *  count,
const double  null_val 
)

Definition at line 1266 of file RuntimeFunctions.cpp.

1268  {
1269  return *count != 0 ? *reinterpret_cast<const double*>(may_alias_ptr(agg)) / *count
1270  : null_val;
1271 }
ALWAYS_INLINE double load_avg_float ( const int32_t *  agg,
const int32_t *  count,
const double  null_val 
)

Definition at line 1273 of file RuntimeFunctions.cpp.

1275  {
1276  return *count != 0 ? *reinterpret_cast<const float*>(may_alias_ptr(agg)) / *count
1277  : null_val;
1278 }
ALWAYS_INLINE double load_avg_int ( const int64_t *  sum,
const int64_t *  count,
const double  null_val 
)

Definition at line 1253 of file RuntimeFunctions.cpp.

1255  {
1256  return *count != 0 ? static_cast<double>(*sum) / *count : null_val;
1257 }
ALWAYS_INLINE double load_double ( const int64_t *  agg)

Definition at line 1245 of file RuntimeFunctions.cpp.

1245  {
1246  return *reinterpret_cast<const double*>(may_alias_ptr(agg));
1247 }
ALWAYS_INLINE float load_float ( const int32_t *  agg)

Definition at line 1249 of file RuntimeFunctions.cpp.

1249  {
1250  return *reinterpret_cast<const float*>(may_alias_ptr(agg));
1251 }
ALWAYS_INLINE int8_t logical_and ( const int8_t  lhs,
const int8_t  rhs,
const int8_t  null_val 
)

Definition at line 254 of file RuntimeFunctions.cpp.

256  {
257  if (lhs == null_val) {
258  return rhs == 0 ? rhs : null_val;
259  }
260  if (rhs == null_val) {
261  return lhs == 0 ? lhs : null_val;
262  }
263  return (lhs && rhs) ? 1 : 0;
264 }
ALWAYS_INLINE int8_t logical_not ( const int8_t  operand,
const int8_t  null_val 
)

Definition at line 250 of file RuntimeFunctions.cpp.

250  {
251  return operand == null_val ? operand : (operand ? 0 : 1);
252 }
ALWAYS_INLINE int8_t logical_or ( const int8_t  lhs,
const int8_t  rhs,
const int8_t  null_val 
)

Definition at line 266 of file RuntimeFunctions.cpp.

268  {
269  if (lhs == null_val) {
270  return rhs == 0 ? null_val : rhs;
271  }
272  if (rhs == null_val) {
273  return lhs == 0 ? null_val : lhs;
274  }
275  return (lhs || rhs) ? 1 : 0;
276 }
void multifrag_query ( const int8_t ***  col_buffers,
const uint64_t *  num_fragments,
const int64_t *  num_rows,
const uint64_t *  frag_row_offsets,
const int32_t *  max_matched,
int32_t *  total_matched,
const int64_t *  init_agg_value,
int64_t **  out,
int32_t *  error_code,
const uint32_t *  num_tables_ptr,
const int64_t *  join_hash_tables 
)

Definition at line 1348 of file RuntimeFunctions.cpp.

1358  {
1359  for (uint32_t i = 0; i < *num_fragments; ++i) {
1360  query_stub(col_buffers ? col_buffers[i] : nullptr,
1361  &num_rows[i * (*num_tables_ptr)],
1362  &frag_row_offsets[i * (*num_tables_ptr)],
1363  max_matched,
1364  init_agg_value,
1365  out,
1366  i,
1367  join_hash_tables,
1368  total_matched,
1369  error_code);
1370  }
1371 }
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t * join_hash_tables
const int8_t const int64_t * num_rows
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t * error_code
const int8_t const int64_t const uint64_t const int32_t * max_matched
const int8_t const int64_t const uint64_t const int32_t const int64_t * init_agg_value
const int8_t const int64_t const uint64_t * frag_row_offsets
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t ** out
void multifrag_query_hoisted_literals ( const int8_t ***  col_buffers,
const uint64_t *  num_fragments,
const int8_t *  literals,
const int64_t *  num_rows,
const uint64_t *  frag_row_offsets,
const int32_t *  max_matched,
int32_t *  total_matched,
const int64_t *  init_agg_value,
int64_t **  out,
int32_t *  error_code,
const uint32_t *  num_tables_ptr,
const int64_t *  join_hash_tables 
)

Definition at line 1307 of file RuntimeFunctions.cpp.

1318  {
1319  for (uint32_t i = 0; i < *num_fragments; ++i) {
1320  query_stub_hoisted_literals(col_buffers ? col_buffers[i] : nullptr,
1321  literals,
1322  &num_rows[i * (*num_tables_ptr)],
1323  &frag_row_offsets[i * (*num_tables_ptr)],
1324  max_matched,
1325  init_agg_value,
1326  out,
1327  i,
1328  join_hash_tables,
1329  total_matched,
1330  error_code);
1331  }
1332 }
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t * join_hash_tables
const int8_t const int64_t * num_rows
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t * error_code
const int8_t const int64_t const uint64_t const int32_t * max_matched
const int8_t const int64_t const uint64_t const int32_t const int64_t * init_agg_value
const int8_t * literals
const int8_t const int64_t const uint64_t * frag_row_offsets
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t ** out
ALWAYS_INLINE double percent_window_func ( const int64_t  output_buff,
const int64_t  pos 
)

Definition at line 1240 of file RuntimeFunctions.cpp.

1241  {
1242  return reinterpret_cast<const double*>(output_buff)[pos];
1243 }
ALWAYS_INLINE int32_t record_error_code ( const int32_t  err_code,
int32_t *  error_codes 
)

Definition at line 901 of file RuntimeFunctions.cpp.

References pos_start_impl().

902  {
903  // NB: never override persistent error codes (with code greater than zero).
904  // On GPU, a projection query with a limit can run out of slots without it
905  // being an actual error if the limit has been hit. If a persistent error
906  // (division by zero, for example) occurs before running out of slots, we
907  // have to avoid overriding it, because there's a risk that the query would
908  // go through if we override with a potentially benign out-of-slots code.
909  if (err_code && error_codes[pos_start_impl(nullptr)] <= 0) {
910  error_codes[pos_start_impl(nullptr)] = err_code;
911  }
912  return err_code;
913 }
__device__ int32_t pos_start_impl(const int32_t *row_index_resume)
Definition: cuda_mapd_rt.cu:11

+ Here is the call graph for this function:

ALWAYS_INLINE int64_t row_number_window_func ( const int64_t  output_buff,
const int64_t  pos 
)

Definition at line 1235 of file RuntimeFunctions.cpp.

1236  {
1237  return reinterpret_cast<const int64_t*>(output_buff)[pos];
1238 }
ALWAYS_INLINE int64_t scale_decimal_down_not_nullable ( const int64_t  operand,
const int64_t  scale,
const int64_t  null_val 
)

Definition at line 194 of file RuntimeFunctions.cpp.

196  {
197  int64_t tmp = scale >> 1;
198  tmp = operand >= 0 ? operand + tmp : operand - tmp;
199  return tmp / scale;
200 }
ALWAYS_INLINE int64_t scale_decimal_down_nullable ( const int64_t  operand,
const int64_t  scale,
const int64_t  null_val 
)

Definition at line 181 of file RuntimeFunctions.cpp.

183  {
184  // rounded scale down of a decimal
185  if (operand == null_val) {
186  return null_val;
187  }
188 
189  int64_t tmp = scale >> 1;
190  tmp = operand >= 0 ? operand + tmp : operand - tmp;
191  return tmp / scale;
192 }
ALWAYS_INLINE int64_t scale_decimal_up ( const int64_t  operand,
const uint64_t  scale,
const int64_t  operand_null_val,
const int64_t  result_null_val 
)

Definition at line 174 of file RuntimeFunctions.cpp.

177  {
178  return operand != operand_null_val ? operand * scale : result_null_val;
179 }
ALWAYS_INLINE void set_matching_group_value_perfect_hash_columnar ( int64_t *  groups_buffer,
const uint32_t  hashed_index,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  entry_count 
)

Definition at line 1150 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64.

1155  {
1156  if (groups_buffer[hashed_index] == EMPTY_KEY_64) {
1157  for (uint32_t i = 0; i < key_count; i++) {
1158  groups_buffer[i * entry_count + hashed_index] = key[i];
1159  }
1160  }
1161 }
const int32_t groups_buffer_size return groups_buffer
#define EMPTY_KEY_64
ALWAYS_INLINE uint64_t string_pack ( const int8_t *  ptr,
const int32_t  len 
)

Definition at line 1204 of file RuntimeFunctions.cpp.

1204  {
1205  return (reinterpret_cast<const uint64_t>(ptr) & 0xffffffffffff) |
1206  (static_cast<const uint64_t>(len) << 48);
1207 }
GPU_RT_STUB void sync_warp ( )

Definition at line 873 of file RuntimeFunctions.cpp.

873 {}
GPU_RT_STUB void sync_warp_protected ( int64_t  thread_pos,
int64_t  row_count 
)

Definition at line 874 of file RuntimeFunctions.cpp.

874 {}
GPU_RT_STUB int8_t thread_warp_idx ( const int8_t  warp_sz)

Definition at line 895 of file RuntimeFunctions.cpp.

895  {
896  return 0;
897 }
int64_t const int32_t sz write_back_nop ( dest  ,
src  ,
sz   
)

Variable Documentation

const int64_t const uint32_t const uint32_t const uint32_t const bool const bool blocks_share_memory

Definition at line 984 of file RuntimeFunctions.cpp.

const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t frag_idx
const int64_t const uint64_t * frag_row_offsets

Definition at line 1292 of file RuntimeFunctions.cpp.

const int64_t const uint64_t const int32_t const int64_t * init_agg_value

Definition at line 1292 of file RuntimeFunctions.cpp.

const int64_t * init_vals
const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t * join_hash_tables
const int64_t const uint32_t const uint32_t const uint32_t const bool keyless
const int8_t* literals
const int64_t const uint64_t const int32_t * max_matched
const int32_t groups_buffer_size return nullptr

Definition at line 939 of file RuntimeFunctions.cpp.