OmniSciDB  06b3bd477c
RuntimeFunctions.cpp File Reference
#include "RuntimeFunctions.h"
#include "../Shared/funcannotations.h"
#include "BufferCompaction.h"
#include "HyperLogLogRank.h"
#include "MurmurHash.h"
#include "TypePunning.h"
#include <algorithm>
#include <atomic>
#include <chrono>
#include <cmath>
#include <cstring>
#include <thread>
#include <tuple>
#include "DecodersImpl.h"
#include "GroupByRuntime.cpp"
#include "JoinHashTableQueryRuntime.cpp"
#include "TopKRuntime.cpp"

Macros

#define DEF_ARITH_NULLABLE(type, null_type, opname, opsym)
 
#define DEF_ARITH_NULLABLE_LHS(type, null_type, opname, opsym)
 
#define DEF_ARITH_NULLABLE_RHS(type, null_type, opname, opsym)
 
#define DEF_CMP_NULLABLE(type, null_type, opname, opsym)
 
#define DEF_CMP_NULLABLE_LHS(type, null_type, opname, opsym)
 
#define DEF_CMP_NULLABLE_RHS(type, null_type, opname, opsym)
 
#define DEF_SAFE_DIV_NULLABLE(type, null_type, opname)
 
#define DEF_BINARY_NULLABLE_ALL_OPS(type, null_type)
 
#define DEF_UMINUS_NULLABLE(type, null_type)
 
#define DEF_CAST_NULLABLE(from_type, to_type)
 
#define DEF_CAST_NULLABLE_BIDIR(type1, type2)
 
#define GPU_RT_STUB   NEVER_INLINE __attribute__((optnone))
 
#define DEF_AGG_MAX_INT(n)
 
#define DEF_AGG_MIN_INT(n)
 
#define DEF_AGG_ID_INT(n)
 
#define DEF_CHECKED_SINGLE_AGG_ID_INT(n)
 
#define DEF_WRITE_PROJECTION_INT(n)
 
#define DEF_SKIP_AGG_ADD(base_agg_func)
 
#define DEF_SKIP_AGG(base_agg_func)
 
#define DATA_T   int64_t
 
#define DATA_T   int32_t
 
#define DATA_T   int16_t
 
#define DATA_T   int8_t
 
#define DEF_SKIP_AGG_ADD(base_agg_func)
 
#define DEF_SKIP_AGG(base_agg_func)
 
#define DATA_T   double
 
#define ADDR_T   int64_t
 
#define DATA_T   float
 
#define ADDR_T   int32_t
 
#define DEF_SHARED_AGG_RET_STUBS(base_agg_func)
 
#define DEF_SHARED_AGG_STUBS(base_agg_func)
 

Functions

ALWAYS_INLINE int64_t scale_decimal_up (const int64_t operand, const uint64_t scale, const int64_t operand_null_val, const int64_t result_null_val)
 
ALWAYS_INLINE int64_t scale_decimal_down_nullable (const int64_t operand, const int64_t scale, const int64_t null_val)
 
ALWAYS_INLINE int64_t scale_decimal_down_not_nullable (const int64_t operand, const int64_t scale, const int64_t null_val)
 
ALWAYS_INLINE int64_t floor_div_nullable_lhs (const int64_t dividend, const int64_t divisor, const int64_t null_val)
 
ALWAYS_INLINE int8_t logical_not (const int8_t operand, const int8_t null_val)
 
ALWAYS_INLINE int8_t logical_and (const int8_t lhs, const int8_t rhs, const int8_t null_val)
 
ALWAYS_INLINE int8_t logical_or (const int8_t lhs, const int8_t rhs, const int8_t null_val)
 
ALWAYS_INLINE uint64_t agg_count (uint64_t *agg, const int64_t)
 
ALWAYS_INLINE void agg_count_distinct_bitmap (int64_t *agg, const int64_t val, const int64_t min_val)
 
GPU_RT_STUB void agg_count_distinct_bitmap_gpu (int64_t *, const int64_t, const int64_t, const int64_t, const int64_t, const uint64_t, const uint64_t)
 
NEVER_INLINE void agg_approximate_count_distinct (int64_t *agg, const int64_t key, const uint32_t b)
 
GPU_RT_STUB void agg_approximate_count_distinct_gpu (int64_t *, const int64_t, const uint32_t, const int64_t, const int64_t)
 
ALWAYS_INLINE int8_t bit_is_set (const int64_t bitset, const int64_t val, const int64_t min_val, const int64_t max_val, const int64_t null_val, const int8_t null_bool_val)
 
ALWAYS_INLINE int64_t agg_sum (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE void agg_max (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE void agg_min (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE void agg_id (int64_t *agg, const int64_t val)
 
ALWAYS_INLINE int32_t checked_single_agg_id (int64_t *agg, const int64_t val, const int64_t null_val)
 
ALWAYS_INLINE void agg_count_distinct_bitmap_skip_val (int64_t *agg, const int64_t val, const int64_t min_val, const int64_t skip_val)
 
GPU_RT_STUB void agg_count_distinct_bitmap_skip_val_gpu (int64_t *, const int64_t, const int64_t, const int64_t, const int64_t, const int64_t, const uint64_t, const uint64_t)
 
ALWAYS_INLINE uint32_t agg_count_int32 (uint32_t *agg, const int32_t)
 
ALWAYS_INLINE int32_t agg_sum_int32 (int32_t *agg, const int32_t val)
 
ALWAYS_INLINE int64_t agg_sum_skip_val (int64_t *agg, const int64_t val, const int64_t skip_val)
 
ALWAYS_INLINE int32_t agg_sum_int32_skip_val (int32_t *agg, const int32_t val, const int32_t skip_val)
 
ALWAYS_INLINE uint64_t agg_count_skip_val (uint64_t *agg, const int64_t val, const int64_t skip_val)
 
ALWAYS_INLINE uint32_t agg_count_int32_skip_val (uint32_t *agg, const int32_t val, const int32_t skip_val)
 
ALWAYS_INLINE uint64_t agg_count_double (uint64_t *agg, const double val)
 
ALWAYS_INLINE void agg_sum_double (int64_t *agg, const double val)
 
ALWAYS_INLINE void agg_max_double (int64_t *agg, const double val)
 
ALWAYS_INLINE void agg_min_double (int64_t *agg, const double val)
 
ALWAYS_INLINE void agg_id_double (int64_t *agg, const double val)
 
ALWAYS_INLINE int32_t checked_single_agg_id_double (int64_t *agg, const double val, const double null_val)
 
ALWAYS_INLINE uint32_t agg_count_float (uint32_t *agg, const float val)
 
ALWAYS_INLINE void agg_sum_float (int32_t *agg, const float val)
 
ALWAYS_INLINE void agg_max_float (int32_t *agg, const float val)
 
ALWAYS_INLINE void agg_min_float (int32_t *agg, const float val)
 
ALWAYS_INLINE void agg_id_float (int32_t *agg, const float val)
 
ALWAYS_INLINE int32_t checked_single_agg_id_float (int32_t *agg, const float val, const float null_val)
 
ALWAYS_INLINE uint64_t agg_count_double_skip_val (uint64_t *agg, const double val, const double skip_val)
 
ALWAYS_INLINE uint32_t agg_count_float_skip_val (uint32_t *agg, const float val, const float skip_val)
 
ALWAYS_INLINE int64_t decimal_floor (const int64_t x, const int64_t scale)
 
ALWAYS_INLINE int64_t decimal_ceil (const int64_t x, const int64_t scale)
 
GPU_RT_STUB int32_t checked_single_agg_id_shared (int64_t *agg, const int64_t val, const int64_t null_val)
 
GPU_RT_STUB int32_t checked_single_agg_id_int32_shared (int32_t *agg, const int32_t val, const int32_t null_val)
 
GPU_RT_STUB int32_t checked_single_agg_id_int16_shared (int16_t *agg, const int16_t val, const int16_t null_val)
 
GPU_RT_STUB int32_t checked_single_agg_id_int8_shared (int8_t *agg, const int8_t val, const int8_t null_val)
 
GPU_RT_STUB int32_t checked_single_agg_id_double_shared (int64_t *agg, const double val, const double null_val)
 
GPU_RT_STUB int32_t checked_single_agg_id_float_shared (int32_t *agg, const float val, const float null_val)
 
GPU_RT_STUB void agg_max_int16_skip_val_shared (int16_t *agg, const int16_t val, const int16_t skip_val)
 
GPU_RT_STUB void agg_max_int8_skip_val_shared (int8_t *agg, const int8_t val, const int8_t skip_val)
 
GPU_RT_STUB void agg_min_int16_skip_val_shared (int16_t *agg, const int16_t val, const int16_t skip_val)
 
GPU_RT_STUB void agg_min_int8_skip_val_shared (int8_t *agg, const int8_t val, const int8_t skip_val)
 
GPU_RT_STUB void agg_id_double_shared_slow (int64_t *agg, const double *val)
 
GPU_RT_STUB int64_t agg_sum_shared (int64_t *agg, const int64_t val)
 
GPU_RT_STUB int64_t agg_sum_skip_val_shared (int64_t *agg, const int64_t val, const int64_t skip_val)
 
GPU_RT_STUB int32_t agg_sum_int32_shared (int32_t *agg, const int32_t val)
 
GPU_RT_STUB int32_t agg_sum_int32_skip_val_shared (int32_t *agg, const int32_t val, const int32_t skip_val)
 
GPU_RT_STUB void agg_sum_double_shared (int64_t *agg, const double val)
 
GPU_RT_STUB void agg_sum_double_skip_val_shared (int64_t *agg, const double val, const double skip_val)
 
GPU_RT_STUB void agg_sum_float_shared (int32_t *agg, const float val)
 
GPU_RT_STUB void agg_sum_float_skip_val_shared (int32_t *agg, const float val, const float skip_val)
 
GPU_RT_STUB void force_sync ()
 
GPU_RT_STUB void sync_warp ()
 
GPU_RT_STUB void sync_warp_protected (int64_t thread_pos, int64_t row_count)
 
GPU_RT_STUB void sync_threadblock ()
 
GPU_RT_STUB void write_back_non_grouped_agg (int64_t *input_buffer, int64_t *output_buffer, const int32_t num_agg_cols)
 
 __attribute__ ((noinline)) int32_t pos_start_impl(int32_t *error_code)
 
GPU_RT_STUB int8_t thread_warp_idx (const int8_t warp_sz)
 
GPU_RT_STUB int64_t get_thread_index ()
 
GPU_RT_STUB int64_t * declare_dynamic_shared_memory ()
 
GPU_RT_STUB int64_t get_block_index ()
 
ALWAYS_INLINE int32_t record_error_code (const int32_t err_code, int32_t *error_codes)
 
int64_t const int32_t sz assert (dest)
 
int64_t * init_shared_mem (const int64_t *global_groups_buffer, const int32_t groups_buffer_size)
 
const int64_t const uint32_t const uint32_t const uint32_t const bool const int8_t warp_size assert (groups_buffer)
 
template<typename T >
ALWAYS_INLINE int64_t * get_matching_group_value (int64_t *groups_buffer, const uint32_t h, const T *key, const uint32_t key_count, const uint32_t row_size_quad)
 
ALWAYS_INLINE int64_t * get_matching_group_value (int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t *init_vals)
 
template<typename T >
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot (int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const T *key, const uint32_t key_count)
 
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot (int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const int64_t *key, const uint32_t key_count, const uint32_t key_width)
 
ALWAYS_INLINE int64_t * get_matching_group_value_columnar (int64_t *groups_buffer, const uint32_t h, const int64_t *key, const uint32_t key_qw_count, const size_t entry_count)
 
ALWAYS_INLINE int64_t * get_matching_group_value_perfect_hash (int64_t *groups_buffer, const uint32_t hashed_index, const int64_t *key, const uint32_t key_count, const uint32_t row_size_quad)
 
ALWAYS_INLINE int64_t * get_matching_group_value_perfect_hash_keyless (int64_t *groups_buffer, const uint32_t hashed_index, const uint32_t row_size_quad)
 
ALWAYS_INLINE void set_matching_group_value_perfect_hash_columnar (int64_t *groups_buffer, const uint32_t hashed_index, const int64_t *key, const uint32_t key_count, const uint32_t entry_count)
 
ALWAYS_INLINE int64_t * get_group_value_fast_keyless (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t, const uint32_t row_size_quad)
 
ALWAYS_INLINE int64_t * get_group_value_fast_keyless_semiprivate (int64_t *groups_buffer, const int64_t key, const int64_t min_key, const int64_t, const uint32_t row_size_quad, const uint8_t thread_warp_idx, const uint8_t warp_size)
 
ALWAYS_INLINE int8_t * extract_str_ptr (const uint64_t str_and_len)
 
ALWAYS_INLINE int32_t extract_str_len (const uint64_t str_and_len)
 
ALWAYS_INLINE uint64_t string_pack (const int8_t *ptr, const int32_t len)
 
ALWAYS_INLINE DEVICE int32_t char_length (const char *str, const int32_t str_len)
 
ALWAYS_INLINE DEVICE int32_t char_length_nullable (const char *str, const int32_t str_len, const int32_t int_null)
 
ALWAYS_INLINE DEVICE int32_t key_for_string_encoded (const int32_t str_id)
 
ALWAYS_INLINE DEVICE bool sample_ratio (const double proportion, const int64_t row_offset)
 
ALWAYS_INLINE int64_t row_number_window_func (const int64_t output_buff, const int64_t pos)
 
ALWAYS_INLINE double percent_window_func (const int64_t output_buff, const int64_t pos)
 
ALWAYS_INLINE double load_double (const int64_t *agg)
 
ALWAYS_INLINE float load_float (const int32_t *agg)
 
ALWAYS_INLINE double load_avg_int (const int64_t *sum, const int64_t *count, const double null_val)
 
ALWAYS_INLINE double load_avg_decimal (const int64_t *sum, const int64_t *count, const double null_val, const uint32_t scale)
 
ALWAYS_INLINE double load_avg_double (const int64_t *agg, const int64_t *count, const double null_val)
 
ALWAYS_INLINE double load_avg_float (const int32_t *agg, const int32_t *count, const double null_val)
 
NEVER_INLINE void linear_probabilistic_count (uint8_t *bitmap, const uint32_t bitmap_bytes, const uint8_t *key_bytes, const uint32_t key_len)
 
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t int32_t *total_matched assert (col_buffers||literals||num_rows||frag_row_offsets||max_matched||init_agg_value||out||frag_idx||error_code||join_hash_tables||total_matched)
 
void multifrag_query_hoisted_literals (const int8_t ***col_buffers, const uint64_t *num_fragments, const int8_t *literals, const int64_t *num_rows, const uint64_t *frag_row_offsets, const int32_t *max_matched, int32_t *total_matched, const int64_t *init_agg_value, int64_t **out, int32_t *error_code, const uint32_t *num_tables_ptr, const int64_t *join_hash_tables)
 
const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t int32_t *total_matched assert (col_buffers||num_rows||frag_row_offsets||max_matched||init_agg_value||out||frag_idx||error_code||join_hash_tables||total_matched)
 
void multifrag_query (const int8_t ***col_buffers, const uint64_t *num_fragments, const int64_t *num_rows, const uint64_t *frag_row_offsets, const int32_t *max_matched, int32_t *total_matched, const int64_t *init_agg_value, int64_t **out, int32_t *error_code, const uint32_t *num_tables_ptr, const int64_t *join_hash_tables)
 
ALWAYS_INLINE DEVICE bool check_interrupt ()
 
bool check_interrupt_init (unsigned command)
 

Variables

const int32_t groups_buffer_size return groups_buffer
 
int64_t * src
 
const int64_t * init_vals
 
const int64_t const uint32_t groups_buffer_entry_count
 
const int64_t const uint32_t const uint32_t key_qw_count
 
const int64_t const uint32_t const uint32_t const uint32_t agg_col_count
 
const int64_t const uint32_t const uint32_t const uint32_t const bool keyless
 
const int64_t const uint32_t const uint32_t const uint32_t const bool const bool blocks_share_memory
 
const int8_t * literals
 
const int8_t const int64_t * num_rows
 
const int8_t const int64_t const uint64_t * frag_row_offsets
 
const int8_t const int64_t const uint64_t const int32_t * max_matched
 
const int8_t const int64_t const uint64_t const int32_t const int64_t * init_agg_value
 
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t ** out
 
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t frag_idx
 
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t * join_hash_tables
 
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t * error_code
 

Macro Definition Documentation

#define ADDR_T   int64_t

Definition at line 703 of file RuntimeFunctions.cpp.

#define ADDR_T   int32_t

Definition at line 703 of file RuntimeFunctions.cpp.

#define DATA_T   int64_t

Definition at line 702 of file RuntimeFunctions.cpp.

#define DATA_T   int32_t

Definition at line 702 of file RuntimeFunctions.cpp.

#define DATA_T   int16_t

Definition at line 702 of file RuntimeFunctions.cpp.

#define DATA_T   int8_t

Definition at line 702 of file RuntimeFunctions.cpp.

#define DATA_T   double

Definition at line 702 of file RuntimeFunctions.cpp.

#define DATA_T   float

Definition at line 702 of file RuntimeFunctions.cpp.

#define DEF_AGG_ID_INT (   n)
Value:
extern "C" ALWAYS_INLINE void agg_id_int##n(int##n##_t* agg, const int##n##_t val) { \
*agg = val; \
}
#define ALWAYS_INLINE

Definition at line 436 of file RuntimeFunctions.cpp.

#define DEF_AGG_MAX_INT (   n)
Value:
extern "C" ALWAYS_INLINE void agg_max_int##n(int##n##_t* agg, const int##n##_t val) { \
*agg = std::max(*agg, val); \
}
#define ALWAYS_INLINE

Definition at line 416 of file RuntimeFunctions.cpp.

#define DEF_AGG_MIN_INT (   n)
Value:
extern "C" ALWAYS_INLINE void agg_min_int##n(int##n##_t* agg, const int##n##_t val) { \
*agg = std::min(*agg, val); \
}
#define ALWAYS_INLINE

Definition at line 426 of file RuntimeFunctions.cpp.

#define DEF_ARITH_NULLABLE (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE type opname##_##type##_nullable( \
const type lhs, const type rhs, const null_type null_val) { \
if (lhs != null_val && rhs != null_val) { \
return lhs opsym rhs; \
} \
return null_val; \
}
#define ALWAYS_INLINE

Definition at line 42 of file RuntimeFunctions.cpp.
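
As a sketch of what the macro generates, an invocation such as DEF_ARITH_NULLABLE(int32_t, int64_t, add, +) (the concrete instantiations live in this file) expands to the equivalent of:

extern "C" ALWAYS_INLINE int32_t add_int32_t_nullable(
    const int32_t lhs, const int32_t rhs, const int64_t null_val) {
  if (lhs != null_val && rhs != null_val) {
    return lhs + rhs;  // both operands non-null: do the arithmetic
  }
  return null_val;     // propagate NULL
}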

#define DEF_ARITH_NULLABLE_LHS (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE type opname##_##type##_nullable_lhs( \
const type lhs, const type rhs, const null_type null_val) { \
if (lhs != null_val) { \
return lhs opsym rhs; \
} \
return null_val; \
}
#define ALWAYS_INLINE

Definition at line 51 of file RuntimeFunctions.cpp.

#define DEF_ARITH_NULLABLE_RHS (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE type opname##_##type##_nullable_rhs( \
const type lhs, const type rhs, const null_type null_val) { \
if (rhs != null_val) { \
return lhs opsym rhs; \
} \
return null_val; \
}
#define ALWAYS_INLINE

Definition at line 60 of file RuntimeFunctions.cpp.

#define DEF_BINARY_NULLABLE_ALL_OPS (   type,
  null_type 
)

Definition at line 114 of file RuntimeFunctions.cpp.

#define DEF_CAST_NULLABLE (   from_type,
  to_type 
)
Value:
extern "C" ALWAYS_INLINE to_type cast_##from_type##_to_##to_type##_nullable( \
const from_type operand, \
const from_type from_null_val, \
const to_type to_null_val) { \
return operand == from_null_val ? to_null_val : operand; \
}
#define ALWAYS_INLINE

Definition at line 230 of file RuntimeFunctions.cpp.

#define DEF_CAST_NULLABLE_BIDIR (   type1,
  type2 
)
Value:
DEF_CAST_NULLABLE(type1, type2) \
DEF_CAST_NULLABLE(type2, type1)
#define DEF_CAST_NULLABLE(from_type, to_type)

Definition at line 238 of file RuntimeFunctions.cpp.
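
For example, DEF_CAST_NULLABLE_BIDIR(float, double) instantiates DEF_CAST_NULLABLE twice, emitting both cast_float_to_double_nullable and cast_double_to_float_nullable; each maps the source null sentinel to the target null sentinel and passes every other value through the implicit conversion.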

#define DEF_CHECKED_SINGLE_AGG_ID_INT (   n)
Value:
extern "C" ALWAYS_INLINE int32_t checked_single_agg_id_int##n( \
int##n##_t* agg, const int##n##_t val, const int##n##_t null_val) { \
if (val == null_val) { \
return 0; \
} \
if (*agg == val) { \
return 0; \
} else if (*agg == null_val) { \
*agg = val; \
return 0; \
} else { \
/* see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES*/ \
return 15; \
} \
}
#define ALWAYS_INLINE

Definition at line 441 of file RuntimeFunctions.cpp.

#define DEF_CMP_NULLABLE (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE int8_t opname##_##type##_nullable( \
const type lhs, \
const type rhs, \
const null_type null_val, \
const int8_t null_bool_val) { \
if (lhs != null_val && rhs != null_val) { \
return lhs opsym rhs; \
} \
return null_bool_val; \
}
#define ALWAYS_INLINE

Definition at line 69 of file RuntimeFunctions.cpp.

#define DEF_CMP_NULLABLE_LHS (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE int8_t opname##_##type##_nullable_lhs( \
const type lhs, \
const type rhs, \
const null_type null_val, \
const int8_t null_bool_val) { \
if (lhs != null_val) { \
return lhs opsym rhs; \
} \
return null_bool_val; \
}
#define ALWAYS_INLINE

Definition at line 81 of file RuntimeFunctions.cpp.

#define DEF_CMP_NULLABLE_RHS (   type,
  null_type,
  opname,
  opsym 
)
Value:
extern "C" ALWAYS_INLINE int8_t opname##_##type##_nullable_rhs( \
const type lhs, \
const type rhs, \
const null_type null_val, \
const int8_t null_bool_val) { \
if (rhs != null_val) { \
return lhs opsym rhs; \
} \
return null_bool_val; \
}
#define ALWAYS_INLINE

Definition at line 93 of file RuntimeFunctions.cpp.

#define DEF_SAFE_DIV_NULLABLE (   type,
  null_type,
  opname 
)
Value:
extern "C" ALWAYS_INLINE type safe_div_##type( \
const type lhs, const type rhs, const null_type null_val) { \
if (lhs != null_val && rhs != null_val && rhs != 0) { \
return lhs / rhs; \
} \
return null_val; \
}
#define ALWAYS_INLINE

Definition at line 105 of file RuntimeFunctions.cpp.

#define DEF_SHARED_AGG_RET_STUBS (   base_agg_func)

Definition at line 729 of file RuntimeFunctions.cpp.

#define DEF_SHARED_AGG_STUBS (   base_agg_func)
Value:
extern "C" GPU_RT_STUB void base_agg_func##_shared(int64_t* agg, const int64_t val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_skip_val_shared( \
int64_t* agg, const int64_t val, const int64_t skip_val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_int32_shared(int32_t* agg, \
const int32_t val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_int16_shared(int16_t* agg, \
const int16_t val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_int8_shared(int8_t* agg, \
const int8_t val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_int32_skip_val_shared( \
int32_t* agg, const int32_t val, const int32_t skip_val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_double_shared(int64_t* agg, \
const double val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_double_skip_val_shared( \
int64_t* agg, const double val, const double skip_val) {} \
extern "C" GPU_RT_STUB void base_agg_func##_float_shared(int32_t* agg, \
const float val) {} \
\
extern "C" GPU_RT_STUB void base_agg_func##_float_skip_val_shared( \
int32_t* agg, const float val, const float skip_val) {}
#define GPU_RT_STUB

Definition at line 768 of file RuntimeFunctions.cpp.

#define DEF_SKIP_AGG (   base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
DATA_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
const DATA_T old_agg = *agg; \
if (old_agg != skip_val) { \
base_agg_func(agg, val); \
} else { \
*agg = val; \
} \
} \
}
#define DATA_T
#define ALWAYS_INLINE

Definition at line 681 of file RuntimeFunctions.cpp.

#define DEF_SKIP_AGG (   base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
const ADDR_T old_agg = *agg; \
if (old_agg != *reinterpret_cast<const ADDR_T*>(may_alias_ptr(&skip_val))) { \
base_agg_func(agg, val); \
} else { \
*agg = *reinterpret_cast<const ADDR_T*>(may_alias_ptr(&val)); \
} \
} \
}
#define DATA_T
#define ADDR_T
#define ALWAYS_INLINE

Definition at line 681 of file RuntimeFunctions.cpp.
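
The may_alias_ptr casts are needed because float/double aggregates are stored bit-for-bit inside integer slots, so the slot must be compared against the raw bit pattern of the sentinel. A sketch of the expansion for DATA_T = double, ADDR_T = int64_t (e.g. DEF_SKIP_AGG(agg_sum_double)):

extern "C" ALWAYS_INLINE void agg_sum_double_skip_val(
    int64_t* agg, const double val, const double skip_val) {
  if (val != skip_val) {
    const int64_t old_agg = *agg;
    // compare the slot against skip_val's bit pattern, not its double value
    if (old_agg != *reinterpret_cast<const int64_t*>(may_alias_ptr(&skip_val))) {
      agg_sum_double(agg, val);
    } else {
      // first real value: overwrite the sentinel's bit pattern
      *agg = *reinterpret_cast<const int64_t*>(may_alias_ptr(&val));
    }
  }
}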

#define DEF_SKIP_AGG_ADD (   base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
DATA_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
base_agg_func(agg, val); \
} \
}
#define DATA_T
#define ALWAYS_INLINE

Definition at line 673 of file RuntimeFunctions.cpp.

#define DEF_SKIP_AGG_ADD (   base_agg_func)
Value:
extern "C" ALWAYS_INLINE void base_agg_func##_skip_val( \
ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
base_agg_func(agg, val); \
} \
}
#define DATA_T
#define ADDR_T
#define ALWAYS_INLINE

Definition at line 673 of file RuntimeFunctions.cpp.

#define DEF_UMINUS_NULLABLE (   type,
  null_type 
)
Value:
extern "C" ALWAYS_INLINE type uminus_##type##_nullable(const type operand, \
const null_type null_val) { \
return operand == null_val ? null_val : -operand; \
}
#define ALWAYS_INLINE

Definition at line 215 of file RuntimeFunctions.cpp.

#define DEF_WRITE_PROJECTION_INT (   n)
Value:
extern "C" ALWAYS_INLINE void write_projection_int##n( \
int8_t* slot_ptr, const int##n##_t val, const int64_t init_val) { \
if (val != init_val) { \
*reinterpret_cast<int##n##_t*>(slot_ptr) = val; \
} \
}
#define ALWAYS_INLINE

Definition at line 469 of file RuntimeFunctions.cpp.

#define GPU_RT_STUB   NEVER_INLINE __attribute__((optnone))

Definition at line 304 of file RuntimeFunctions.cpp.

Function Documentation

__attribute__ ((noinline)) int32_t pos_start_impl ( int32_t *  error_code )

Definition at line 895 of file RuntimeFunctions.cpp.

895  {
896  int32_t row_index_resume{0};
897  if (error_code) {
898  row_index_resume = error_code[0];
899  error_code[0] = 0;
900  }
901  return row_index_resume;
902 }
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t * error_code
NEVER_INLINE void agg_approximate_count_distinct ( int64_t *  agg,
const int64_t  key,
const uint32_t  b 
)

Definition at line 314 of file RuntimeFunctions.cpp.

References get_rank(), and MurmurHash64A().

316  {
317  const uint64_t hash = MurmurHash64A(&key, sizeof(key), 0);
318  const uint32_t index = hash >> (64 - b);
319  const uint8_t rank = get_rank(hash << b, 64 - b);
320  uint8_t* M = reinterpret_cast<uint8_t*>(*agg);
321  M[index] = std::max(M[index], rank);
322 }
FORCE_INLINE uint8_t get_rank(uint64_t x, uint32_t b)
NEVER_INLINE DEVICE uint64_t MurmurHash64A(const void *key, int len, uint64_t seed)
Definition: MurmurHash.cpp:26
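
This is a standard HyperLogLog register update: the top b bits of the 64-bit MurmurHash select one of 2^b registers, and each register keeps the maximum rank (position of the first set bit) observed in the remaining bits. A hypothetical walk-through with b = 11:

// index = hash >> (64 - 11)         -> top 11 bits pick one of 2048 registers
// rank  = get_rank(hash << 11, 53)  -> rank of the low 53 bits
// M[index] = std::max(M[index], rank)  -> registers only grow, so the
//                                         update is order-independent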


GPU_RT_STUB void agg_approximate_count_distinct_gpu ( int64_t *  ,
const int64_t  ,
const uint32_t  ,
const int64_t  ,
const int64_t   
)

Definition at line 324 of file RuntimeFunctions.cpp.

328  {}
ALWAYS_INLINE uint64_t agg_count ( uint64_t *  agg,
const int64_t   
)

Definition at line 293 of file RuntimeFunctions.cpp.

Referenced by agg_count_skip_val(), and anonymous_namespace{GroupByAndAggregate.cpp}::get_agg_count().

293  {
294  return (*agg)++;
295 }


ALWAYS_INLINE void agg_count_distinct_bitmap ( int64_t *  agg,
const int64_t  val,
const int64_t  min_val 
)

Definition at line 297 of file RuntimeFunctions.cpp.

Referenced by agg_count_distinct_bitmap_skip_val(), WindowFunctionContext::fillPartitionEnd(), WindowFunctionContext::fillPartitionStart(), anonymous_namespace{WindowContext.cpp}::index_to_partition_end(), and InValuesBitmap::InValuesBitmap().

299  {
300  const uint64_t bitmap_idx = val - min_val;
301  reinterpret_cast<int8_t*>(*agg)[bitmap_idx >> 3] |= (1 << (bitmap_idx & 7));
302 }
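
The bitmap is addressed by the value's offset from min_val: bit (bitmap_idx & 7) within byte (bitmap_idx >> 3). For example (hypothetical values), val = 42 with min_val = 40 gives bitmap_idx = 2, so the update is bitmap[0] |= 0x04; the distinct count can later be recovered by counting the set bits.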


GPU_RT_STUB void agg_count_distinct_bitmap_gpu ( int64_t *  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const uint64_t  ,
const uint64_t   
)

Definition at line 306 of file RuntimeFunctions.cpp.

312  {}
ALWAYS_INLINE void agg_count_distinct_bitmap_skip_val ( int64_t *  agg,
const int64_t  val,
const int64_t  min_val,
const int64_t  skip_val 
)

Definition at line 388 of file RuntimeFunctions.cpp.

References agg_count_distinct_bitmap().

391  {
392  if (val != skip_val) {
393  agg_count_distinct_bitmap(agg, val, min_val);
394  }
395 }
ALWAYS_INLINE void agg_count_distinct_bitmap(int64_t *agg, const int64_t val, const int64_t min_val)


GPU_RT_STUB void agg_count_distinct_bitmap_skip_val_gpu ( int64_t *  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const int64_t  ,
const uint64_t  ,
const uint64_t   
)

Definition at line 397 of file RuntimeFunctions.cpp.

404  {}
ALWAYS_INLINE uint64_t agg_count_double ( uint64_t *  agg,
const double  val 
)

Definition at line 573 of file RuntimeFunctions.cpp.

Referenced by agg_count_double_skip_val().

573  {
574  return (*agg)++;
575 }


ALWAYS_INLINE uint64_t agg_count_double_skip_val ( uint64_t *  agg,
const double  val,
const double  skip_val 
)

Definition at line 655 of file RuntimeFunctions.cpp.

References agg_count_double().

657  {
658  if (val != skip_val) {
659  return agg_count_double(agg, val);
660  }
661  return *agg;
662 }
ALWAYS_INLINE uint64_t agg_count_double(uint64_t *agg, const double val)


ALWAYS_INLINE uint32_t agg_count_float ( uint32_t *  agg,
const float  val 
)

Definition at line 614 of file RuntimeFunctions.cpp.

Referenced by agg_count_float_skip_val().

614  {
615  return (*agg)++;
616 }


ALWAYS_INLINE uint32_t agg_count_float_skip_val ( uint32_t *  agg,
const float  val,
const float  skip_val 
)

Definition at line 664 of file RuntimeFunctions.cpp.

References agg_count_float().

666  {
667  if (val != skip_val) {
668  return agg_count_float(agg, val);
669  }
670  return *agg;
671 }
ALWAYS_INLINE uint32_t agg_count_float(uint32_t *agg, const float val)


ALWAYS_INLINE uint32_t agg_count_int32 ( uint32_t *  agg,
const int32_t   
)

Definition at line 406 of file RuntimeFunctions.cpp.

Referenced by agg_count_int32_skip_val().

406  {
407  return (*agg)++;
408 }


ALWAYS_INLINE uint32_t agg_count_int32_skip_val ( uint32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)

Definition at line 518 of file RuntimeFunctions.cpp.

References agg_count_int32().

520  {
521  if (val != skip_val) {
522  return agg_count_int32(agg, val);
523  }
524  return *agg;
525 }
ALWAYS_INLINE uint32_t agg_count_int32(uint32_t *agg, const int32_t)


ALWAYS_INLINE uint64_t agg_count_skip_val ( uint64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Definition at line 509 of file RuntimeFunctions.cpp.

References agg_count().

511  {
512  if (val != skip_val) {
513  return agg_count(agg, val);
514  }
515  return *agg;
516 }
ALWAYS_INLINE uint64_t agg_count(uint64_t *agg, const int64_t)


ALWAYS_INLINE void agg_id ( int64_t *  agg,
const int64_t  val 
)

Definition at line 366 of file RuntimeFunctions.cpp.

366  {
367  *agg = val;
368 }
ALWAYS_INLINE void agg_id_double ( int64_t *  agg,
const double  val 
)

Definition at line 592 of file RuntimeFunctions.cpp.

592  {
593  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&val)));
594 }
GPU_RT_STUB void agg_id_double_shared_slow ( int64_t *  agg,
const double *  val 
)

Definition at line 852 of file RuntimeFunctions.cpp.

852 {}
ALWAYS_INLINE void agg_id_float ( int32_t *  agg,
const float  val 
)

Definition at line 633 of file RuntimeFunctions.cpp.

633  {
634  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&val)));
635 }
ALWAYS_INLINE void agg_max ( int64_t *  agg,
const int64_t  val 
)

Definition at line 358 of file RuntimeFunctions.cpp.

358  {
359  *agg = std::max(*agg, val);
360 }
ALWAYS_INLINE void agg_max_double ( int64_t *  agg,
const double  val 
)

Definition at line 582 of file RuntimeFunctions.cpp.

582  {
583  const auto r = std::max(*reinterpret_cast<const double*>(agg), val);
584  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&r)));
585 }
ALWAYS_INLINE void agg_max_float ( int32_t *  agg,
const float  val 
)

Definition at line 623 of file RuntimeFunctions.cpp.

623  {
624  const auto r = std::max(*reinterpret_cast<const float*>(agg), val);
625  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&r)));
626 }
GPU_RT_STUB void agg_max_int16_skip_val_shared ( int16_t *  agg,
const int16_t  val,
const int16_t  skip_val 
)

Definition at line 836 of file RuntimeFunctions.cpp.

838  {}
GPU_RT_STUB void agg_max_int8_skip_val_shared ( int8_t *  agg,
const int8_t  val,
const int8_t  skip_val 
)

Definition at line 840 of file RuntimeFunctions.cpp.

842  {}
ALWAYS_INLINE void agg_min ( int64_t *  agg,
const int64_t  val 
)

Definition at line 362 of file RuntimeFunctions.cpp.

362  {
363  *agg = std::min(*agg, val);
364 }
ALWAYS_INLINE void agg_min_double ( int64_t *  agg,
const double  val 
)

Definition at line 587 of file RuntimeFunctions.cpp.

587  {
588  const auto r = std::min(*reinterpret_cast<const double*>(agg), val);
589  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&r)));
590 }
ALWAYS_INLINE void agg_min_float ( int32_t *  agg,
const float  val 
)

Definition at line 628 of file RuntimeFunctions.cpp.

628  {
629  const auto r = std::min(*reinterpret_cast<const float*>(agg), val);
630  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&r)));
631 }
GPU_RT_STUB void agg_min_int16_skip_val_shared ( int16_t *  agg,
const int16_t  val,
const int16_t  skip_val 
)

Definition at line 844 of file RuntimeFunctions.cpp.

846  {}
GPU_RT_STUB void agg_min_int8_skip_val_shared ( int8_t *  agg,
const int8_t  val,
const int8_t  skip_val 
)

Definition at line 848 of file RuntimeFunctions.cpp.

850  {}
ALWAYS_INLINE int64_t agg_sum ( int64_t *  agg,
const int64_t  val 
)

Definition at line 352 of file RuntimeFunctions.cpp.

Referenced by agg_sum_skip_val().

352  {
353  const auto old = *agg;
354  *agg += val;
355  return old;
356 }


ALWAYS_INLINE void agg_sum_double ( int64_t *  agg,
const double  val 
)

Definition at line 577 of file RuntimeFunctions.cpp.

577  {
578  const auto r = *reinterpret_cast<const double*>(agg) + val;
579  *agg = *reinterpret_cast<const int64_t*>(may_alias_ptr(&r));
580 }
GPU_RT_STUB void agg_sum_double_shared ( int64_t *  agg,
const double  val 
)

Definition at line 873 of file RuntimeFunctions.cpp.

873 {}
GPU_RT_STUB void agg_sum_double_skip_val_shared ( int64_t *  agg,
const double  val,
const double  skip_val 
)

Definition at line 875 of file RuntimeFunctions.cpp.

877  {}
ALWAYS_INLINE void agg_sum_float ( int32_t *  agg,
const float  val 
)

Definition at line 618 of file RuntimeFunctions.cpp.

618  {
619  const auto r = *reinterpret_cast<const float*>(agg) + val;
620  *agg = *reinterpret_cast<const int32_t*>(may_alias_ptr(&r));
621 }
GPU_RT_STUB void agg_sum_float_shared ( int32_t *  agg,
const float  val 
)

Definition at line 878 of file RuntimeFunctions.cpp.

878 {}
GPU_RT_STUB void agg_sum_float_skip_val_shared ( int32_t *  agg,
const float  val,
const float  skip_val 
)

Definition at line 880 of file RuntimeFunctions.cpp.

882  {}
ALWAYS_INLINE int32_t agg_sum_int32 ( int32_t *  agg,
const int32_t  val 
)

Definition at line 410 of file RuntimeFunctions.cpp.

Referenced by agg_sum_int32_skip_val().

410  {
411  const auto old = *agg;
412  *agg += val;
413  return old;
414 }


GPU_RT_STUB int32_t agg_sum_int32_shared ( int32_t *  agg,
const int32_t  val 
)

Definition at line 863 of file RuntimeFunctions.cpp.

863  {
864  return 0;
865 }
ALWAYS_INLINE int32_t agg_sum_int32_skip_val ( int32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)

Definition at line 495 of file RuntimeFunctions.cpp.

References agg_sum_int32().

497  {
498  const auto old = *agg;
499  if (val != skip_val) {
500  if (old != skip_val) {
501  return agg_sum_int32(agg, val);
502  } else {
503  *agg = val;
504  }
505  }
506  return old;
507 }
ALWAYS_INLINE int32_t agg_sum_int32(int32_t *agg, const int32_t val)


GPU_RT_STUB int32_t agg_sum_int32_skip_val_shared ( int32_t *  agg,
const int32_t  val,
const int32_t  skip_val 
)

Definition at line 867 of file RuntimeFunctions.cpp.

869  {
870  return 0;
871 }
GPU_RT_STUB int64_t agg_sum_shared ( int64_t *  agg,
const int64_t  val 
)

Definition at line 854 of file RuntimeFunctions.cpp.

854  {
855  return 0;
856 }
ALWAYS_INLINE int64_t agg_sum_skip_val ( int64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Definition at line 481 of file RuntimeFunctions.cpp.

References agg_sum().

Referenced by Executor::reduceResults().

483  {
484  const auto old = *agg;
485  if (val != skip_val) {
486  if (old != skip_val) {
487  return agg_sum(agg, val);
488  } else {
489  *agg = val;
490  }
491  }
492  return old;
493 }
ALWAYS_INLINE int64_t agg_sum(int64_t *agg, const int64_t val)
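
Note that skip_val doubles as the NULL sentinel inside the aggregate slot: the first non-skipped value overwrites the sentinel instead of being added to it. A worked example, assuming skip_val = -1:

// *agg == -1 (empty):  agg_sum_skip_val(agg,  5, -1)  ->  *agg ==  5
// *agg ==  5:          agg_sum_skip_val(agg,  7, -1)  ->  *agg == 12
// *agg == 12:          agg_sum_skip_val(agg, -1, -1)  ->  *agg == 12 (skipped)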


GPU_RT_STUB int64_t agg_sum_skip_val_shared ( int64_t *  agg,
const int64_t  val,
const int64_t  skip_val 
)

Definition at line 858 of file RuntimeFunctions.cpp.

860  {
861  return 0;
862 }
int64_t const int32_t sz assert ( dest  )

Referenced by org.apache.calcite.prepare.MapDSqlAdvisor::applyPermissionsToTableHints(), array_append(), array_append__(), array_append__1(), array_append__2(), array_append__3(), array_append__4(), checkCudaErrors(), ChunkIter_get_nth_point_coords(), org.apache.calcite.sql2rel.SqlToRelConverter::collectInsertTargets(), org.apache.calcite.sql2rel.SqlToRelConverter::convertAgg(), org.apache.calcite.sql2rel.SqlToRelConverter::convertColumnList(), org.apache.calcite.sql2rel.SqlToRelConverter::convertCursor(), org.apache.calcite.sql2rel.SqlToRelConverter::convertFrom(), org.apache.calcite.sql2rel.SqlToRelConverter::convertIdentifier(), org.apache.calcite.sql2rel.SqlToRelConverter::convertInsert(), org.apache.calcite.sql2rel.SqlToRelConverter::convertInToOr(), org.apache.calcite.sql2rel.SqlToRelConverter::convertLiteralInValuesList(), org.apache.calcite.sql2rel.SqlToRelConverter::convertMatchRecognize(), org.apache.calcite.sql2rel.SqlToRelConverter::convertMerge(), org.apache.calcite.sql2rel.SqlToRelConverter::convertOrder(), org.apache.calcite.sql2rel.SqlToRelConverter::convertOrderItem(), org.apache.calcite.sql2rel.SqlToRelConverter::convertSelectList(), org.apache.calcite.sql2rel.SqlToRelConverter::convertUpdate(), org.apache.calcite.sql2rel.SqlToRelConverter::convertValues(), org.apache.calcite.sql2rel.SqlToRelConverter::convertWhere(), count_matches_baseline(), org.apache.calcite.sql2rel.SqlToRelConverter::createAggImpl(), org.apache.calcite.sql2rel.SqlToRelConverter::createJoin(), org.apache.calcite.sql2rel.SqlToRelConverter::createSource(), decompress(), com.mapd.calcite.parser.MapDParser::desugar(), com.mapd.calcite.parser.MapDSqlOperatorTable::dropSuffix(), fill_row_ids_baseline(), fixed_width_double_decode(), fixed_width_float_decode(), fixed_width_int_decode(), fixed_width_unsigned_decode(), org.apache.calcite.sql2rel.SqlToRelConverter::gatherOrderExprs(), get_candidate_rows(), get_matching_baseline_hash_slot_readonly(), SQLTypeInfo::get_storage_size(), com.mapd.metadata.MetaConnect::get_table_detail_JSON(), org.apache.calcite.sql2rel.SqlToRelConverter::getCorrelationUse(), SqliteConnector::getData(), JoinColumnIterator::getElementSwitch(), com.mapd.calcite.parser.MapDSqlOperatorTable.ExtFunction::getValueType(), CartesianProductIterator< T >::increment(), com.mapd.calcite.parser.MapDSqlOperatorTable.RowCopier::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.PgUnnest::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.Any::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.All::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.Now::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.Datetime::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.Truncate::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_IsEmpty::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_IsValid::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Contains::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Intersects::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Overlaps::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Disjoint::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Within::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_DWithin::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_DFullyWithin::inferReturnType(), 
com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Distance::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_MaxDistance::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_GeogFromText::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_GeomFromText::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Transform::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_X::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Y::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_XMin::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_XMax::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_YMin::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_YMax::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_PointN::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_EndPoint::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_StartPoint::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Length::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Perimeter::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Area::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_NPoints::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_NRings::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_SRID::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_SetSRID::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Point::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Buffer::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Intersection::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Union::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ST_Difference::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.CastToGeography::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.OffsetInFragment::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.ExtFunction::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.MapD_GeoPolyBoundsPtr::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.MapD_GeoPolyRenderGroup::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.OmniSci_Geo_PolyBoundsPtr::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.OmniSci_Geo_PolyRenderGroup::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.convert_meters_to_pixel_width::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.convert_meters_to_pixel_height::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.is_point_in_view::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.is_point_size_in_view::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.usTimestamp::inferReturnType(), com.mapd.calcite.parser.MapDSqlOperatorTable.nsTimestamp::inferReturnType(), SqliteConnector::isNull(), JoinColumnTupleIterator::JoinColumnTupleIterator(), com.mapd.tests.SelectCopyFromDeleteConcurrencyTest::main(), com.mapd.tests.ImportAlterValidateSelectConcurrencyTest::main(), org.apache.calcite.sql2rel.SqlToRelConverter::negate(), Data_Namespace::detail::TypedThrustAllocatorState::operator=(), parse_numeric(), com.mapd.parser.server.ExtensionFunctionSignatureParser::pointerType(), 
org.apache.calcite.sql2rel.SqlToRelConverter::pushDownNotForIn(), SqliteConnector::query_with_text_params(), org.apache.calcite.sql2rel.SqlToRelConverter.Blackboard::register(), org.apache.calcite.sql2rel.SqlToRelConverter::setDynamicParamCountInExplain(), org.apache.calcite.sql2rel.SqlToRelConverter::substituteSubQuery(), ddl_utils::SqlType::to_string(), com.mapd.parser.server.ExtensionFunctionSignatureParser::toSignature(), com.mapd.calcite.parser.MapDSqlOperatorTable.ExtFunction::toSqlTypeName(), org.apache.calcite.sql2rel.SqlToRelConverter::translateIn(), and com.mapd.parser.server.ExtensionFunction::typeName().

const int64_t const uint32_t const uint32_t const uint32_t const bool const int8_t warp_size assert ( groups_buffer  )
const int8_t const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t int32_t* total_matched assert ( col_buffers||literals||num_rows||frag_row_offsets||max_matched||init_agg_value||out||frag_idx||error_code||join_hash_tables||  total_matched)
const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t int32_t int32_t* total_matched assert ( col_buffers||num_rows||frag_row_offsets||max_matched||init_agg_value||out||frag_idx||error_code||join_hash_tables||  total_matched)
ALWAYS_INLINE int8_t bit_is_set ( const int64_t  bitset,
const int64_t  val,
const int64_t  min_val,
const int64_t  max_val,
const int64_t  null_val,
const int8_t  null_bool_val 
)

Definition at line 330 of file RuntimeFunctions.cpp.

335  {
336  if (val == null_val) {
337  return null_bool_val;
338  }
339  if (val < min_val || val > max_val) {
340  return 0;
341  }
342  if (!bitset) {
343  return 0;
344  }
345  const uint64_t bitmap_idx = val - min_val;
346  return (reinterpret_cast<const int8_t*>(bitset))[bitmap_idx >> 3] &
347  (1 << (bitmap_idx & 7))
348  ? 1
349  : 0;
350 }
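
bit_is_set is the read-side counterpart of agg_count_distinct_bitmap above: the same (val - min_val) byte/bit addressing, with NULL propagation and a [min_val, max_val] range check so out-of-range probes report 0 rather than reading past the bitmap. As a round-trip sketch (hypothetical values): after agg_count_distinct_bitmap has set the bit for val = 42 with min_val = 40, bit_is_set on the same bitmap with val = 42 returns 1, while val = 39 returns 0.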
ALWAYS_INLINE DEVICE int32_t char_length ( const char *  str,
const int32_t  str_len 
)

Definition at line 1224 of file RuntimeFunctions.cpp.

Referenced by ScalarExprVisitor< std::unordered_set< InputColDescriptor > >::visit().

1225  {
1226  return str_len;
1227 }


ALWAYS_INLINE DEVICE int32_t char_length_nullable ( const char *  str,
const int32_t  str_len,
const int32_t  int_null 
)

Definition at line 1229 of file RuntimeFunctions.cpp.

1231  {
1232  if (!str) {
1233  return int_null;
1234  }
1235  return str_len;
1236 }
ALWAYS_INLINE DEVICE bool check_interrupt ( )

Definition at line 1386 of file RuntimeFunctions.cpp.

References check_interrupt_init(), INT_CHECK, and runtime_interrupt_flag.

1386  {
1387  if (check_interrupt_init(static_cast<unsigned>(INT_CHECK))) {
1388  return true;
1389  }
1390  return false;
1391 }
bool check_interrupt_init(unsigned command)


bool check_interrupt_init ( unsigned  command)

Definition at line 1393 of file RuntimeFunctions.cpp.

References INT_ABORT, INT_CHECK, INT_RESET, and runtime_interrupt_flag.

Referenced by check_interrupt(), Executor::interrupt(), and Executor::resetInterrupt().

1393  {
1394  static std::atomic_bool runtime_interrupt_flag{false};
1395 
1396  if (command == static_cast<unsigned>(INT_CHECK)) {
1397  if (runtime_interrupt_flag.load()) {
1398  return true;
1399  }
1400  return false;
1401  }
1402  if (command == static_cast<unsigned>(INT_ABORT)) {
1403  runtime_interrupt_flag.store(true);
1404  return false;
1405  }
1406  if (command == static_cast<unsigned>(INT_RESET)) {
1407  runtime_interrupt_flag.store(false);
1408  return false;
1409  }
1410  return false;
1411 }
__device__ int32_t runtime_interrupt_flag
Definition: cuda_mapd_rt.cu:96
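
A hedged usage sketch of the command protocol (the INT_* constants and the callers come from the "Referenced by" list above):

check_interrupt_init(static_cast<unsigned>(INT_ABORT));  // Executor::interrupt(): raise the flag
bool interrupted = check_interrupt();                    // polled by running kernels -> true
check_interrupt_init(static_cast<unsigned>(INT_RESET));  // Executor::resetInterrupt(): clear it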


ALWAYS_INLINE int32_t checked_single_agg_id ( int64_t *  agg,
const int64_t  val,
const int64_t  null_val 
)

Definition at line 370 of file RuntimeFunctions.cpp.

372  {
373  if (val == null_val) {
374  return 0;
375  }
376 
377  if (*agg == val) {
378  return 0;
379  } else if (*agg == null_val) {
380  *agg = val;
381  return 0;
382  } else {
383  // see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
384  return 15;
385  }
386 }
ALWAYS_INLINE int32_t checked_single_agg_id_double ( int64_t *  agg,
const double  val,
const double  null_val 
)

Definition at line 596 of file RuntimeFunctions.cpp.

598  {
599  if (val == null_val) {
600  return 0;
601  }
602 
603  if (*agg == *(reinterpret_cast<const int64_t*>(may_alias_ptr(&val)))) {
604  return 0;
605  } else if (*agg == *(reinterpret_cast<const int64_t*>(may_alias_ptr(&null_val)))) {
606  *agg = *(reinterpret_cast<const int64_t*>(may_alias_ptr(&val)));
607  return 0;
608  } else {
609  // see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
610  return 15;
611  }
612 }
GPU_RT_STUB int32_t checked_single_agg_id_double_shared ( int64_t *  agg,
const double  val,
const double  null_val 
)

Definition at line 824 of file RuntimeFunctions.cpp.

826  {
827  return 0;
828 }
ALWAYS_INLINE int32_t checked_single_agg_id_float ( int32_t *  agg,
const float  val,
const float  null_val 
)

Definition at line 637 of file RuntimeFunctions.cpp.

639  {
640  if (val == null_val) {
641  return 0;
642  }
643 
644  if (*agg == *(reinterpret_cast<const int32_t*>(may_alias_ptr(&val)))) {
645  return 0;
646  } else if (*agg == *(reinterpret_cast<const int32_t*>(may_alias_ptr(&null_val)))) {
647  *agg = *(reinterpret_cast<const int32_t*>(may_alias_ptr(&val)));
648  return 0;
649  } else {
650  // see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
651  return 15;
652  }
653 }
GPU_RT_STUB int32_t checked_single_agg_id_float_shared ( int32_t *  agg,
const float  val,
const float  null_val 
)

Definition at line 830 of file RuntimeFunctions.cpp.

832  {
833  return 0;
834 }
GPU_RT_STUB int32_t checked_single_agg_id_int16_shared ( int16_t *  agg,
const int16_t  val,
const int16_t  null_val 
)

Definition at line 812 of file RuntimeFunctions.cpp.

814  {
815  return 0;
816 }
GPU_RT_STUB int32_t checked_single_agg_id_int32_shared ( int32_t *  agg,
const int32_t  val,
const int32_t  null_val 
)

Definition at line 806 of file RuntimeFunctions.cpp.

808  {
809  return 0;
810 }
GPU_RT_STUB int32_t checked_single_agg_id_int8_shared ( int8_t *  agg,
const int8_t  val,
const int8_t  null_val 
)

Definition at line 817 of file RuntimeFunctions.cpp.

819  {
820  return 0;
821 }
GPU_RT_STUB int32_t checked_single_agg_id_shared ( int64_t *  agg,
const int64_t  val,
const int64_t  null_val 
)

Definition at line 799 of file RuntimeFunctions.cpp.

801  {
802  return 0;
803 }
ALWAYS_INLINE int64_t decimal_ceil ( const int64_t  x,
const int64_t  scale 
)

Definition at line 723 of file RuntimeFunctions.cpp.

References decimal_floor().

723  {
724  return decimal_floor(x, scale) + (x % scale ? scale : 0);
725 }
ALWAYS_INLINE int64_t decimal_floor(const int64_t x, const int64_t scale)


ALWAYS_INLINE int64_t decimal_floor ( const int64_t  x,
const int64_t  scale 
)

Definition at line 713 of file RuntimeFunctions.cpp.

Referenced by decimal_ceil().

713  {
714  if (x >= 0) {
715  return x / scale * scale;
716  }
717  if (!(x % scale)) {
718  return x;
719  }
720  return x / scale * scale - scale;
721 }
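
Worked examples with scale = 100 (two decimal digits), which also illustrate decimal_ceil above:

// decimal_floor( 125, 100) ->  100
// decimal_floor(-125, 100) -> -200   (truncating -125/100 gives -1, then one step down)
// decimal_ceil ( 125, 100) ->  200   (floor + scale, since 125 % 100 != 0)
// decimal_ceil (-125, 100) -> -100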


GPU_RT_STUB int64_t* declare_dynamic_shared_memory ( )

Definition at line 920 of file RuntimeFunctions.cpp.

920  {
921  return nullptr;
922 }
ALWAYS_INLINE int32_t extract_str_len ( const uint64_t  str_and_len)

Definition at line 1197 of file RuntimeFunctions.cpp.

1197  {
1198  return static_cast<int64_t>(str_and_len) >> 48;
1199 }
ALWAYS_INLINE int8_t* extract_str_ptr ( const uint64_t  str_and_len)

Definition at line 1193 of file RuntimeFunctions.cpp.

1193  {
1194  return reinterpret_cast<int8_t*>(str_and_len & 0xffffffffffff);
1195 }
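
Together with string_pack (declared above; its body is not shown on this page), these implement a 48/16-bit packing: the low 48 bits hold the string pointer and the high 16 bits the length. Note that extract_str_len shifts the signed reinterpretation, so the length field is effectively 16 bits. A packing sketch consistent with the two extractors:

uint64_t pack_str_sketch(const int8_t* ptr, const int32_t len) {
  // low 48 bits: pointer; high 16 bits: length
  return (reinterpret_cast<uint64_t>(ptr) & 0xffffffffffffULL) |
         (static_cast<uint64_t>(len) << 48);
}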
ALWAYS_INLINE int64_t floor_div_nullable_lhs ( const int64_t  dividend,
const int64_t  divisor,
const int64_t  null_val 
)

Definition at line 205 of file RuntimeFunctions.cpp.

207  {
208  if (dividend == null_val) {
209  return null_val;
210  } else {
211  return (dividend < 0 ? dividend - (divisor - 1) : dividend) / divisor;
212  }
213 }
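
Unlike C++'s truncating division, this rounds toward negative infinity, which matters for negative dividends: floor_div_nullable_lhs(-7, 2, null_val) computes (-7 - (2 - 1)) / 2 = -4, whereas plain -7 / 2 truncates to -3. Only the left-hand side is checked against null_val, hence the _lhs suffix.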
GPU_RT_STUB void force_sync ( )

Definition at line 884 of file RuntimeFunctions.cpp.

884 {}
GPU_RT_STUB int64_t get_block_index ( )

Definition at line 924 of file RuntimeFunctions.cpp.

924  {
925  return 0;
926 }
ALWAYS_INLINE int64_t* get_group_value_fast_keyless ( int64_t *  groups_buffer,
const int64_t  key,
const int64_t  min_key,
const int64_t  ,
const uint32_t  row_size_quad 
)

Definition at line 1173 of file RuntimeFunctions.cpp.

1178  {
1179  return groups_buffer + row_size_quad * (key - min_key);
1180 }
const int32_t groups_buffer_size return groups_buffer
ALWAYS_INLINE int64_t* get_group_value_fast_keyless_semiprivate ( int64_t *  groups_buffer,
const int64_t  key,
const int64_t  min_key,
const int64_t  ,
const uint32_t  row_size_quad,
const uint8_t  thread_warp_idx,
const uint8_t  warp_size 
)

Definition at line 1182 of file RuntimeFunctions.cpp.

1189  {
1190  return groups_buffer + row_size_quad * (warp_size * (key - min_key) + thread_warp_idx);
1191 }
const int32_t groups_buffer_size return groups_buffer
__device__ int8_t thread_warp_idx(const int8_t warp_sz)
Definition: cuda_mapd_rt.cu:40
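
The "semiprivate" layout gives each key warp_size consecutive rows, one per warp lane, to reduce write contention; the per-lane rows are presumably reduced together afterwards. An indexing example with hypothetical values warp_size = 4, key - min_key = 2, thread_warp_idx = 1:

// row starts at groups_buffer + row_size_quad * (4 * 2 + 1)
//             = groups_buffer + row_size_quad * 9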
template<typename T >
ALWAYS_INLINE int64_t* get_matching_group_value ( int64_t *  groups_buffer,
const uint32_t  h,
const T *  key,
const uint32_t  key_count,
const uint32_t  row_size_quad 
)

Definition at line 1002 of file RuntimeFunctions.cpp.

References align_to_int64().

1006  {
1007  auto off = h * row_size_quad;
1008  auto row_ptr = reinterpret_cast<T*>(groups_buffer + off);
1009  if (*row_ptr == get_empty_key<T>()) {
1010  memcpy(row_ptr, key, key_count * sizeof(T));
1011  auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count);
1012  return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8));
1013  }
1014  if (memcmp(row_ptr, key, key_count * sizeof(T)) == 0) {
1015  auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count);
1016  return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8));
1017  }
1018  return nullptr;
1019 }
const int32_t groups_buffer_size return groups_buffer
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)
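
A sketch of the row-wise slot layout probed here, under the assumption (consistent with the code) that keys sit at the front of each slot:

// one slot = row_size_quad int64s, starting at groups_buffer + h * row_size_quad:
//   [ key_0 ... key_{key_count-1} | pad to 8-byte boundary | aggregate columns... ]
// An empty slot is recognized by get_empty_key<T>() in key_0; on a key mismatch
// the function returns nullptr, and the caller (see GroupByRuntime.cpp, included
// above) is expected to probe a different h.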


ALWAYS_INLINE int64_t* get_matching_group_value ( int64_t *  groups_buffer,
const uint32_t  h,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width,
const uint32_t  row_size_quad,
const int64_t *  init_vals 
)

Definition at line 1021 of file RuntimeFunctions.cpp.

References get_matching_group_value().

1027  {
1028  switch (key_width) {
1029  case 4:
1030  return get_matching_group_value(groups_buffer,
1031  h,
1032  reinterpret_cast<const int32_t*>(key),
1033  key_count,
1034  row_size_quad);
1035  case 8:
1036  return get_matching_group_value(groups_buffer, h, key, key_count, row_size_quad);
1037  default:;
1038  }
1039  return nullptr;
1040 }
const int32_t groups_buffer_size return groups_buffer
__device__ int64_t * get_matching_group_value(int64_t *groups_buffer, const uint32_t h, const T *key, const uint32_t key_count, const uint32_t row_size_quad)


ALWAYS_INLINE int64_t* get_matching_group_value_columnar ( int64_t *  groups_buffer,
const uint32_t  h,
const int64_t *  key,
const uint32_t  key_qw_count,
const size_t  entry_count 
)

Definition at line 1090 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64, and key_qw_count.

1095  {
1096  auto off = h;
1097  if (groups_buffer[off] == EMPTY_KEY_64) {
1098  for (size_t i = 0; i < key_qw_count; ++i) {
1099  groups_buffer[off] = key[i];
1100  off += entry_count;
1101  }
1102  return &groups_buffer[off];
1103  }
1104  off = h;
1105  for (size_t i = 0; i < key_qw_count; ++i) {
1106  if (groups_buffer[off] != key[i]) {
1107  return nullptr;
1108  }
1109  off += entry_count;
1110  }
1111  return &groups_buffer[off];
1112 }
const int32_t groups_buffer_size return groups_buffer
#define EMPTY_KEY_64
const int64_t const uint32_t const uint32_t key_qw_count
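
In the columnar layout, key component i of entry h lives at groups_buffer[h + i * entry_count], i.e. the components of one key are strided by entry_count instead of sitting adjacent as in the row-wise variant; the returned pointer (&groups_buffer[h + key_qw_count * entry_count]) is the entry's first value slot.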
template<typename T >
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot ( int64_t *  groups_buffer,
const uint32_t  entry_count,
const uint32_t  h,
const T *  key,
const uint32_t  key_count 
)

Definition at line 1043 of file RuntimeFunctions.cpp.

References groups_buffer.

1047  {
1048  auto off = h;
1049  auto key_buffer = reinterpret_cast<T*>(groups_buffer);
1050  if (key_buffer[off] == get_empty_key<T>()) {
1051  for (size_t i = 0; i < key_count; ++i) {
1052  key_buffer[off] = key[i];
1053  off += entry_count;
1054  }
1055  return h;
1056  }
1057  off = h;
1058  for (size_t i = 0; i < key_count; ++i) {
1059  if (key_buffer[off] != key[i]) {
1060  return -1;
1061  }
1062  off += entry_count;
1063  }
1064  return h;
1065 }
const int32_t groups_buffer_size return groups_buffer
ALWAYS_INLINE int32_t get_matching_group_value_columnar_slot ( int64_t *  groups_buffer,
const uint32_t  entry_count,
const uint32_t  h,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  key_width 
)

Definition at line 1068 of file RuntimeFunctions.cpp.

References get_matching_group_value_columnar_slot().

1073  {
1074  switch (key_width) {
1075  case 4:
1076  return get_matching_group_value_columnar_slot(groups_buffer,
1077  entry_count,
1078  h,
1079  reinterpret_cast<const int32_t*>(key),
1080  key_count);
1081  case 8:
1082  return get_matching_group_value_columnar_slot(
1083  groups_buffer, entry_count, h, key, key_count);
1084  default:
1085  return -1;
1086  }
1087  return -1;
1088 }
const int32_t groups_buffer_size return groups_buffer
__device__ int32_t get_matching_group_value_columnar_slot(int64_t *groups_buffer, const uint32_t entry_count, const uint32_t h, const T *key, const uint32_t key_count)


ALWAYS_INLINE int64_t* get_matching_group_value_perfect_hash ( int64_t *  groups_buffer,
const uint32_t  hashed_index,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  row_size_quad 
)

Definition at line 1125 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64.

1130  {
1131  uint32_t off = hashed_index * row_size_quad;
1132  if (groups_buffer[off] == EMPTY_KEY_64) {
1133  for (uint32_t i = 0; i < key_count; ++i) {
1134  groups_buffer[off + i] = key[i];
1135  }
1136  }
1137  return groups_buffer + off + key_count;
1138 }
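In the row-wise perfect-hash layout each entry occupies row_size_quad contiguous 64-bit slots: the key fills the first key_count slots and the aggregates follow. A self-contained sketch of the first-touch behavior above; EMPTY_KEY_64 is assumed to be INT64_MAX, and the buffer sizes are chosen purely for illustration.

#include <cstdint>
#include <iostream>
#include <vector>

constexpr int64_t EMPTY_KEY_64 = INT64_MAX;  // empty-slot sentinel (assumed value)

int main() {
  const uint32_t row_size_quad = 4, key_count = 2, hashed_index = 1;
  std::vector<int64_t> buf(3 * row_size_quad, EMPTY_KEY_64);
  const int64_t key[] = {5, 6};
  uint32_t off = hashed_index * row_size_quad;
  if (buf[off] == EMPTY_KEY_64) {  // first touch: materialize the key
    for (uint32_t i = 0; i < key_count; ++i) {
      buf[off + i] = key[i];
    }
  }
  int64_t* aggs = buf.data() + off + key_count;  // aggregates follow the key
  aggs[0] = 1;
  std::cout << buf[4] << " " << buf[5] << " " << buf[6] << "\n";  // 5 6 1
}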
ALWAYS_INLINE int64_t* get_matching_group_value_perfect_hash_keyless ( int64_t *  groups_buffer,
const uint32_t  hashed_index,
const uint32_t  row_size_quad 
)

For a particular hashed index (only used with multi-column perfect hash group by), returns the row-wise offset of the group in the output buffer. Since it is intended for keyless hash use, it assumes no group key columns precede the aggregate slots in the output buffer.

Definition at line 1146 of file RuntimeFunctions.cpp.

1149  {
1150  return groups_buffer + row_size_quad * hashed_index;
1151 }
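The arithmetic is a plain row-major offset: with row_size_quad slots per group, group hashed_index begins at slot row_size_quad * hashed_index, and because the layout is keyless the aggregate slots start immediately. A tiny sketch under assumed buffer sizes and values:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  const uint32_t row_size_quad = 3;
  std::vector<int64_t> groups_buffer(4 * row_size_quad, 0);
  const uint32_t hashed_index = 2;
  int64_t* row = groups_buffer.data() + row_size_quad * hashed_index;
  row[0] = 10;  // aggregate slots begin at once: no key columns precede them
  std::cout << groups_buffer[6] << "\n";  // prints 10
}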
GPU_RT_STUB int64_t get_thread_index ( )

Definition at line 916 of file RuntimeFunctions.cpp.

916  {
917  return 0;
918 }
int64_t* init_shared_mem ( const int64_t *  global_groups_buffer,
const int32_t  groups_buffer_size 
)

Definition at line 959 of file RuntimeFunctions.cpp.

960  {
961  return nullptr;
962 }
ALWAYS_INLINE DEVICE int32_t key_for_string_encoded ( const int32_t  str_id)

Definition at line 1238 of file RuntimeFunctions.cpp.

1238  {
1239  return str_id;
1240 }
NEVER_INLINE void linear_probabilistic_count ( uint8_t *  bitmap,
const uint32_t  bitmap_bytes,
const uint8_t *  key_bytes,
const uint32_t  key_len 
)

Definition at line 1293 of file RuntimeFunctions.cpp.

References MurmurHash1().

1296  {
1297  const uint32_t bit_pos = MurmurHash1(key_bytes, key_len, 0) % (bitmap_bytes * 8);
1298  const uint32_t word_idx = bit_pos / 32;
1299  const uint32_t bit_idx = bit_pos % 32;
1300  reinterpret_cast<uint32_t*>(bitmap)[word_idx] |= 1 << bit_idx;
1301 }

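Each key sets one pseudo-randomly chosen bit, so while the bitmap stays sparse the number of set bits approximates the number of distinct keys. A self-contained sketch of the same update, with std::hash standing in for MurmurHash1 purely so the example compiles on its own:

#include <bitset>
#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

void lpc_insert(uint8_t* bitmap, uint32_t bitmap_bytes, const std::string& key) {
  const uint32_t bit_pos =
      static_cast<uint32_t>(std::hash<std::string>{}(key)) % (bitmap_bytes * 8);
  const uint32_t word_idx = bit_pos / 32;
  const uint32_t bit_idx = bit_pos % 32;
  // Word-wise OR, mirroring the original's 32-bit write.
  reinterpret_cast<uint32_t*>(bitmap)[word_idx] |= 1u << bit_idx;
}

int main() {
  std::vector<uint8_t> bitmap(64, 0);  // 512 bits
  for (const auto* s : {"a", "b", "a", "c"}) {
    lpc_insert(bitmap.data(), static_cast<uint32_t>(bitmap.size()), s);
  }
  int bits_set = 0;
  for (auto byte : bitmap) {
    bits_set += std::bitset<8>(byte).count();
  }
  // With few insertions relative to the bitmap size, bits_set approximates
  // the distinct count (3 here, barring hash collisions).
  std::cout << bits_set << "\n";
}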

ALWAYS_INLINE double load_avg_decimal ( const int64_t *  sum,
const int64_t *  count,
const double  null_val,
const uint32_t  scale 
)

Definition at line 1272 of file RuntimeFunctions.cpp.

1275  {
1276  return *count != 0 ? (static_cast<double>(*sum) / pow(10, scale)) / *count : null_val;
1277 }
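A worked example of the formula: the fixed-point sum is first divided by 10^scale to recover the real value, then by the count. The values below are illustrative assumptions.

#include <cmath>
#include <cstdint>
#include <iostream>

int main() {
  const int64_t sum = 12345;   // fixed-point: 123.45 at DECIMAL scale 2
  const int64_t count = 5;
  const uint32_t scale = 2;
  const double null_val = -1.0;
  const double avg =
      count != 0 ? (static_cast<double>(sum) / std::pow(10, scale)) / count
                 : null_val;
  std::cout << avg << "\n";  // 24.69
}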
ALWAYS_INLINE double load_avg_double ( const int64_t *  agg,
const int64_t *  count,
const double  null_val 
)

Definition at line 1279 of file RuntimeFunctions.cpp.

1281  {
1282  return *count != 0 ? *reinterpret_cast<const double*>(may_alias_ptr(agg)) / *count
1283  : null_val;
1284 }
ALWAYS_INLINE double load_avg_float ( const int32_t *  agg,
const int32_t *  count,
const double  null_val 
)

Definition at line 1286 of file RuntimeFunctions.cpp.

1288  {
1289  return *count != 0 ? *reinterpret_cast<const float*>(may_alias_ptr(agg)) / *count
1290  : null_val;
1291 }
ALWAYS_INLINE double load_avg_int ( const int64_t *  sum,
const int64_t *  count,
const double  null_val 
)

Definition at line 1266 of file RuntimeFunctions.cpp.

1268  {
1269  return *count != 0 ? static_cast<double>(*sum) / *count : null_val;
1270 }
ALWAYS_INLINE double load_double ( const int64_t *  agg)

Definition at line 1258 of file RuntimeFunctions.cpp.

1258  {
1259  return *reinterpret_cast<const double*>(may_alias_ptr(agg));
1260 }
ALWAYS_INLINE float load_float ( const int32_t *  agg)

Definition at line 1262 of file RuntimeFunctions.cpp.

1262  {
1263  return *reinterpret_cast<const float*>(may_alias_ptr(agg));
1264 }
ALWAYS_INLINE int8_t logical_and ( const int8_t  lhs,
const int8_t  rhs,
const int8_t  null_val 
)

Definition at line 267 of file RuntimeFunctions.cpp.

269  {
270  if (lhs == null_val) {
271  return rhs == 0 ? rhs : null_val;
272  }
273  if (rhs == null_val) {
274  return lhs == 0 ? lhs : null_val;
275  }
276  return (lhs && rhs) ? 1 : 0;
277 }
ALWAYS_INLINE int8_t logical_not ( const int8_t  operand,
const int8_t  null_val 
)

Definition at line 263 of file RuntimeFunctions.cpp.

263  {
264  return operand == null_val ? operand : (operand ? 0 : 1);
265 }
ALWAYS_INLINE int8_t logical_or ( const int8_t  lhs,
const int8_t  rhs,
const int8_t  null_val 
)

Definition at line 279 of file RuntimeFunctions.cpp.

281  {
282  if (lhs == null_val) {
283  return rhs == 0 ? null_val : rhs;
284  }
285  if (rhs == null_val) {
286  return lhs == 0 ? null_val : lhs;
287  }
288  return (lhs || rhs) ? 1 : 0;
289 }
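Together with logical_not, these implement SQL's three-valued (Kleene) logic, with null_val acting as NULL: NULL AND false is false, NULL AND true is NULL, NULL OR true is true, NULL OR false is NULL. A quick standalone check; the -128 sentinel is an assumed value for illustration.

#include <cstdint>
#include <iostream>

int8_t logical_and(int8_t lhs, int8_t rhs, int8_t null_val) {
  if (lhs == null_val) return rhs == 0 ? rhs : null_val;
  if (rhs == null_val) return lhs == 0 ? lhs : null_val;
  return (lhs && rhs) ? 1 : 0;
}

int8_t logical_or(int8_t lhs, int8_t rhs, int8_t null_val) {
  if (lhs == null_val) return rhs == 0 ? null_val : rhs;
  if (rhs == null_val) return lhs == 0 ? null_val : lhs;
  return (lhs || rhs) ? 1 : 0;
}

int main() {
  const int8_t NULL_BOOL = -128;  // assumed NULL sentinel
  // NULL AND false = false; NULL AND true = NULL.
  std::cout << static_cast<int>(logical_and(NULL_BOOL, 0, NULL_BOOL)) << "\n";  // 0
  std::cout << static_cast<int>(logical_and(NULL_BOOL, 1, NULL_BOOL)) << "\n";  // -128
  // NULL OR true = true; NULL OR false = NULL.
  std::cout << static_cast<int>(logical_or(NULL_BOOL, 1, NULL_BOOL)) << "\n";   // 1
  std::cout << static_cast<int>(logical_or(NULL_BOOL, 0, NULL_BOOL)) << "\n";   // -128
}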
void multifrag_query ( const int8_t ***  col_buffers,
const uint64_t *  num_fragments,
const int64_t *  num_rows,
const uint64_t *  frag_row_offsets,
const int32_t *  max_matched,
int32_t *  total_matched,
const int64_t *  init_agg_value,
int64_t **  out,
int32_t *  error_code,
const uint32_t *  num_tables_ptr,
const int64_t *  join_hash_tables 
)

Definition at line 1361 of file RuntimeFunctions.cpp.

1371  {
1372  for (uint32_t i = 0; i < *num_fragments; ++i) {
1373  query_stub(col_buffers ? col_buffers[i] : nullptr,
1374  &num_rows[i * (*num_tables_ptr)],
1375  &frag_row_offsets[i * (*num_tables_ptr)],
1376  max_matched,
1377  init_agg_value,
1378  out,
1379  i,
1380  join_hash_tables,
1381  total_matched,
1382  error_code);
1383  }
1384 }
void multifrag_query_hoisted_literals ( const int8_t ***  col_buffers,
const uint64_t *  num_fragments,
const int8_t *  literals,
const int64_t *  num_rows,
const uint64_t *  frag_row_offsets,
const int32_t *  max_matched,
int32_t *  total_matched,
const int64_t *  init_agg_value,
int64_t **  out,
int32_t *  error_code,
const uint32_t *  num_tables_ptr,
const int64_t *  join_hash_tables 
)

Definition at line 1320 of file RuntimeFunctions.cpp.

1331  {
1332  for (uint32_t i = 0; i < *num_fragments; ++i) {
1333  query_stub_hoisted_literals(col_buffers ? col_buffers[i] : nullptr,
1334  literals,
1335  &num_rows[i * (*num_tables_ptr)],
1336  &frag_row_offsets[i * (*num_tables_ptr)],
1337  max_matched,
1338  init_agg_value,
1339  out,
1340  i,
1341  join_hash_tables,
1342  total_matched,
1343  error_code);
1344  }
1345 }
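In both multifrag entry points, num_rows and frag_row_offsets are flattened fragment-major with one entry per table, which is why fragment i's values start at index i * (*num_tables_ptr). A small sketch of that indexing under assumed shapes and values:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  const uint32_t num_tables = 2, num_fragments = 3;
  // Layout: [frag0_t0, frag0_t1, frag1_t0, frag1_t1, frag2_t0, frag2_t1]
  std::vector<int64_t> num_rows = {100, 40, 100, 40, 57, 40};
  for (uint32_t i = 0; i < num_fragments; ++i) {
    const int64_t* frag_rows = &num_rows[i * num_tables];  // as in the loop above
    std::cout << "fragment " << i << ": " << frag_rows[0] << " rows\n";
  }
}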
ALWAYS_INLINE double percent_window_func ( const int64_t  output_buff,
const int64_t  pos 
)

Definition at line 1253 of file RuntimeFunctions.cpp.

1254  {
1255  return reinterpret_cast<const double*>(output_buff)[pos];
1256 }
ALWAYS_INLINE int32_t record_error_code ( const int32_t  err_code,
int32_t *  error_codes 
)

Definition at line 930 of file RuntimeFunctions.cpp.

References pos_start_impl().

931  {
932  // NB: never override persistent error codes (with code greater than zero).
933  // On GPU, a projection query with a limit can run out of slots without it
934  // being an actual error if the limit has been hit. If a persistent error
935  // (division by zero, for example) occurs before running out of slots, we
936  // have to avoid overriding it, because there's a risk that the query would
937  // go through if we override with a potentially benign out-of-slots code.
938  if (err_code && error_codes[pos_start_impl(nullptr)] <= 0) {
939  error_codes[pos_start_impl(nullptr)] = err_code;
940  }
941  return err_code;
942 }

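A standalone sketch of the non-override rule described in the comment, with the slot index fixed at 0 in place of pos_start_impl() so the example is self-contained; the error-code values are illustrative (positive codes persistent, non-positive ones benign):

#include <cstdint>
#include <iostream>

int32_t record_error(int32_t err_code, int32_t* error_codes) {
  // Only record when the stored code is not a persistent (positive) error.
  if (err_code && error_codes[0] <= 0) {
    error_codes[0] = err_code;
  }
  return err_code;
}

int main() {
  int32_t error_codes[1] = {0};
  record_error(3, error_codes);   // persistent error (e.g. division by zero)
  record_error(-2, error_codes);  // benign out-of-slots: must not overwrite
  std::cout << error_codes[0] << "\n";  // still 3
}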

ALWAYS_INLINE int64_t row_number_window_func ( const int64_t  output_buff,
const int64_t  pos 
)

Definition at line 1248 of file RuntimeFunctions.cpp.

1249  {
1250  return reinterpret_cast<const int64_t*>(output_buff)[pos];
1251 }
ALWAYS_INLINE DEVICE bool sample_ratio ( const double  proportion,
const int64_t  row_offset 
)

Definition at line 1242 of file RuntimeFunctions.cpp.

Referenced by ScalarExprVisitor< std::unordered_set< InputColDescriptor > >::visit().

1243  {
1244  const int64_t threshold = 4294967296 * proportion;
1245  return (row_offset * 2654435761) % 4294967296 < threshold;
1246 }

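The constant 2654435761 is Knuth's multiplicative-hash constant (roughly 2^32 / phi), so (row_offset * 2654435761) mod 2^32 scatters row offsets roughly uniformly over [0, 2^32), and comparing against proportion * 2^32 keeps about that fraction of rows. A quick empirical check:

#include <cstdint>
#include <iostream>

bool sample_ratio(double proportion, int64_t row_offset) {
  const int64_t threshold = 4294967296 * proportion;
  return (row_offset * 2654435761) % 4294967296 < threshold;
}

int main() {
  const double proportion = 0.1;
  int64_t kept = 0, total = 1000000;
  for (int64_t row = 0; row < total; ++row) {
    kept += sample_ratio(proportion, row);
  }
  // Prints a value close to 0.1.
  std::cout << static_cast<double>(kept) / total << "\n";
}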

ALWAYS_INLINE int64_t scale_decimal_down_not_nullable ( const int64_t  operand,
const int64_t  scale,
const int64_t  null_val 
)

Definition at line 195 of file RuntimeFunctions.cpp.

197  {
198  int64_t tmp = scale >> 1;
199  tmp = operand >= 0 ? operand + tmp : operand - tmp;
200  return tmp / scale;
201 }
ALWAYS_INLINE int64_t scale_decimal_down_nullable ( const int64_t  operand,
const int64_t  scale,
const int64_t  null_val 
)

Definition at line 182 of file RuntimeFunctions.cpp.

184  {
185  // rounded scale down of a decimal
186  if (operand == null_val) {
187  return null_val;
188  }
189 
190  int64_t tmp = scale >> 1;
191  tmp = operand >= 0 ? operand + tmp : operand - tmp;
192  return tmp / scale;
193 }
ALWAYS_INLINE int64_t scale_decimal_up ( const int64_t  operand,
const uint64_t  scale,
const int64_t  operand_null_val,
const int64_t  result_null_val 
)

Definition at line 175 of file RuntimeFunctions.cpp.

178  {
179  return operand != operand_null_val ? operand * scale : result_null_val;
180 }
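The pair works in opposite directions: scaling up multiplies by a power of ten, while scaling down divides, with the scale/2 adjustment giving round-half-away-from-zero. A worked example with assumed values:

#include <cstdint>
#include <iostream>

int64_t scale_down(int64_t operand, int64_t scale) {
  int64_t tmp = scale >> 1;                        // scale/2, the rounding bias
  tmp = operand >= 0 ? operand + tmp : operand - tmp;
  return tmp / scale;
}

int main() {
  // DECIMAL value 1.25 at scale 2 is stored as 125; rescaling to scale 4
  // multiplies by 10^2, as scale_decimal_up does.
  std::cout << 125 * 100 << "\n";              // 12500
  // Back down to scale 2: exact, no rounding needed.
  std::cout << scale_down(12500, 100) << "\n"; // 125
  // Rounding: 1.255 at scale 3 (1255) down to scale 2 gives 1.26, not 1.25.
  std::cout << scale_down(1255, 10) << "\n";   // 126
  std::cout << scale_down(-1255, 10) << "\n";  // -126 (rounds away from zero)
}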
ALWAYS_INLINE void set_matching_group_value_perfect_hash_columnar ( int64_t *  groups_buffer,
const uint32_t  hashed_index,
const int64_t *  key,
const uint32_t  key_count,
const uint32_t  entry_count 
)

Definition at line 1157 of file RuntimeFunctions.cpp.

References EMPTY_KEY_64.

1162  {
1163  if (groups_buffer[hashed_index] == EMPTY_KEY_64) {
1164  for (uint32_t i = 0; i < key_count; i++) {
1165  groups_buffer[i * entry_count + hashed_index] = key[i];
1166  }
1167  }
1168 }
ALWAYS_INLINE uint64_t string_pack ( const int8_t *  ptr,
const int32_t  len 
)

Definition at line 1211 of file RuntimeFunctions.cpp.

1211  {
1212  return (reinterpret_cast<const uint64_t>(ptr) & 0xffffffffffff) |
1213  (static_cast<const uint64_t>(len) << 48);
1214 }
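The packing relies on x86-64 canonical addresses fitting in 48 bits: the pointer occupies the low 48 bits and the length the high 16. A sketch of the pack and its assumed inverse, for illustration:

#include <cstdint>
#include <iostream>

uint64_t string_pack(const int8_t* ptr, int32_t len) {
  return (reinterpret_cast<uint64_t>(ptr) & 0xffffffffffff) |
         (static_cast<uint64_t>(len) << 48);
}

int main() {
  const char* str = "hello";
  const uint64_t packed = string_pack(reinterpret_cast<const int8_t*>(str), 5);
  // Unpack: mask off the length bits to recover the pointer, shift to
  // recover the length (the assumed inverse of the pack above).
  const char* unpacked_ptr =
      reinterpret_cast<const char*>(packed & 0xffffffffffff);
  const int32_t unpacked_len = static_cast<int32_t>(packed >> 48);
  std::cout.write(unpacked_ptr, unpacked_len);  // hello
  std::cout << "\n";
}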
GPU_RT_STUB void sync_threadblock ( )

Definition at line 888 of file RuntimeFunctions.cpp.

888 {}
GPU_RT_STUB void sync_warp ( )

Definition at line 886 of file RuntimeFunctions.cpp.

886 {}
GPU_RT_STUB void sync_warp_protected ( int64_t  thread_pos,
int64_t  row_count 
)

Definition at line 887 of file RuntimeFunctions.cpp.

887 {}
GPU_RT_STUB int8_t thread_warp_idx ( const int8_t  warp_sz)

Definition at line 912 of file RuntimeFunctions.cpp.

912  {
913  return 0;
914 }
GPU_RT_STUB void write_back_non_grouped_agg ( int64_t *  input_buffer,
int64_t *  output_buffer,
const int32_t  num_agg_cols 
)

Definition at line 890 of file RuntimeFunctions.cpp.

892  {};

Variable Documentation

const int64_t const uint32_t const uint32_t const uint32_t const bool const bool blocks_share_memory

Definition at line 978 of file RuntimeFunctions.cpp.

const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t frag_idx
const int64_t const uint64_t * frag_row_offsets

Definition at line 1305 of file RuntimeFunctions.cpp.

const int64_t const uint64_t const int32_t const int64_t * init_agg_value

Definition at line 1305 of file RuntimeFunctions.cpp.

const int64_t * init_vals
const int64_t const uint64_t const int32_t const int64_t int64_t uint32_t const int64_t * join_hash_tables
const int64_t const uint32_t const uint32_t const uint32_t const bool keyless
const int8_t* literals
const int64_t const uint64_t const int32_t * max_matched
const int64_t * num_rows

Definition at line 1305 of file RuntimeFunctions.cpp.

Referenced by Catalog_Namespace::Catalog::addForeignTableDetails(), DictionaryValueConverter< TARGET_TYPE >::allocateColumnarBuffer(), StringValueConverter::allocateColumnarData(), ArrayValueConverter< ELEMENT_CONVERTER >::allocateColumnarData(), GeoPointValueConverter::allocateColumnarData(), GeoLinestringValueConverter::allocateColumnarData(), GeoPolygonValueConverter::allocateColumnarData(), GeoMultiPolygonValueConverter::allocateColumnarData(), SpeculativeTopNMap::asRows(), Catalog_Namespace::Catalog::buildForeignServerMap(), Catalog_Namespace::Catalog::dropForeignServer(), Catalog_Namespace::Catalog::dropFsiSchemasAndTables(), Executor::executePlanWithGroupBy(), Executor::executePlanWithoutGroupBy(), Executor::fetchUnionChunks(), Fragmenter_Namespace::FixedLenArrayChunkConverter::FixedLenArrayChunkConverter(), get_num_allocated_rows_from_gpu(), Catalog_Namespace::Catalog::getForeignServersForUser(), Catalog_Namespace::SysCatalog::getGranteesOfSharedDashboards(), Executor::getRowCountAndOffsetForAllFrags(), QueryExecutionContext::launchCpuCode(), QueryExecutionContext::launchGpuCode(), Parser::InsertIntoTableAsSelectStmt::populateData(), QueryExecutionContext::QueryExecutionContext(), SqliteMemDatabase::runSelect(), com.mapd.tests.DistributedConcurrencyTest::runTest(), com.mapd.tests.SelectUpdateDeleteDifferentTables::runTest(), com.mapd.tests.UpdateDeleteInsertConcurrencyTest::runTest(), com.mapd.tests.AlterDropTruncateValidateConcurrencyTest::runTest(), com.mapd.tests.CtasItasSelectUpdelConcurrencyTest::runTest(), Catalog_Namespace::Catalog::setForeignServerProperty(), Fragmenter_Namespace::StringChunkConverter::StringChunkConverter(), Fragmenter_Namespace::InsertOrderFragmenter::updateColumns(), anonymous_namespace{ExternalExecutor.cpp}::vt_column(), and anonymous_namespace{ExternalExecutor.cpp}::vt_next().