27 #include "../BufferCompaction.h"
37 const std::vector<int64_t>& col_exprs_to_not_project) {
40 if (!col_exprs_to_not_project.empty()) {
41 CHECK_EQ(col_expr_list.size(), col_exprs_to_not_project.size());
43 size_t col_expr_idx = 0;
45 for (
const auto col_expr : col_expr_list) {
46 if (!col_exprs_to_not_project.empty() &&
47 col_exprs_to_not_project[col_expr_idx] != -1) {
59 if ((chosen_type.is_string() && chosen_type.get_compression() ==
kENCODING_NONE) ||
60 chosen_type.is_array()) {
66 if (chosen_type.is_geometry()) {
67 for (
auto i = 0; i < chosen_type.get_physical_coord_cols(); ++i) {
77 CHECK_EQ(
size_t(0), col_expr_bitwidth % 8);
80 if (agg_info.agg_kind ==
kAVG) {
81 CHECK(agg_info.is_agg);
90 const SlotSize ss{slot_width_size, slot_width_size};
96 slot_size.padded_size = padded_size;
102 if (slot_size.padded_size < 0) {
103 slot_size.padded_size = padded_size;
110 slot_size.padded_size = slot_size.logical_size;
116 CHECK_GE(slot_size.logical_size, 0);
117 CHECK_LE(slot_size.logical_size, slot_size.padded_size);
132 [](
size_t sum,
const auto& slot_size) {
134 return sum +
static_cast<size_t>(slot_size.padded_size);
146 [](
size_t sum,
const auto& slot_size) {
148 const auto chosen_bytes =
149 static_cast<size_t>(slot_size.padded_size);
150 if (chosen_bytes ==
sizeof(int64_t)) {
153 return sum + chosen_bytes;
163 [entry_count](
size_t sum,
const auto& slot_size) {
166 align_to_int64(static_cast<size_t>(slot_size.padded_size) * entry_count);
173 return actual_min_byte_width;
175 const auto min_padded_size = std::min_element(
177 return lhs.padded_size < rhs.padded_size;
179 return std::min(min_padded_size->padded_size, actual_min_byte_width);
186 size_t compact_width{0};
188 if (slot_size.padded_size != 0) {
189 compact_width = slot_size.padded_size;
193 if (!compact_width) {
197 for (
const auto& slot_size : slot_sizes_) {
198 if (slot_size.padded_size == 0) {
201 CHECK_EQ(static_cast<size_t>(slot_size.padded_size), compact_width);
203 return compact_width;
209 if (
slot_sizes_[slot_idx].padded_size ==
sizeof(int64_t)) {
225 size_t total_bytes{0};
226 for (
size_t slot_idx = 0; slot_idx <
slot_sizes_.size(); slot_idx++) {
227 auto chosen_bytes =
slot_sizes_[slot_idx].padded_size;
228 if (chosen_bytes ==
sizeof(int64_t)) {
230 CHECK_GE(aligned_total_bytes, total_bytes);
232 const auto padding = aligned_total_bytes - total_bytes;
233 CHECK(padding == 0 || padding == 4);
236 total_bytes = aligned_total_bytes;
238 total_bytes += chosen_bytes;
242 CHECK_GE(aligned_total_bytes, total_bytes);
243 const auto padding = aligned_total_bytes - total_bytes;
244 CHECK(padding == 0 || padding == 4);
250 const std::vector<std::tuple<int8_t, int8_t>>& slots_for_col) {
253 for (
const auto& slot_info : slots_for_col) {
259 const size_t column_idx) {
264 const int8_t logical_size,
265 const size_t column_idx) {
Defines data structures and helpers for the query output buffer layout (column-to-slot mapping and per-slot logical/padded byte sizes); NOTE(review): an earlier brief described this as "the semantic analysis phase", which does not match the visible members — confirm against the file header.
int8_t getMinPaddedByteSize(const int8_t actual_min_byte_width) const
void alignPaddedSlots(const bool sort_on_gpu)
TargetInfo get_target_info(const PointerType target_expr, const bool bigint_count)
size_t getAllSlotsPaddedSize() const
size_t getAllSlotsAlignedPaddedSize() const
std::vector< std::vector< size_t > > col_to_slot_map_
std::vector< SlotSize > slot_sizes_
void setAllSlotsSize(const int8_t slot_width_size)
size_t getColOnlyOffInBytes(const size_t slot_idx) const
const SQLTypeInfo get_compact_type(const TargetInfo &target)
size_t get_bit_width(const SQLTypeInfo &ti)
DEVICE void fill(ARGS &&...args)
size_t getCompactByteWidth() const
Provides column info and slot info for the output buffer and some metadata helpers.
size_t getAlignedPaddedSizeForRange(const size_t end) const
DEVICE auto accumulate(ARGS &&...args)
void addColumn(const std::vector< std::tuple< int8_t, int8_t >> &slots_for_col)
void setAllSlotsPaddedSize(const int8_t padded_size)
size_t getTotalBytesOfColumnarBuffers(const size_t entry_count) const
void addSlotForColumn(const int8_t logical_size, const size_t column_idx)
size_t getSlotCount() const
void setAllSlotsPaddedSizeToLogicalSize()
size_t getColCount() const
void sort_on_gpu(int64_t *val_buff, int32_t *idx_buff, const uint64_t entry_count, const bool desc, const uint32_t chosen_bytes, ThrustAllocator &alloc)
void setAllUnsetSlotsPaddedSize(const int8_t padded_size)
FORCE_INLINE HOST DEVICE T align_to_int64(T addr)