#include <tbb/parallel_for.h>
bool one_or_more_string_ops_is_null(
    const std::vector<StringOps_Namespace::StringOpInfo>& string_op_infos) {
  for (const auto& string_op_info : string_op_infos) {
    if (string_op_info.hasNullLiteralArg()) {
      return true;
    }
  }
  return false;
}
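// Note: a single null literal argument anywhere in the op chain forces every
// output of the chain to null, so codegen() below can skip the translation-map
// lookup entirely and inline a null sentinel instead.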
StringDictionaryTranslationMgr::StringDictionaryTranslationMgr(
    const int32_t source_string_dict_id,
    const int32_t dest_string_dict_id,
    const bool translate_intersection_only,
    const std::vector<StringOps_Namespace::StringOpInfo>& string_op_infos,
    const Data_Namespace::MemoryLevel memory_level,
    const int device_count,
    Executor* executor,
    Data_Namespace::DataMgr* data_mgr,
    const bool delay_translation)
    : source_string_dict_id_(source_string_dict_id)
    , dest_string_dict_id_(dest_string_dict_id)
    , translate_intersection_only_(translate_intersection_only)
    , string_op_infos_(string_op_infos)
    , has_null_string_op_(one_or_more_string_ops_is_null(string_op_infos))
    , memory_level_(memory_level)
    , device_count_(device_count)
    , executor_(executor)
    , data_mgr_(data_mgr) {
  if (!delay_translation && !has_null_string_op_) {
    buildTranslationMap();
    createKernelBuffers();
  }
}
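// Construction normally materializes the translation map eagerly. When
// delay_translation is set (or a null literal makes the result statically
// null), buildTranslationMap()/createKernelBuffers() are left for the caller
// to trigger later.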
void StringDictionaryTranslationMgr::buildTranslationMap() {
  host_translation_map_ = executor_->getStringProxyTranslationMap(
      source_string_dict_id_, dest_string_dict_id_,
      /* ... remaining arguments elided in this excerpt ... */);
}
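// The proxy translation map is a host-resident, dense id -> id vector keyed by
// source string id; translate_intersection_only_ selects whether only the
// intersection of the two dictionaries is mapped, rather than the full source
// domain.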
void StringDictionaryTranslationMgr::createKernelBuffers() {
  if (memory_level_ == Data_Namespace::GPU_LEVEL) {
    const size_t translation_map_size_bytes{host_translation_map_->getVectorMap().size() *
                                            sizeof(int32_t)};
    for (int device_id = 0; device_id < device_count_; ++device_id) {
      device_buffers_.emplace_back(CudaAllocator::allocGpuAbstractBuffer(
          data_mgr_, translation_map_size_bytes, device_id));
      auto device_buffer =
          reinterpret_cast<int32_t*>(device_buffers_.back()->getMemoryPtr());
      copy_to_nvidia_gpu(data_mgr_,
                         reinterpret_cast<CUdeviceptr>(device_buffer),
                         host_translation_map_->data(),
                         translation_map_size_bytes,
                         device_id);
      kernel_translation_maps_.push_back(device_buffer);
    }
  }
}
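// One device buffer per GPU: each generated kernel dereferences a raw,
// device-resident pointer, so kernel_translation_maps_ ends up holding one
// pointer per device (or just the host pointer for CPU execution).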
llvm::Value* StringDictionaryTranslationMgr::codegen(llvm::Value* str_id_input,
                                                     const SQLTypeInfo& input_ti,
                                                     const bool add_nullcheck,
                                                     const CompilationOptions& co) const {
  auto cgen_state_ptr = executor_->getCgenStatePtr();
  if (has_null_string_op_) {
    const auto null_ti = SQLTypeInfo(kTEXT, true /* is_nullable */, kENCODING_DICT);
    return static_cast<llvm::Value*>(
        executor_->cgen_state_->inlineIntNull(null_ti));
  }
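  // This is where has_null_string_op_ pays off: a dictionary-encoded null is
  // just the type's inline integer null sentinel, so no map lookup is
  // generated at all.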
  std::vector<std::shared_ptr<const Analyzer::Constant>> constants_owned;
  std::vector<const Analyzer::Constant*> constants;
  for (const auto kernel_translation_map : kernel_translation_maps_) {
    const int64_t translation_map_handle =
        reinterpret_cast<int64_t>(kernel_translation_map);
    const auto translation_map_handle_literal =
        std::dynamic_pointer_cast<const Analyzer::Constant>(
            Parser::IntLiteral::analyzeValue(translation_map_handle));
    CHECK(translation_map_handle_literal);
    CHECK_EQ(kENCODING_NONE,
             translation_map_handle_literal->get_type_info().get_compression());
    constants_owned.push_back(translation_map_handle_literal);
    constants.push_back(translation_map_handle_literal.get());
  }
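  // The raw buffer address is smuggled into the expression tree as an int64
  // literal, one per device, so the JIT-compiled kernel can address its local
  // copy of the translation map directly.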
  CodeGenerator code_generator(executor_);
  const auto translation_map_handle_lvs =
      co.hoist_literals
          ? code_generator.codegenHoistedConstants(constants, kENCODING_NONE, 0)
          : code_generator.codegen(constants[0], false, co);
  CHECK_EQ(size_t(1), translation_map_handle_lvs.size());
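  // With hoist_literals, every per-device handle becomes a hoisted query
  // literal, letting each device receive its own pointer value; without
  // hoisting there is a single map, so constants[0] is emitted inline.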
  std::unique_ptr<CodeGenerator::NullCheckCodegen> nullcheck_codegen;
  const bool is_nullable = !input_ti.get_notnull();
  const auto decoded_input_ti = SQLTypeInfo(kTEXT, is_nullable, kENCODING_DICT);
  if (add_nullcheck && is_nullable) {
    nullcheck_codegen = std::make_unique<CodeGenerator::NullCheckCodegen>(
        cgen_state_ptr, executor_, str_id_input, decoded_input_ti,
        "dict_encoded_str_cast_nullcheck");
  }
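  // The null-check wrapper branches around the lookup when the input id is the
  // null sentinel, so the translation buffer is never indexed with it.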
  llvm::Value* ret = cgen_state_ptr->emitCall(
      "map_string_dict_id",
      {str_id_input,
       cgen_state_ptr->castToTypeIn(translation_map_handle_lvs.front(), 64),
       cgen_state_ptr->llInt(minSourceStringId())});
  if (nullcheck_codegen) {
    ret = nullcheck_codegen->finalize(cgen_state_ptr->inlineIntNull(decoded_input_ti),
                                      ret);
  }
  return ret;
}
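// For reference, the emitted call resolves to a runtime helper that indexes
// the translation buffer by source string id. A minimal sketch of its assumed
// shape (the actual helper lives in the runtime library, not in this file):
//
//   extern "C" int32_t map_string_dict_id(const int32_t string_id,
//                                         const int64_t translation_map_handle,
//                                         const int32_t min_source_id) {
//     auto* map = reinterpret_cast<const int32_t*>(translation_map_handle);
//     return map[string_id - min_source_id];  // dense id -> id lookup
//   }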
bool StringDictionaryTranslationMgr::isMapValid() const {
  return host_translation_map_ && !host_translation_map_->empty();
}

const int32_t* StringDictionaryTranslationMgr::data() const {
  return isMapValid() ? host_translation_map_->data() : nullptr;
}

int32_t StringDictionaryTranslationMgr::minSourceStringId() const {
  return isMapValid() ? host_translation_map_->domainStart() : 0;
}
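// Usage sketch (hypothetical call site; identifiers and argument values are
// placeholders, not part of this file):
//
//   StringDictionaryTranslationMgr mgr(src_dict_id, dst_dict_id,
//                                      /*translate_intersection_only=*/true,
//                                      string_op_infos,
//                                      Data_Namespace::CPU_LEVEL,
//                                      /*device_count=*/1, executor, data_mgr,
//                                      /*delay_translation=*/false);
//   auto* translated_id_lv = mgr.codegen(str_id_lv, input_ti,
//                                        /*add_nullcheck=*/true, co);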