OmniSciDB  eb3a3d0a03
InputMetadata.cpp
/*
 * Copyright 2017 MapD Technologies, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "InputMetadata.h"
#include "Execute.h"

#include "../Fragmenter/Fragmenter.h"

#include <future>
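// InputTableInfoCache memoizes per-table fragment metadata for a single
// Executor, so repeated lookups during query compilation avoid hitting the
// catalog and fragmenter each time.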
InputTableInfoCache::InputTableInfoCache(Executor* executor) : executor_(executor) {}

namespace {

// Returns a value copy of the given TableInfo: chunk key prefix, fragment
// list, and the physical tuple count.
Fragmenter_Namespace::TableInfo copy_table_info(
    const Fragmenter_Namespace::TableInfo& table_info) {
  Fragmenter_Namespace::TableInfo table_info_copy;
  table_info_copy.chunkKeyPrefix = table_info.chunkKeyPrefix;
  table_info_copy.fragments = table_info.fragments;
  table_info_copy.setPhysicalNumTuples(table_info.getPhysicalNumTuples());
  return table_info_copy;
}

}  // namespace

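// Merges the fragment lists of all physical shards of a (possibly sharded)
// table into a single TableInfo, summing the physical tuple counts.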
Fragmenter_Namespace::TableInfo build_table_info(
    const std::vector<const TableDescriptor*>& shard_tables) {
  size_t total_number_of_tuples{0};
  Fragmenter_Namespace::TableInfo table_info_all_shards;
  for (const TableDescriptor* shard_table : shard_tables) {
    CHECK(shard_table->fragmenter);
    const auto& shard_metainfo = shard_table->fragmenter->getFragmentsForQuery();
    total_number_of_tuples += shard_metainfo.getPhysicalNumTuples();
    table_info_all_shards.fragments.reserve(table_info_all_shards.fragments.size() +
                                            shard_metainfo.fragments.size());
    table_info_all_shards.fragments.insert(table_info_all_shards.fragments.end(),
                                           shard_metainfo.fragments.begin(),
                                           shard_metainfo.fragments.end());
  }
  table_info_all_shards.setPhysicalNumTuples(total_number_of_tuples);
  return table_info_all_shards;
}

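// Returns a copy of the cached TableInfo for table_id, building and caching
// it from the catalog's physical shard descriptors on first use. Copies are
// returned so callers cannot mutate the cached entry.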
Fragmenter_Namespace::TableInfo InputTableInfoCache::getTableInfo(const int table_id) {
  const auto it = cache_.find(table_id);
  if (it != cache_.end()) {
    const auto& table_info = it->second;
    return copy_table_info(table_info);
  }
  const auto cat = executor_->getCatalog();
  CHECK(cat);
  const auto td = cat->getMetadataForTable(table_id);
  CHECK(td);
  const auto shard_tables = cat->getPhysicalTablesDescriptors(td);
  auto table_info = build_table_info(shard_tables);
  auto it_ok = cache_.emplace(table_id, copy_table_info(table_info));
  CHECK(it_ok.second);
  return copy_table_info(table_info);
}

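// Swapping with a default-constructed map releases the buckets immediately,
// which clear() alone would not guarantee.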
void InputTableInfoCache::clear() {
  decltype(cache_)().swap(cache_);
}

namespace {

// True for types whose chunk statistics are tracked as 64-bit integers:
// integers, decimals, times, booleans, and dictionary-encoded strings.
bool uses_int_meta(const SQLTypeInfo& col_ti) {
  return col_ti.is_integer() || col_ti.is_decimal() || col_ti.is_time() ||
         col_ti.is_boolean() ||
         (col_ti.is_string() && col_ti.get_compression() == kENCODING_DICT);
}

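// Wraps an in-memory ResultSet in a TableInfo with a single synthetic
// fragment so downstream code can treat intermediate results like regular
// tables. The three deviceIds slots presumably mirror the memory levels used
// elsewhere in the fragmenter; the mutex guards lazy row counting.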
Fragmenter_Namespace::TableInfo synthesize_table_info(const ResultSetPtr& rows) {
  std::vector<Fragmenter_Namespace::FragmentInfo> result;
  if (rows) {
    result.resize(1);
    auto& fragment = result.front();
    fragment.fragmentId = 0;
    fragment.deviceIds.resize(3);
    fragment.resultSet = rows.get();
    fragment.resultSetMutex.reset(new std::mutex());
  }
  Fragmenter_Namespace::TableInfo table_info;
  table_info.fragments = result;
  return table_info;
}

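// Gathers a TableInfo for every input descriptor. Negative table ids denote
// temporary tables (results of previous query steps); positive ids resolve
// through the executor's per-query cache. A local index map deduplicates
// repeated references to the same table within one query.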
void collect_table_infos(std::vector<InputTableInfo>& table_infos,
                         const std::vector<InputDescriptor>& input_descs,
                         Executor* executor) {
  const auto temporary_tables = executor->getTemporaryTables();
  const auto cat = executor->getCatalog();
  CHECK(cat);
  std::unordered_map<int, size_t> info_cache;
  for (const auto& input_desc : input_descs) {
    const auto table_id = input_desc.getTableId();
    const auto cached_index_it = info_cache.find(table_id);
    if (cached_index_it != info_cache.end()) {
      CHECK_LT(cached_index_it->second, table_infos.size());
      table_infos.push_back(
          {table_id, copy_table_info(table_infos[cached_index_it->second].info)});
      continue;
    }
    if (input_desc.getSourceType() == InputSourceType::RESULT) {
      CHECK_LT(table_id, 0);
      CHECK(temporary_tables);
      const auto it = temporary_tables->find(table_id);
      LOG_IF(FATAL, it == temporary_tables->end())
          << "Failed to find previous query result for node " << -table_id;
      table_infos.push_back({table_id, synthesize_table_info(it->second)});
    } else {
      CHECK(input_desc.getSourceType() == InputSourceType::TABLE);
      table_infos.push_back({table_id, executor->getTableInfo(table_id)});
    }
    CHECK(!table_infos.empty());
    info_cache.insert(std::make_pair(table_id, table_infos.size() - 1));
  }
}

}  // namespace

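// Computes per-column chunk metadata (min/max/null stats) for a ResultSet by
// streaming every row through throwaway Encoders. When the result set is
// large enough for parallel algorithms, the entry range is split into
// near-equal strides, one std::async worker per CPU thread, and each
// worker's encoder stats are reduced into worker 0 at the end.
//
// Hypothetical usage sketch (result_set and run_previous_step are assumed
// names, not from this file):
//   std::shared_ptr<ResultSet> result_set = run_previous_step();
//   const auto metadata_map = synthesize_metadata(result_set.get());
//   // metadata_map[i] now holds the synthesized ChunkMetadata for column i.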
ChunkMetadataMap synthesize_metadata(const ResultSet* rows) {
  auto timer = DEBUG_TIMER(__func__);
  rows->moveToBegin();
  std::vector<std::vector<std::unique_ptr<Encoder>>> dummy_encoders;
  const size_t worker_count =
      use_parallel_algorithms(*rows) ? cpu_threads() : 1;
  for (size_t worker_idx = 0; worker_idx < worker_count; ++worker_idx) {
    dummy_encoders.emplace_back();
    for (size_t i = 0; i < rows->colCount(); ++i) {
      const auto& col_ti = rows->getColType(i);
      dummy_encoders.back().emplace_back(Encoder::Create(nullptr, col_ti));
    }
  }
  const auto do_work = [rows](const std::vector<TargetValue>& crt_row,
                              std::vector<std::unique_ptr<Encoder>>& dummy_encoders) {
    for (size_t i = 0; i < rows->colCount(); ++i) {
      const auto& col_ti = rows->getColType(i);
      const auto& col_val = crt_row[i];
      const auto scalar_col_val = boost::get<ScalarTargetValue>(&col_val);
      CHECK(scalar_col_val);
      if (uses_int_meta(col_ti)) {
        const auto i64_p = boost::get<int64_t>(scalar_col_val);
        CHECK(i64_p);
        dummy_encoders[i]->updateStats(*i64_p, *i64_p == inline_int_null_val(col_ti));
      } else if (col_ti.is_fp()) {
        switch (col_ti.get_type()) {
          case kFLOAT: {
            const auto float_p = boost::get<float>(scalar_col_val);
            CHECK(float_p);
            dummy_encoders[i]->updateStats(*float_p,
                                           *float_p == inline_fp_null_val(col_ti));
            break;
          }
          case kDOUBLE: {
            const auto double_p = boost::get<double>(scalar_col_val);
            CHECK(double_p);
            dummy_encoders[i]->updateStats(*double_p,
                                           *double_p == inline_fp_null_val(col_ti));
            break;
          }
          default:
            CHECK(false);
        }
      } else {
        throw std::runtime_error(col_ti.get_type_name() +
                                 " is not supported in temporary table.");
      }
    }
  };
  if (use_parallel_algorithms(*rows)) {
    const size_t worker_count = cpu_threads();
    std::vector<std::future<void>> compute_stats_threads;
    const auto entry_count = rows->entryCount();
    for (size_t i = 0,
                start_entry = 0,
                stride = (entry_count + worker_count - 1) / worker_count;
         i < worker_count && start_entry < entry_count;
         ++i, start_entry += stride) {
      const auto end_entry = std::min(start_entry + stride, entry_count);
      compute_stats_threads.push_back(std::async(
          std::launch::async,
          [rows, &do_work, &dummy_encoders](
              const size_t start, const size_t end, const size_t worker_idx) {
            for (size_t i = start; i < end; ++i) {
              const auto crt_row = rows->getRowAtNoTranslations(i);
              if (!crt_row.empty()) {
                do_work(crt_row, dummy_encoders[worker_idx]);
              }
            }
          },
          start_entry,
          end_entry,
          i));
    }
    for (auto& child : compute_stats_threads) {
      child.wait();
    }
    for (auto& child : compute_stats_threads) {
      child.get();
    }
  } else {
    while (true) {
      auto crt_row = rows->getNextRow(false, false);
      if (crt_row.empty()) {
        break;
      }
      do_work(crt_row, dummy_encoders[0]);
    }
    rows->moveToBegin();
  }
  ChunkMetadataMap metadata_map;
  // Reduce each worker's stats into worker 0, then emit one ChunkMetadata
  // per column.
  for (size_t worker_idx = 1; worker_idx < worker_count; ++worker_idx) {
    CHECK_LT(worker_idx, dummy_encoders.size());
    const auto& worker_encoders = dummy_encoders[worker_idx];
    for (size_t i = 0; i < rows->colCount(); ++i) {
      dummy_encoders[0][i]->reduceStats(*worker_encoders[i]);
    }
  }
  for (size_t i = 0; i < rows->colCount(); ++i) {
    const auto it_ok =
        metadata_map.emplace(i, dummy_encoders[0][i]->getMetadata(rows->getColType(i)));
    CHECK(it_ok.second);
  }
  return metadata_map;
}

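// Temporary tables always count as a single fragment; physical tables report
// their actual fragment count.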
size_t get_frag_count_of_table(const int table_id, Executor* executor) {
  const auto temporary_tables = executor->getTemporaryTables();
  CHECK(temporary_tables);
  auto it = temporary_tables->find(table_id);
  if (it != temporary_tables->end()) {
    CHECK_GE(int(0), table_id);
    return size_t(1);
  } else {
    const auto table_info = executor->getTableInfo(table_id);
    return table_info.fragments.size();
  }
}

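// Convenience overloads: collect TableInfos for a bare descriptor list or
// for all inputs of a relational algebra execution unit.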
std::vector<InputTableInfo> get_table_infos(
    const std::vector<InputDescriptor>& input_descs,
    Executor* executor) {
  std::vector<InputTableInfo> table_infos;
  collect_table_infos(table_infos, input_descs, executor);
  return table_infos;
}

std::vector<InputTableInfo> get_table_infos(const RelAlgExecutionUnit& ra_exe_unit,
                                            Executor* executor) {
  INJECT_TIMER(get_table_infos);
  std::vector<InputTableInfo> table_infos;
  collect_table_infos(table_infos, ra_exe_unit.input_descs, executor);
  return table_infos;
}

const ChunkMetadataMap& Fragmenter_Namespace::FragmentInfo::getChunkMetadataMap() const {
  if (resultSet && !synthesizedMetadataIsValid) {
    chunkMetadataMap = synthesize_metadata(resultSet);
    synthesizedMetadataIsValid = true;
  }
  return chunkMetadataMap;
}

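// Deep-copies every ChunkMetadata entry so the caller owns metadata that can
// be updated independently of this fragment's cached map.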
ChunkMetadataMap Fragmenter_Namespace::FragmentInfo::getChunkMetadataMapPhysicalCopy()
    const {
  ChunkMetadataMap metadata_map;
  for (const auto& [column_id, chunk_metadata] : chunkMetadataMap) {
    metadata_map[column_id] = std::make_shared<ChunkMetadata>(*chunk_metadata);
  }
  return metadata_map;
}

size_t Fragmenter_Namespace::FragmentInfo::getNumTuples() const {
  std::unique_ptr<std::lock_guard<std::mutex>> lock;
  if (resultSetMutex) {
    lock.reset(new std::lock_guard<std::mutex>(*resultSetMutex));
  }
  CHECK_EQ(!!resultSet, !!resultSetMutex);
  if (resultSet && !synthesizedNumTuplesIsValid) {
    numTuples = resultSet->rowCount();
    synthesizedNumTuplesIsValid = true;
  }
  return numTuples;
}

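// The "upper bound" variants below use entryCount(), which counts allocated
// result-set entries and may exceed the true row count, avoiding a
// potentially expensive exact rowCount() scan.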
size_t Fragmenter_Namespace::TableInfo::getNumTuples() const {
  if (!fragments.empty() && fragments.front().resultSet) {
    return fragments.front().getNumTuples();
  }
  return numTuples;
}

size_t Fragmenter_Namespace::TableInfo::getNumTuplesUpperBound() const {
  if (!fragments.empty() && fragments.front().resultSet) {
    return fragments.front().resultSet->entryCount();
  }
  return numTuples;
}

size_t Fragmenter_Namespace::TableInfo::getFragmentNumTuplesUpperBound() const {
  if (!fragments.empty() && fragments.front().resultSet) {
    return fragments.front().resultSet->entryCount();
  }
  size_t fragment_num_tuples_upper_bound = 0;
  for (const auto& fragment : fragments) {
    fragment_num_tuples_upper_bound =
        std::max(fragment.getNumTuples(), fragment_num_tuples_upper_bound);
  }
  return fragment_num_tuples_upper_bound;
}