OmniSciDB  d2f719934e
TableArchiver.cpp
1 /*
2  * Copyright 2020 OmniSci, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "TableArchiver/TableArchiver.h"
18 
19 #include <algorithm>
20 #include <boost/filesystem.hpp>
21 #include <boost/process.hpp>
22 #include <boost/range/combine.hpp>
23 #include <boost/version.hpp>
24 #include <cerrno>
25 #include <cstdio>
26 #include <cstring>
27 #include <exception>
28 #include <list>
29 #include <memory>
30 #include <regex>
31 #include <set>
32 #include <sstream>
33 #include <system_error>
34 
36 #include "LockMgr/LockMgr.h"
37 #include "Logger/Logger.h"
38 #include "Parser/ParseDDL.h"
39 #include "Shared/File.h"
40 #include "Shared/StringTransform.h"
41 #include "Shared/ThreadController.h"
42 #include "Shared/measure.h"
43 #include "Shared/thread_count.h"
44 
45 extern bool g_cluster;
46 extern bool g_test_rollback_dump_restore;
47 
48 constexpr static char const* table_schema_filename = "_table.sql";
49 constexpr static char const* table_oldinfo_filename = "_table.oldinfo";
50 constexpr static char const* table_epoch_filename = "_table.epoch";
51 
52 #if BOOST_VERSION < 107300
53 namespace std {
54 
55 template <typename T, typename U>
56 struct tuple_size<boost::tuples::cons<T, U>>
57  : boost::tuples::length<boost::tuples::cons<T, U>> {};
58 template <size_t I, typename T, typename U>
59 struct tuple_element<I, boost::tuples::cons<T, U>>
60  : boost::tuples::element<I, boost::tuples::cons<T, U>> {};
61 
62 } // namespace std
63 #endif
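The specializations above are only needed for Boost releases older than 1.73, which do not provide the std::tuple_size / std::tuple_element adapters that let the boost::tuples::cons tuples produced by boost::combine participate in C++17 structured bindings; restoreTable() below relies on exactly that when it walks source and destination columns in lockstep. A minimal standalone sketch of the pattern (illustrative, not part of this file):

#include <boost/range/combine.hpp>
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<int> ids{1, 2, 3};
  std::vector<std::string> names{"id", "ts", "val"};
  // With Boost >= 1.73 (or the adapters defined above for older Boost), the tuple
  // yielded by boost::combine can be unpacked directly with a structured binding.
  for (const auto& [id, name] : boost::combine(ids, names)) {
    std::cout << id << ": " << name << '\n';
  }
  return 0;
}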
64 
65 namespace {
66 
67 inline auto simple_file_closer = [](FILE* f) { std::fclose(f); };
68 
69 inline std::string abs_path(const File_Namespace::GlobalFileMgr* global_file_mgr) {
70  return boost::filesystem::canonical(global_file_mgr->getBasePath()).string();
71 }
72 
73 inline std::string run(const std::string& cmd, const std::string& chdir = "") {
74  VLOG(3) << "running cmd: " << cmd;
75  int rcode;
76  std::error_code ec;
77  std::string output, errors;
78  const auto time_ms = measure<>::execution([&]() {
79  using namespace boost::process;
80  ipstream stdout, stderr;
81  if (!chdir.empty()) {
82  rcode = system(cmd, std_out > stdout, std_err > stderr, ec, start_dir = chdir);
83  } else {
84  rcode = system(cmd, std_out > stdout, std_err > stderr, ec);
85  }
86  std::ostringstream ss_output, ss_errors;
87  stdout >> ss_output.rdbuf();
88  stderr >> ss_errors.rdbuf();
89  output = ss_output.str();
90  errors = ss_errors.str();
91  });
92  if (rcode || ec) {
93  LOG(ERROR) << "failed cmd: " << cmd;
94  LOG(ERROR) << "exit code: " << rcode;
95  LOG(ERROR) << "error code: " << ec.value() << " - " << ec.message();
96  LOG(ERROR) << "stdout: " << output;
97  LOG(ERROR) << "stderr: " << errors;
98 #if defined(__APPLE__)
99  // osx bsdtar options "--use-compress-program" and "--fast-read" together
100  // run into a pipe write error after tar extracts the first occurrence of a
101  // file and closes the read end while the decompression program still writes
102  // to the pipe. bsdtar doesn't handle this situation as gracefully as gnu tar does.
103  if (1 == rcode && cmd.find("--fast-read") != std::string::npos &&
104  (errors.find("cannot write decoded block") != std::string::npos ||
105  errors.find("Broken pipe") != std::string::npos)) {
106  // ignore this error, or lose speed advantage of "--fast-read" on osx.
107  LOG(ERROR) << "tar error ignored on osx for --fast-read";
108  } else
109 #endif
110  // circumvent tar warning on reading file that is "changed as we read it".
111  // this warning results from reading a table file under concurrent inserts
112  if (1 == rcode && errors.find("changed as we read") != std::string::npos) {
113  LOG(ERROR) << "tar error ignored under concurrent inserts";
114  } else {
115  int error_code;
116  std::string error_message;
117  if (ec) {
118  error_code = ec.value();
119  error_message = ec.message();
120  } else {
121  error_code = rcode;
122  // Show a more concise message for permission errors instead of the default
123  // verbose message. Error logs will still contain all details.
124  if (to_lower(errors).find("permission denied") != std::string::npos) {
125  error_message = "Insufficient file read/write permission.";
126  } else {
127  error_message = errors;
128  }
129  }
130  throw std::runtime_error(
131  "An error occurred while executing an internal command. Error code: " +
132  std::to_string(error_code) + ", message: " + error_message);
133  }
134  } else {
135  VLOG(3) << "finished cmd: " << cmd;
136  VLOG(3) << "time: " << time_ms << " ms";
137  VLOG(3) << "stdout: " << output;
138  }
139  return output;
140 }
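run() is the workhorse used throughout this file: it launches a shell command through boost::process, captures stdout/stderr, logs the elapsed time, and turns failures into exceptions, whitelisting only the two benign tar errors handled above. A reduced, hedged sketch of the underlying boost::process pattern (standalone; the command is illustrative):

#include <boost/process.hpp>
#include <iostream>
#include <sstream>
#include <system_error>

int main() {
  namespace bp = boost::process;
  std::error_code ec;
  bp::ipstream out, err;
  // Run a child process, redirecting its stdout/stderr into pipes; passing the
  // error_code keeps boost::process from throwing on launch failure.
  const int rcode =
      bp::system("tar --version", bp::std_out > out, bp::std_err > err, ec);
  std::ostringstream ss_out, ss_err;
  out >> ss_out.rdbuf();
  err >> ss_err.rdbuf();
  std::cout << "exit code: " << rcode << ", error code: " << ec.value() << '\n'
            << ss_out.str() << ss_err.str();
  return 0;
}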
141 
142 inline std::string simple_file_cat(const std::string& archive_path,
143  const std::string& file_name,
144  const std::string& compression) {
145  ddl_utils::validate_allowed_file_path(archive_path,
146  ddl_utils::DataTransferType::IMPORT);
147 #if defined(__APPLE__)
148  constexpr static auto opt_occurrence = "--fast-read";
149 #else
150  constexpr static auto opt_occurrence = "--occurrence=1";
151 #endif
152  boost::filesystem::path temp_dir =
153  boost::filesystem::temp_directory_path() / boost::filesystem::unique_path();
154  boost::filesystem::create_directories(temp_dir);
155  run("tar " + compression + " -xvf " + get_quoted_string(archive_path) + " " +
156  opt_occurrence + " " + file_name,
157  temp_dir.string());
158  const auto output = run("cat " + (temp_dir / file_name).string());
159  boost::filesystem::remove_all(temp_dir);
160  return output;
161 }
162 
163 inline std::string get_table_schema(const std::string& archive_path,
164  const std::string& table,
165  const std::string& compression) {
166  const auto schema_str =
167  simple_file_cat(archive_path, table_schema_filename, compression);
168  std::regex regex("@T");
169  return std::regex_replace(schema_str, regex, table);
170 }
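The schema file stored in the archive does not contain the original table name: the CREATE TABLE text written by Catalog::dumpSchema evidently carries the placeholder "@T", which is what allows a dump to be restored or migrated under a different name. get_table_schema() simply substitutes the placeholder. A small illustrative example (the CREATE TABLE text here is made up):

#include <iostream>
#include <regex>
#include <string>

int main() {
  // Hypothetical contents of _table.sql as stored in a dump archive.
  const std::string schema_str =
      "CREATE TABLE @T (id INTEGER, val TEXT ENCODING DICT(32));";
  const std::string table = "sales_restored";
  std::cout << std::regex_replace(schema_str, std::regex("@T"), table) << '\n';
  // Prints: CREATE TABLE sales_restored (id INTEGER, val TEXT ENCODING DICT(32));
}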
171 
172 // Adjust column ids in chunk keys in a table's data files under a temp_data_dir,
173 // including files of all shards of the table. Can be slow for big files, but should
174 // scale faster than refragmentizing. Table altering should be rare for OLAP.
175 void adjust_altered_table_files(const std::string& temp_data_dir,
176  const std::unordered_map<int, int>& column_ids_map) {
177  boost::filesystem::path base_path(temp_data_dir);
178  boost::filesystem::recursive_directory_iterator end_it;
179  ThreadController_NS::SimpleThreadController<> thread_controller(cpu_threads());
180  for (boost::filesystem::recursive_directory_iterator fit(base_path); fit != end_it;
181  ++fit) {
182  if (boost::filesystem::is_regular_file(fit->status())) {
183  const std::string file_path = fit->path().string();
184  const std::string file_name = fit->path().filename().string();
185  std::vector<std::string> tokens;
186  boost::split(tokens, file_name, boost::is_any_of("."));
187  // ref. FileMgr::init for hint of data file name layout
188  if (tokens.size() > 2 && MAPD_FILE_EXT == "." + tokens[2]) {
189  thread_controller.startThread([file_name, file_path, tokens, &column_ids_map] {
190  const auto page_size = boost::lexical_cast<int64_t>(tokens[1]);
191  const auto file_size = boost::filesystem::file_size(file_path);
192  std::unique_ptr<FILE, decltype(simple_file_closer)> fp(
193  std::fopen(file_path.c_str(), "r+"), simple_file_closer);
194  if (!fp) {
195  throw std::runtime_error("Failed to open " + file_path +
196  " for update: " + std::strerror(errno));
197  }
198  // ref. FileInfo::openExistingFile for hint of chunk header layout
199  for (size_t page = 0; page < file_size / page_size; ++page) {
200  int ints[8];
201  if (0 != std::fseek(fp.get(), page * page_size, SEEK_SET)) {
202  throw std::runtime_error("Failed to seek to page# " + std::to_string(page) +
203  file_path + " for read: " + std::strerror(errno));
204  }
205  if (1 != fread(ints, sizeof ints, 1, fp.get())) {
206  throw std::runtime_error("Failed to read " + file_path + ": " +
207  std::strerror(errno));
208  }
209  if (ints[0] > 0) { // header size
210  auto cit = column_ids_map.find(ints[3]);
211  CHECK(cit != column_ids_map.end());
212  if (ints[3] != cit->second) {
213  ints[3] = cit->second;
214  if (0 != std::fseek(fp.get(), page * page_size, SEEK_SET)) {
215  throw std::runtime_error("Failed to seek to page# " +
216  std::to_string(page) + file_path +
217  " for write: " + std::strerror(errno));
218  }
219  if (1 != fwrite(ints, sizeof ints, 1, fp.get())) {
220  throw std::runtime_error("Failed to write " + file_path + ": " +
221  std::strerror(errno));
222  }
223  }
224  }
225  }
226  });
227  thread_controller.checkThreadsStatus();
228  }
229  }
230  }
231  thread_controller.finish();
232 }
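The loop above assumes the data-page layout hinted at by FileInfo::openExistingFile: each page begins with a small array of ints whose first value is the header size and whose fourth value (ints[3]) is the column-id portion of the chunk key, which is the only field that needs remapping. A reduced, hedged sketch of the per-page patch with the surrounding file handling stripped away (the function name is illustrative, not from this codebase):

#include <cstddef>
#include <cstdio>
#include <stdexcept>
#include <unordered_map>

// Patch the column id stored in one page header, assuming the layout described above.
inline void patch_page_column_id(std::FILE* fp,
                                 size_t page,
                                 size_t page_size,
                                 const std::unordered_map<int, int>& column_ids_map) {
  int ints[8];
  if (std::fseek(fp, static_cast<long>(page * page_size), SEEK_SET) != 0 ||
      std::fread(ints, sizeof ints, 1, fp) != 1) {
    throw std::runtime_error("failed to read page header");
  }
  if (ints[0] > 0) {  // non-empty header
    const auto it = column_ids_map.find(ints[3]);
    if (it != column_ids_map.end() && it->second != ints[3]) {
      ints[3] = it->second;  // old column id -> new column id
      if (std::fseek(fp, static_cast<long>(page * page_size), SEEK_SET) != 0 ||
          std::fwrite(ints, sizeof ints, 1, fp) != 1) {
        throw std::runtime_error("failed to write page header");
      }
    }
  }
}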
233 
234 void rename_table_directories(const File_Namespace::GlobalFileMgr* global_file_mgr,
235  const std::string& temp_data_dir,
236  const std::vector<std::string>& target_paths,
237  const std::string& name_prefix) {
238  boost::filesystem::path base_path(temp_data_dir);
239  boost::filesystem::directory_iterator end_it;
240  int target_path_index = 0;
241  for (boost::filesystem::directory_iterator fit(base_path); fit != end_it; ++fit) {
242  if (!boost::filesystem::is_regular_file(fit->status())) {
243  const std::string file_path = fit->path().string();
244  const std::string file_name = fit->path().filename().string();
245  if (boost::istarts_with(file_name, name_prefix)) {
246  const std::string target_path =
247  abs_path(global_file_mgr) + "/" + target_paths[target_path_index++];
248  if (std::rename(file_path.c_str(), target_path.c_str())) {
249  throw std::runtime_error("Failed to rename file " + file_path + " to " +
250  target_path + ": " + std::strerror(errno));
251  }
252  }
253  }
254  }
255 }
256 
257 } // namespace
258 
259 void TableArchiver::dumpTable(const TableDescriptor* td,
260  const std::string& archive_path,
261  const std::string& compression) {
262  if (td->is_system_table) {
263  throw std::runtime_error("Dumping a system table is not supported.");
264  }
265  ddl_utils::validate_allowed_file_path(archive_path,
266  ddl_utils::DataTransferType::EXPORT);
267  if (g_cluster) {
268  throw std::runtime_error("DUMP/RESTORE is not supported yet on distributed setup.");
269  }
270  if (boost::filesystem::exists(archive_path)) {
271  throw std::runtime_error("Archive " + archive_path + " already exists.");
272  }
273  if (td->isView || td->persistenceLevel != Data_Namespace::MemoryLevel::DISK_LEVEL) {
274  throw std::runtime_error("Dumping view or temporary table is not supported.");
275  }
276  // collect paths of files to archive
277  const auto global_file_mgr = cat_->getDataMgr().getGlobalFileMgr();
278  std::vector<std::string> file_paths;
279  auto file_writer = [&file_paths, global_file_mgr](const std::string& file_name,
280  const std::string& file_type,
281  const std::string& file_data) {
282  const auto file_path = abs_path(global_file_mgr) + "/" + file_name;
283  std::unique_ptr<FILE, decltype(simple_file_closer)> fp(
284  std::fopen(file_path.c_str(), "w"), simple_file_closer);
285  if (!fp) {
286  throw std::runtime_error("Failed to create " + file_type + " file '" + file_path +
287  "': " + std::strerror(errno));
288  }
289  if (std::fwrite(file_data.data(), 1, file_data.size(), fp.get()) < file_data.size()) {
290  throw std::runtime_error("Failed to write " + file_type + " file '" + file_path +
291  "': " + std::strerror(errno));
292  }
293  file_paths.push_back(file_name);
294  };
295 
296  const auto table_name = td->tableName;
297  {
298  // - gen schema file
299  const auto schema_str = cat_->dumpSchema(td);
300  file_writer(table_schema_filename, "table schema", schema_str);
301  // - gen column-old-info file
302  const auto cds = cat_->getAllColumnMetadataForTable(td->tableId, true, true, true);
303  std::vector<std::string> column_oldinfo;
304  std::transform(cds.begin(),
305  cds.end(),
306  std::back_inserter(column_oldinfo),
307  [&](const auto cd) -> std::string {
308  return cd->columnName + ":" + std::to_string(cd->columnId) + ":" +
309  cat_->getColumnDictDirectory(cd);
310  });
311  const auto column_oldinfo_str = boost::algorithm::join(column_oldinfo, " ");
312  file_writer(table_oldinfo_filename, "table old info", column_oldinfo_str);
313  // - gen table epoch
314  const auto epoch = cat_->getTableEpoch(cat_->getCurrentDB().dbId, td->tableId);
315  file_writer(table_epoch_filename, "table epoch", std::to_string(epoch));
316  // - collect table data file paths ...
317  const auto data_file_dirs = cat_->getTableDataDirectories(td);
318  file_paths.insert(file_paths.end(), data_file_dirs.begin(), data_file_dirs.end());
319  // - collect table dict file paths ...
320  const auto dict_file_dirs = cat_->getTableDictDirectories(td);
321  file_paths.insert(file_paths.end(), dict_file_dirs.begin(), dict_file_dirs.end());
322  // tar takes time. release cat lock to yield the cat to concurrent CREATE statements.
323  }
324  // run tar to archive the files ... this may take a while !!
325  run("tar " + compression + " -cvf " + get_quoted_string(archive_path) + " " +
326  boost::algorithm::join(file_paths, " "),
327  abs_path(global_file_mgr));
328 }
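dumpTable() therefore produces a single tar archive, created from the GlobalFileMgr base path, containing three small metadata files (_table.sql with the @T-placeholder schema, _table.oldinfo with space-separated columnName:columnId:dictDirectory entries, _table.epoch with the table epoch) plus the table's data and dictionary directories. The metadata files are written with the file_writer lambda; here is a standalone sketch of that idiom, a plain fopen/fwrite wrapped in a unique_ptr so the handle is always closed (the helper name is illustrative):

#include <cerrno>
#include <cstdio>
#include <cstring>
#include <memory>
#include <stdexcept>
#include <string>

// Illustrative helper mirroring the file_writer lambda in dumpTable().
void write_small_file(const std::string& file_path, const std::string& file_data) {
  const auto closer = [](std::FILE* f) { std::fclose(f); };
  std::unique_ptr<std::FILE, decltype(closer)> fp(std::fopen(file_path.c_str(), "w"),
                                                  closer);
  if (!fp) {
    throw std::runtime_error("Failed to create '" + file_path +
                             "': " + std::strerror(errno));
  }
  if (std::fwrite(file_data.data(), 1, file_data.size(), fp.get()) < file_data.size()) {
    throw std::runtime_error("Failed to write '" + file_path +
                             "': " + std::strerror(errno));
  }
}

// e.g. write_small_file("/tmp/_table.epoch", std::to_string(42));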
329 
330 // Restore data and dict files of a table from a tgz archive.
331 void TableArchiver::restoreTable(const Catalog_Namespace::SessionInfo& session,
332  const TableDescriptor* td,
333  const std::string& archive_path,
334  const std::string& compression) {
335  ddl_utils::validate_allowed_file_path(archive_path,
336  ddl_utils::DataTransferType::IMPORT);
337  if (g_cluster) {
338  throw std::runtime_error("DUMP/RESTORE is not supported yet on distributed setup.");
339  }
340  if (!boost::filesystem::exists(archive_path)) {
341  throw std::runtime_error("Archive " + archive_path + " does not exist.");
342  }
343  if (td->isView || td->persistenceLevel != Data_Namespace::MemoryLevel::DISK_LEVEL) {
344  throw std::runtime_error("Restoring view or temporary table is not supported.");
345  }
346  // Obtain table schema read lock to prevent modification of the schema during
347  // restoration
348  const auto table_read_lock =
349  lockmgr::TableSchemaLockMgr::getReadLockForTable(*cat_, td->tableName);
350  // prevent concurrent inserts into table during restoration
351  const auto insert_data_lock =
352  lockmgr::InsertDataLockMgr::getWriteLockForTable(*cat_, td->tableName);
353 
354  // untar takes time. no grab of cat lock to yield to concurrent CREATE stmts.
355  const auto global_file_mgr = cat_->getDataMgr().getGlobalFileMgr();
356  // dirs where src files are untarred and dst files are backed up
357  constexpr static const auto temp_data_basename = "_data";
358  constexpr static const auto temp_back_basename = "_back";
359  const auto temp_data_dir = abs_path(global_file_mgr) + "/" + temp_data_basename;
360  const auto temp_back_dir = abs_path(global_file_mgr) + "/" + temp_back_basename;
361  // clean up tmp dirs and files in any case
362  auto tmp_files_cleaner = [&](void*) {
363  run("rm -rf " + temp_data_dir + " " + temp_back_dir);
364  run("rm -f " + abs_path(global_file_mgr) + "/" + table_schema_filename);
365  run("rm -f " + abs_path(global_file_mgr) + "/" + table_oldinfo_filename);
366  run("rm -f " + abs_path(global_file_mgr) + "/" + table_epoch_filename);
367  };
368  std::unique_ptr<decltype(tmp_files_cleaner), decltype(tmp_files_cleaner)> tfc(
369  &tmp_files_cleaner, tmp_files_cleaner);
370  // extract & parse schema
371  const auto schema_str = get_table_schema(archive_path, td->tableName, compression);
372  const auto create_table_stmt =
373  Parser::parseDDL<Parser::CreateTableStmt>("table schema", schema_str);
374  // verify compatibility between source and destination schemas
375  TableDescriptor src_td;
376  std::list<ColumnDescriptor> src_columns;
377  std::vector<Parser::SharedDictionaryDef> shared_dict_defs;
378  create_table_stmt->executeDryRun(session, src_td, src_columns, shared_dict_defs);
379  // - sanity check table-level compatibility
380  if (src_td.hasDeletedCol != td->hasDeletedCol) {
381  // TODO: allow the case, in which src data enables vacuum while
382  // dst doesn't, by simply discarding src $deleted column data.
383  throw std::runtime_error("Incompatible table VACUUM option");
384  }
385  if (src_td.nShards != td->nShards) {
386  // TODO: allow different shard numbers if they have a "GCD",
387  // by splitting/merging src data files before drop into dst.
388  throw std::runtime_error("Unmatched number of table shards");
389  }
390  // - sanity check column-level compatibility (based on column names)
391  const auto dst_columns =
392  cat_->getAllColumnMetadataForTable(td->tableId, false, false, false);
393  if (dst_columns.size() != src_columns.size()) {
394  throw std::runtime_error("Unmatched number of table columns");
395  }
396  for (const auto& [src_cd, dst_cd] : boost::combine(src_columns, dst_columns)) {
397  if (src_cd.columnType.get_type_name() != dst_cd->columnType.get_type_name() ||
398  src_cd.columnType.get_compression_name() !=
399  dst_cd->columnType.get_compression_name()) {
400  throw std::runtime_error("Incompatible types on column " + src_cd.columnName);
401  }
402  }
403  // extract src table column ids (ALL columns incl. system/virtual/phy geo cols)
404  const auto all_src_oldinfo_str =
405  simple_file_cat(archive_path, table_oldinfo_filename, compression);
406  std::vector<std::string> src_oldinfo_strs;
407  boost::algorithm::split(src_oldinfo_strs,
408  all_src_oldinfo_str,
409  boost::is_any_of(" "),
410  boost::token_compress_on);
411  auto all_dst_columns =
412  cat_->getAllColumnMetadataForTable(td->tableId, true, true, true);
413  if (src_oldinfo_strs.size() != all_dst_columns.size()) {
414  throw std::runtime_error("Source table has an unmatched number of columns: " +
415  std::to_string(src_oldinfo_strs.size()) + " vs " +
416  std::to_string(all_dst_columns.size()));
417  }
418  // build a map of src column ids and dst column ids, just in case src table has been
419 // ALTERed before and chunk keys of src table need to be adjusted accordingly.
420  // note: this map is used only for the case of migrating a table and not for restoring
421  // a table. When restoring a table, the two tables must have the same column ids.
422  //
423  // also build a map of src dict paths and dst dict paths for relocating src dicts
424  std::unordered_map<int, int> column_ids_map;
425  std::unordered_map<std::string, std::string> dict_paths_map;
426  // sort inputs of transform in lexical order of column names for correct mappings
427  std::list<std::vector<std::string>> src_oldinfo_tokens;
428  std::transform(
429  src_oldinfo_strs.begin(),
430  src_oldinfo_strs.end(),
431  std::back_inserter(src_oldinfo_tokens),
432  [](const auto& src_oldinfo_str) -> auto {
433  std::vector<std::string> tokens;
434  boost::algorithm::split(
435  tokens, src_oldinfo_str, boost::is_any_of(":"), boost::token_compress_on);
436  return tokens;
437  });
438  src_oldinfo_tokens.sort(
439  [](const auto& lhs, const auto& rhs) { return lhs[0].compare(rhs[0]) < 0; });
440  all_dst_columns.sort(
441  [](auto a, auto b) { return a->columnName.compare(b->columnName) < 0; });
442  // transform inputs into the maps
443  std::transform(src_oldinfo_tokens.begin(),
444  src_oldinfo_tokens.end(),
445  all_dst_columns.begin(),
446  std::inserter(column_ids_map, column_ids_map.end()),
447  [&](const auto& tokens, const auto& cd) -> std::pair<int, int> {
448  VLOG(3) << boost::algorithm::join(tokens, ":") << " ==> "
449  << cd->columnName << ":" << cd->columnId;
450  dict_paths_map[tokens[2]] = cat_->getColumnDictDirectory(cd);
451  return {boost::lexical_cast<int>(tokens[1]), cd->columnId};
452  });
453  bool was_table_altered = false;
454  std::for_each(column_ids_map.begin(), column_ids_map.end(), [&](auto& it) {
455  was_table_altered = was_table_altered || it.first != it.second;
456  });
457  VLOG(3) << "was_table_altered = " << was_table_altered;
458  // extract all data files to a temp dir. they are swapped into the dst table dirs only
459  // after everything is set, otherwise the table could be corrupted if anything fails midway.
460  run("rm -rf " + temp_data_dir);
461  run("mkdir -p " + temp_data_dir);
462  run("tar " + compression + " -xvf " + get_quoted_string(archive_path), temp_data_dir);
463  // if table was ever altered after it was created, update column ids in chunk headers.
464  if (was_table_altered) {
465  const auto time_ms = measure<>::execution(
466  [&]() { adjust_altered_table_files(temp_data_dir, column_ids_map); });
467  VLOG(3) << "adjust_altered_table_files: " << time_ms << " ms";
468  }
469  // finally,,, swap table data/dict dirs!
470  const auto data_file_dirs = cat_->getTableDataDirectories(td);
471  const auto dict_file_dirs = cat_->getTableDictDirectories(td);
472  // move current target dirs, if exists, to backup dir
473  std::vector<std::string> both_file_dirs;
474  std::merge(data_file_dirs.begin(),
475  data_file_dirs.end(),
476  dict_file_dirs.begin(),
477  dict_file_dirs.end(),
478  std::back_inserter(both_file_dirs));
479  bool backup_completed = false;
480  try {
481  run("rm -rf " + temp_back_dir);
482  run("mkdir -p " + temp_back_dir);
483  for (const auto& dir : both_file_dirs) {
484  const auto dir_full_path = abs_path(global_file_mgr) + "/" + dir;
485  if (boost::filesystem::is_directory(dir_full_path)) {
486  run("mv " + dir_full_path + " " + temp_back_dir);
487  }
488  }
489  backup_completed = true;
490  // accord src data dirs to dst
491  rename_table_directories(
492  cat_->getDataMgr().getGlobalFileMgr(), temp_data_dir, data_file_dirs, "table_");
493  // accord src dict dirs to dst
494  for (const auto& dit : dict_paths_map) {
495  if (!dit.first.empty() && !dit.second.empty()) {
496  const auto src_dict_path = temp_data_dir + "/" + dit.first;
497  const auto dst_dict_path = abs_path(global_file_mgr) + "/" + dit.second;
498  run("mv " + src_dict_path + " " + dst_dict_path);
499  }
500  }
501  // throw if sanity test forces a rollback
502  if (g_test_rollback_dump_restore) {
503  throw std::runtime_error("lol!");
504  }
505  } catch (...) {
506  // once backup is completed, whatever in abs_path(global_file_mgr) is the "src"
507  // dirs that are to be rolled back and discarded
508  if (backup_completed) {
509  run("rm -rf " + boost::algorithm::join(both_file_dirs, " "),
510  abs_path(global_file_mgr));
511  }
512  // complete rollback by recovering original "dst" table dirs from backup dir
513  boost::filesystem::path base_path(temp_back_dir);
514  boost::filesystem::directory_iterator end_it;
515  for (boost::filesystem::directory_iterator fit(base_path); fit != end_it; ++fit) {
516  run("mv " + fit->path().string() + " .", abs_path(global_file_mgr));
517  }
518  throw;
519  }
520  // set for reloading table from the restored/migrated files
521  const auto epoch = simple_file_cat(archive_path, table_epoch_filename, compression);
522  cat_->setTableEpoch(
523  cat_->getCurrentDB().dbId, td->tableId, boost::lexical_cast<int>(epoch));
524 }
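The column-id map built above is what decides whether chunk headers must be rewritten: source columns (parsed from _table.oldinfo) and destination columns are both sorted by column name, paired positionally, and their ids compared. A small self-contained illustration with made-up column ids, assuming the dumped table once had a column dropped and re-added so its id moved:

#include <iostream>
#include <map>
#include <string>
#include <unordered_map>

int main() {
  // Hypothetical "name -> column id" views of the two tables; std::map keeps the
  // keys in the same lexical order that restoreTable() sorts by.
  const std::map<std::string, int> src{{"id", 1}, {"rowid", 2}, {"val", 4}};
  const std::map<std::string, int> dst{{"id", 1}, {"rowid", 2}, {"val", 3}};
  std::unordered_map<int, int> column_ids_map;
  auto dit = dst.begin();
  for (const auto& [name, old_id] : src) {
    column_ids_map[old_id] = dit->second;  // pair columns positionally by sorted name
    std::cout << name << ": " << old_id << " ==> " << dit->second << '\n';
    ++dit;
  }
  bool was_table_altered = false;
  for (const auto& [old_id, new_id] : column_ids_map) {
    was_table_altered = was_table_altered || old_id != new_id;
  }
  std::cout << "was_table_altered = " << was_table_altered << '\n';  // prints 1
}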
525 
526 // Migrate a table, which doesn't exist in current db, from a tar ball to the db.
527 // This actually creates the table and restores data/dict files from the tar ball.
528 void TableArchiver::restoreTable(const Catalog_Namespace::SessionInfo& session,
529  const std::string& table_name,
530  const std::string& archive_path,
531  const std::string& compression) {
532  // replace table name and drop foreign dict references
533  const auto schema_str = get_table_schema(archive_path, table_name, compression);
534  Parser::parseDDL<Parser::CreateTableStmt>("table schema", schema_str)->execute(session);
535  try {
536  restoreTable(
537  session, cat_->getMetadataForTable(table_name), archive_path, compression);
538  } catch (...) {
539  Parser::parseDDL<Parser::DropTableStmt>("statement",
540  "DROP TABLE IF EXISTS " + table_name + ";")
541  ->execute(session);
542  throw;
543  }
544 }
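Taken together, dumpTable() backs the SQL-level DUMP TABLE command and the two restoreTable() overloads back RESTORE TABLE; the overload above handles the migration case by first executing the name-substituted CREATE TABLE and dropping the table again if restoration fails. A hedged, self-contained skeleton of that create-then-roll-back pattern (the do_* helpers are placeholders, not APIs from this codebase):

#include <iostream>
#include <stdexcept>
#include <string>

// Placeholder steps; in TableArchiver these are parseDDL(...)->execute(session)
// and restoreTable(session, td, archive_path, compression).
void do_create_table(const std::string& t) { std::cout << "create " << t << '\n'; }
void do_restore_table(const std::string& t) {
  throw std::runtime_error("restore of " + t + " failed");
}
void do_drop_table(const std::string& t) { std::cout << "drop " << t << '\n'; }

// Skeleton of the migrate path above: create first, restore, drop again on failure.
void migrate_table(const std::string& table_name) {
  do_create_table(table_name);
  try {
    do_restore_table(table_name);
  } catch (...) {
    do_drop_table(table_name);  // roll back the freshly created table
    throw;                      // propagate the original error
  }
}

int main() {
  try {
    migrate_table("sales_copy");
  } catch (const std::exception& e) {
    std::cout << "migration aborted: " << e.what() << '\n';
  }
}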