OmniSciDB  bf83d84833
TableArchiver.cpp
/*
 * Copyright 2020 OmniSci, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "TableArchiver/TableArchiver.h"

#include <algorithm>
#include <boost/filesystem.hpp>
#include <boost/process.hpp>
#include <boost/range/combine.hpp>
#include <boost/version.hpp>
#include <cerrno>
#include <cstdio>
#include <cstring>
#include <exception>
#include <list>
#include <memory>
#include <regex>
#include <set>
#include <sstream>
#include <system_error>

#include "LockMgr/LockMgr.h"
#include "Logger/Logger.h"
#include "Parser/ParseDDL.h"
#include "Shared/File.h"
#include "Shared/StringTransform.h"
#include "Shared/ThreadController.h"
#include "Shared/measure.h"
#include "Shared/thread_count.h"

extern bool g_cluster;
extern bool g_test_rollback_dump_restore;

constexpr static char const* table_schema_filename = "_table.sql";
constexpr static char const* table_oldinfo_filename = "_table.oldinfo";
constexpr static char const* table_epoch_filename = "_table.epoch";

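// For Boost < 1.73, boost::combine yields boost tuples, which lack the std::tuple_size
// and std::tuple_element specializations required by the structured bindings used in
// restoreTable below; provide them here.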
#if BOOST_VERSION < 107300
namespace std {

template <typename T, typename U>
struct tuple_size<boost::tuples::cons<T, U>>
    : boost::tuples::length<boost::tuples::cons<T, U>> {};
template <size_t I, typename T, typename U>
struct tuple_element<I, boost::tuples::cons<T, U>>
    : boost::tuples::element<I, boost::tuples::cons<T, U>> {};

}  // namespace std
#endif

namespace {

inline auto simple_file_closer = [](FILE* f) { std::fclose(f); };

inline std::string abs_path(const File_Namespace::GlobalFileMgr* global_file_mgr) {
  return boost::filesystem::canonical(global_file_mgr->getBasePath()).string();
}

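// Run a shell command via boost::process, capturing stdout/stderr and timing it,
// e.g. run("tar -cvf t.tar table_1_2", base_dir) runs tar from base_dir. Non-zero
// exit codes throw, except for two benign tar conditions handled below.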
inline std::string run(const std::string& cmd, const std::string& chdir = "") {
  VLOG(3) << "running cmd: " << cmd;
  int rcode;
  std::error_code ec;
  std::string output, errors;
  const auto time_ms = measure<>::execution([&]() {
    using namespace boost::process;
    ipstream stdout, stderr;
    if (!chdir.empty()) {
      rcode = system(cmd, std_out > stdout, std_err > stderr, ec, start_dir = chdir);
    } else {
      rcode = system(cmd, std_out > stdout, std_err > stderr, ec);
    }
    std::ostringstream ss_output, ss_errors;
    stdout >> ss_output.rdbuf();
    stderr >> ss_errors.rdbuf();
    output = ss_output.str();
    errors = ss_errors.str();
  });
  if (rcode || ec) {
    LOG(ERROR) << "failed cmd: " << cmd;
    LOG(ERROR) << "exit code: " << rcode;
    LOG(ERROR) << "error code: " << ec.value() << " - " << ec.message();
    LOG(ERROR) << "stdout: " << output;
    LOG(ERROR) << "stderr: " << errors;
#if defined(__APPLE__)
    // osx bsdtar options "--use-compress-program" and "--fast-read" together
    // run into a pipe write error after tar extracts the first occurrence of a
    // file and closes the read end while the decompression program still writes
    // to the pipe. bsdtar doesn't handle this situation as well as gnu tar does.
    if (1 == rcode && cmd.find("--fast-read") != std::string::npos &&
        (errors.find("cannot write decoded block") != std::string::npos ||
         errors.find("Broken pipe") != std::string::npos)) {
      // ignore this error, or lose the speed advantage of "--fast-read" on osx.
      LOG(ERROR) << "tar error ignored on osx for --fast-read";
    } else
#endif
    // circumvent tar warning on reading a file that is "changed as we read it".
    // this warning results from reading a table file under concurrent inserts.
    if (1 == rcode && errors.find("changed as we read") != std::string::npos) {
      LOG(ERROR) << "tar error ignored under concurrent inserts";
    } else {
      throw std::runtime_error("Failed to run command: " + cmd +
                               "\nexit code: " + std::to_string(rcode) + "\nerrors:\n" +
                               (rcode ? errors : ec.message()));
    }
  } else {
    VLOG(3) << "finished cmd: " << cmd;
    VLOG(3) << "time: " << time_ms << " ms";
    VLOG(3) << "stdout: " << output;
  }
  return output;
}

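// Extract a single named file from the archive into a unique temp dir and return its
// contents; --occurrence=1 (or --fast-read on osx) stops tar at the first match.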
inline std::string simple_file_cat(const std::string& archive_path,
                                   const std::string& file_name,
                                   const std::string& compression) {
#if defined(__APPLE__)
  constexpr static auto opt_occurrence = "--fast-read";
#else
  constexpr static auto opt_occurrence = "--occurrence=1";
#endif
  boost::filesystem::path temp_dir =
      boost::filesystem::temp_directory_path() / boost::filesystem::unique_path();
  boost::filesystem::create_directories(temp_dir);
  run("tar " + compression + " -xvf " + get_quoted_string(archive_path) + " " +
          opt_occurrence + " " + file_name,
      temp_dir.string());
  const auto output = run("cat " + (temp_dir / file_name).string());
  boost::filesystem::remove_all(temp_dir);
  return output;
}

inline std::string get_table_schema(const std::string& archive_path,
                                    const std::string& table,
                                    const std::string& compression) {
  const auto schema_str =
      simple_file_cat(archive_path, table_schema_filename, compression);
  std::regex regex("@T");
  return std::regex_replace(schema_str, regex, table);
}

// Adjust column ids in chunk keys in a table's data files under a temp_data_dir,
// including files of all shards of the table. Can be slow for big files but should
// scale faster than refragmentizing. Table altering should be rare for olap.
void adjust_altered_table_files(const std::string& temp_data_dir,
                                const std::unordered_map<int, int>& column_ids_map) {
  boost::filesystem::path base_path(temp_data_dir);
  boost::filesystem::recursive_directory_iterator end_it;
  ThreadController_NS::SimpleThreadController<> thread_controller(cpu_threads());
  for (boost::filesystem::recursive_directory_iterator fit(base_path); fit != end_it;
       ++fit) {
    if (boost::filesystem::is_regular_file(fit->status())) {
      const std::string file_path = fit->path().string();
      const std::string file_name = fit->path().filename().string();
      std::vector<std::string> tokens;
      boost::split(tokens, file_name, boost::is_any_of("."));
      // ref. FileMgr::init for hint of data file name layout
      if (tokens.size() > 2 && MAPD_FILE_EXT == "." + tokens[2]) {
        thread_controller.startThread([file_name, file_path, tokens, &column_ids_map] {
          const auto page_size = boost::lexical_cast<int64_t>(tokens[1]);
          const auto file_size = boost::filesystem::file_size(file_path);
          std::unique_ptr<FILE, decltype(simple_file_closer)> fp(
              std::fopen(file_path.c_str(), "r+"), simple_file_closer);
          if (!fp) {
            throw std::runtime_error("Failed to open " + file_path +
                                     " for update: " + std::strerror(errno));
          }
          // ref. FileInfo::openExistingFile for hint of chunk header layout
          for (size_t page = 0; page < file_size / page_size; ++page) {
            int ints[8];
            if (0 != std::fseek(fp.get(), page * page_size, SEEK_SET)) {
              throw std::runtime_error("Failed to seek to page# " + std::to_string(page) +
                                       file_path + " for read: " + std::strerror(errno));
            }
            if (1 != fread(ints, sizeof ints, 1, fp.get())) {
              throw std::runtime_error("Failed to read " + file_path + ": " +
                                       std::strerror(errno));
            }
            if (ints[0] > 0) {  // header size
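              // ints[3] is the column id component of the chunk key; remap it through
              // column_ids_map if the source table had been ALTERed.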
              auto cit = column_ids_map.find(ints[3]);
              CHECK(cit != column_ids_map.end());
              if (ints[3] != cit->second) {
                ints[3] = cit->second;
                if (0 != std::fseek(fp.get(), page * page_size, SEEK_SET)) {
                  throw std::runtime_error("Failed to seek to page# " +
                                           std::to_string(page) + file_path +
                                           " for write: " + std::strerror(errno));
                }
                if (1 != fwrite(ints, sizeof ints, 1, fp.get())) {
                  throw std::runtime_error("Failed to write " + file_path + ": " +
                                           std::strerror(errno));
                }
              }
            }
          }
        });
        thread_controller.checkThreadsStatus();
      }
    }
  }
  thread_controller.finish();
}

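// Move the untarred table directories under temp_data_dir (those whose names begin
// with name_prefix) into the table's target directories under the global file manager
// base path.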
void rename_table_directories(const File_Namespace::GlobalFileMgr* global_file_mgr,
                              const std::string& temp_data_dir,
                              const std::vector<std::string>& target_paths,
                              const std::string& name_prefix) {
  boost::filesystem::path base_path(temp_data_dir);
  boost::filesystem::directory_iterator end_it;
  int target_path_index = 0;
  for (boost::filesystem::directory_iterator fit(base_path); fit != end_it; ++fit) {
    if (!boost::filesystem::is_regular_file(fit->status())) {
      const std::string file_path = fit->path().string();
      const std::string file_name = fit->path().filename().string();
      if (boost::istarts_with(file_name, name_prefix)) {
        const std::string target_path =
            abs_path(global_file_mgr) + "/" + target_paths[target_path_index++];
        if (std::rename(file_path.c_str(), target_path.c_str())) {
          throw std::runtime_error("Failed to rename file " + file_path + " to " +
                                   target_path + ": " + std::strerror(errno));
        }
      }
    }
  }
}

}  // namespace

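// Dump a table's schema, column-old-info, epoch and data/dict directories into a
// single tar archive, holding only a schema read lock so concurrent inserts can
// proceed.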
void TableArchiver::dumpTable(const TableDescriptor* td,
                              const std::string& archive_path,
                              const std::string& compression) {
  if (g_cluster) {
    throw std::runtime_error("DUMP/RESTORE is not supported yet on distributed setup.");
  }
  if (boost::filesystem::exists(archive_path)) {
    throw std::runtime_error("Archive " + archive_path + " already exists.");
  }
  if (td->isView || td->persistenceLevel != Data_Namespace::MemoryLevel::DISK_LEVEL) {
    throw std::runtime_error("Dumping view or temporary table is not supported.");
  }
  // collect paths of files to archive
  const auto global_file_mgr = cat_->getDataMgr().getGlobalFileMgr();
  std::vector<std::string> file_paths;
  auto file_writer = [&file_paths, global_file_mgr](const std::string& file_name,
                                                    const std::string& file_type,
                                                    const std::string& file_data) {
    const auto file_path = abs_path(global_file_mgr) + "/" + file_name;
    std::unique_ptr<FILE, decltype(simple_file_closer)> fp(
        std::fopen(file_path.c_str(), "w"), simple_file_closer);
    if (!fp) {
      throw std::runtime_error("Failed to create " + file_type + " file '" + file_path +
                               "': " + std::strerror(errno));
    }
    if (std::fwrite(file_data.data(), 1, file_data.size(), fp.get()) < file_data.size()) {
      throw std::runtime_error("Failed to write " + file_type + " file '" + file_path +
                               "': " + std::strerror(errno));
    }
    file_paths.push_back(file_name);
  };
  // Prevent modification of the table schema during a dump operation, while allowing
  // concurrent inserts.
  auto table_read_lock =
      lockmgr::TableSchemaLockMgr::getReadLockForTable(*cat_, td->tableName);
  const auto table_name = td->tableName;
  {
    // - gen schema file
    const auto schema_str = cat_->dumpSchema(td);
    file_writer(table_schema_filename, "table schema", schema_str);
    // - gen column-old-info file
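    // each entry is "name:id:dict_dir" so that restore can remap column ids and
    // relocate dictionaries by column name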
    const auto cds = cat_->getAllColumnMetadataForTable(td->tableId, true, true, true);
    std::vector<std::string> column_oldinfo;
    std::transform(cds.begin(),
                   cds.end(),
                   std::back_inserter(column_oldinfo),
                   [&](const auto cd) -> std::string {
                     return cd->columnName + ":" + std::to_string(cd->columnId) + ":" +
                            cat_->getColumnDictDirectory(cd);
                   });
    const auto column_oldinfo_str = boost::algorithm::join(column_oldinfo, " ");
    file_writer(table_oldinfo_filename, "table old info", column_oldinfo_str);
    // - gen table epoch
    const auto epoch = cat_->getTableEpoch(cat_->getCurrentDB().dbId, td->tableId);
    file_writer(table_epoch_filename, "table epoch", std::to_string(epoch));
    // - collect table data file paths ...
    const auto data_file_dirs = cat_->getTableDataDirectories(td);
    file_paths.insert(file_paths.end(), data_file_dirs.begin(), data_file_dirs.end());
    // - collect table dict file paths ...
    const auto dict_file_dirs = cat_->getTableDictDirectories(td);
    file_paths.insert(file_paths.end(), dict_file_dirs.begin(), dict_file_dirs.end());
    // tar takes time. release cat lock to yield the cat to concurrent CREATE statements.
  }
  // run tar to archive the files ... this may take a while !!
  run("tar " + compression + " -cvf " + get_quoted_string(archive_path) + " " +
          boost::algorithm::join(file_paths, " "),
      abs_path(global_file_mgr));
}

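// Restore flow: dry-run the archived schema and check table/column compatibility,
// untar into a temp dir, fix up chunk headers if the source table had been altered,
// swap data/dict dirs into place with a backup dir for rollback, and finally set the
// table epoch recorded in the archive.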
// Restore data and dict files of a table from a tgz archive.
void TableArchiver::restoreTable(const Catalog_Namespace::SessionInfo& session,
                                 const TableDescriptor* td,
                                 const std::string& archive_path,
                                 const std::string& compression) {
  if (g_cluster) {
    throw std::runtime_error("DUMP/RESTORE is not supported yet on distributed setup.");
  }
  if (!boost::filesystem::exists(archive_path)) {
    throw std::runtime_error("Archive " + archive_path + " does not exist.");
  }
  if (td->isView || td->persistenceLevel != Data_Namespace::MemoryLevel::DISK_LEVEL) {
    throw std::runtime_error("Restoring view or temporary table is not supported.");
  }
  // Obtain table schema read lock to prevent modification of the schema during
  // restoration
  const auto table_read_lock =
      lockmgr::TableSchemaLockMgr::getReadLockForTable(*cat_, td->tableName);
  // prevent concurrent inserts into table during restoration
  const auto insert_data_lock =
      lockmgr::InsertDataLockMgr::getWriteLockForTable(*cat_, td->tableName);

  // untar takes time. no grab of cat lock to yield to concurrent CREATE stmts.
  const auto global_file_mgr = cat_->getDataMgr().getGlobalFileMgr();
  // dirs where src files are untarred and dst files are backed up
  constexpr static const auto temp_data_basename = "_data";
  constexpr static const auto temp_back_basename = "_back";
  const auto temp_data_dir = abs_path(global_file_mgr) + "/" + temp_data_basename;
  const auto temp_back_dir = abs_path(global_file_mgr) + "/" + temp_back_basename;
  // clean up tmp dirs and files in any case
  auto tmp_files_cleaner = [&](void*) {
    run("rm -rf " + temp_data_dir + " " + temp_back_dir);
    run("rm -f " + abs_path(global_file_mgr) + "/" + table_schema_filename);
    run("rm -f " + abs_path(global_file_mgr) + "/" + table_oldinfo_filename);
    run("rm -f " + abs_path(global_file_mgr) + "/" + table_epoch_filename);
  };
  std::unique_ptr<decltype(tmp_files_cleaner), decltype(tmp_files_cleaner)> tfc(
      &tmp_files_cleaner, tmp_files_cleaner);
  // extract & parse schema
  const auto schema_str = get_table_schema(archive_path, td->tableName, compression);
  const auto create_table_stmt =
      Parser::parseDDL<Parser::CreateTableStmt>("table schema", schema_str);
  // verify compatibility between source and destination schemas
  TableDescriptor src_td;
  std::list<ColumnDescriptor> src_columns;
  std::vector<Parser::SharedDictionaryDef> shared_dict_defs;
  create_table_stmt->executeDryRun(session, src_td, src_columns, shared_dict_defs);
  // - sanity check table-level compatibility
  if (src_td.hasDeletedCol != td->hasDeletedCol) {
    // TODO: allow the case, in which src data enables vacuum while
    // dst doesn't, by simply discarding src $deleted column data.
    throw std::runtime_error("Incompatible table VACUUM option");
  }
  if (src_td.nShards != td->nShards) {
    // TODO: allow different shard numbers if they have a "GCD",
    // by splitting/merging src data files before drop into dst.
    throw std::runtime_error("Unmatched number of table shards");
  }
  // - sanity check column-level compatibility (based on column names)
  const auto dst_columns =
      cat_->getAllColumnMetadataForTable(td->tableId, false, false, false);
  if (dst_columns.size() != src_columns.size()) {
    throw std::runtime_error("Unmatched number of table columns");
  }
  for (const auto& [src_cd, dst_cd] : boost::combine(src_columns, dst_columns)) {
    if (src_cd.columnType.get_type_name() != dst_cd->columnType.get_type_name() ||
        src_cd.columnType.get_compression_name() !=
            dst_cd->columnType.get_compression_name()) {
      throw std::runtime_error("Incompatible types on column " + src_cd.columnName);
    }
  }
  // extract src table column ids (ALL columns incl. system/virtual/phy geo cols)
  const auto all_src_oldinfo_str =
      simple_file_cat(archive_path, table_oldinfo_filename, compression);
  std::vector<std::string> src_oldinfo_strs;
  boost::algorithm::split(src_oldinfo_strs,
                          all_src_oldinfo_str,
                          boost::is_any_of(" "),
                          boost::token_compress_on);
  auto all_dst_columns =
      cat_->getAllColumnMetadataForTable(td->tableId, true, true, true);
  if (src_oldinfo_strs.size() != all_dst_columns.size()) {
    throw std::runtime_error("Source table has an unmatched number of columns: " +
                             std::to_string(src_oldinfo_strs.size()) + " vs " +
                             std::to_string(all_dst_columns.size()));
  }
  // build a map of src column ids and dst column ids, just in case the src table has
  // been ALTERed before and chunk keys of the src table need to be adjusted accordingly.
  // note: this map is used only for the case of migrating a table and not for restoring
  // a table. When restoring a table, the two tables must have the same column ids.
  //
  // also build a map of src dict paths and dst dict paths for relocating src dicts
  std::unordered_map<int, int> column_ids_map;
  std::unordered_map<std::string, std::string> dict_paths_map;
  // sort inputs of transform in lexical order of column names for correct mappings
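  // each src oldinfo entry is "name:id:dict_dir"; tokens[0] (the column name) is the
  // sort key, tokens[1] is the old column id, tokens[2] is the old dict directory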
  std::list<std::vector<std::string>> src_oldinfo_tokens;
  std::transform(
      src_oldinfo_strs.begin(),
      src_oldinfo_strs.end(),
      std::back_inserter(src_oldinfo_tokens),
      [](const auto& src_oldinfo_str) -> auto {
        std::vector<std::string> tokens;
        boost::algorithm::split(
            tokens, src_oldinfo_str, boost::is_any_of(":"), boost::token_compress_on);
        return tokens;
      });
  src_oldinfo_tokens.sort(
      [](const auto& lhs, const auto& rhs) { return lhs[0].compare(rhs[0]) < 0; });
  all_dst_columns.sort(
      [](auto a, auto b) { return a->columnName.compare(b->columnName) < 0; });
  // transform inputs into the maps
  std::transform(src_oldinfo_tokens.begin(),
                 src_oldinfo_tokens.end(),
                 all_dst_columns.begin(),
                 std::inserter(column_ids_map, column_ids_map.end()),
                 [&](const auto& tokens, const auto& cd) -> std::pair<int, int> {
                   VLOG(3) << boost::algorithm::join(tokens, ":") << " ==> "
                           << cd->columnName << ":" << cd->columnId;
                   dict_paths_map[tokens[2]] = cat_->getColumnDictDirectory(cd);
                   return {boost::lexical_cast<int>(tokens[1]), cd->columnId};
                 });
  bool was_table_altered = false;
  std::for_each(column_ids_map.begin(), column_ids_map.end(), [&](auto& it) {
    was_table_altered = was_table_altered || it.first != it.second;
  });
  VLOG(3) << "was_table_altered = " << was_table_altered;
  // extract all data files to a temp dir. will swap with dst table dir after all set,
  // otherwise will corrupt table in case any bad thing happens in the middle.
  run("rm -rf " + temp_data_dir);
  run("mkdir -p " + temp_data_dir);
  run("tar " + compression + " -xvf " + get_quoted_string(archive_path), temp_data_dir);
  // if table was ever altered after it was created, update column ids in chunk headers.
  if (was_table_altered) {
    const auto time_ms = measure<>::execution(
        [&]() { adjust_altered_table_files(temp_data_dir, column_ids_map); });
    VLOG(3) << "adjust_altered_table_files: " << time_ms << " ms";
  }
  // finally,,, swap table data/dict dirs!
  const auto data_file_dirs = cat_->getTableDataDirectories(td);
  const auto dict_file_dirs = cat_->getTableDictDirectories(td);
  // move current target dirs, if they exist, to the backup dir
  std::vector<std::string> both_file_dirs;
  std::merge(data_file_dirs.begin(),
             data_file_dirs.end(),
             dict_file_dirs.begin(),
             dict_file_dirs.end(),
             std::back_inserter(both_file_dirs));
  bool backup_completed = false;
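  // Swap in two steps so a failure can be rolled back: 1) move existing dst dirs into
  // temp_back_dir, 2) move the untarred src dirs/dicts into place; on any exception,
  // discard the half-moved src dirs and restore the dst dirs from temp_back_dir.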
  try {
    run("rm -rf " + temp_back_dir);
    run("mkdir -p " + temp_back_dir);
    for (const auto& dir : both_file_dirs) {
      const auto dir_full_path = abs_path(global_file_mgr) + "/" + dir;
      if (boost::filesystem::is_directory(dir_full_path)) {
        run("mv " + dir_full_path + " " + temp_back_dir);
      }
    }
    backup_completed = true;
    // accord src data dirs to dst
    rename_table_directories(
        cat_->getDataMgr().getGlobalFileMgr(), temp_data_dir, data_file_dirs, "table_");
    // accord src dict dirs to dst
    for (const auto& dit : dict_paths_map) {
      if (!dit.first.empty() && !dit.second.empty()) {
        const auto src_dict_path = temp_data_dir + "/" + dit.first;
        const auto dst_dict_path = abs_path(global_file_mgr) + "/" + dit.second;
        run("mv " + src_dict_path + " " + dst_dict_path);
      }
    }
    // throw if sanity test forces a rollback
    if (g_test_rollback_dump_restore) {
      throw std::runtime_error("lol!");
    }
  } catch (...) {
    // once backup is completed, whatever is in abs_path(global_file_mgr) is the "src"
    // dirs that are to be rolled back and discarded
    if (backup_completed) {
      run("rm -rf " + boost::algorithm::join(both_file_dirs, " "),
          abs_path(global_file_mgr));
    }
    // complete rollback by recovering original "dst" table dirs from backup dir
    boost::filesystem::path base_path(temp_back_dir);
    boost::filesystem::directory_iterator end_it;
    for (boost::filesystem::directory_iterator fit(base_path); fit != end_it; ++fit) {
      run("mv " + fit->path().string() + " .", abs_path(global_file_mgr));
    }
    throw;
  }
  // set for reloading table from the restored/migrated files
  const auto epoch = simple_file_cat(archive_path, table_epoch_filename, compression);
  cat_->setTableEpoch(
      cat_->getCurrentDB().dbId, td->tableId, boost::lexical_cast<int>(epoch));
}

// Migrate a table, which doesn't exist in the current db, from a tar ball to the db.
// This actually creates the table and restores data/dict files from the tar ball.
void TableArchiver::restoreTable(const Catalog_Namespace::SessionInfo& session,
                                 const std::string& table_name,
                                 const std::string& archive_path,
                                 const std::string& compression) {
  // replace table name and drop foreign dict references
  const auto schema_str = get_table_schema(archive_path, table_name, compression);
  Parser::parseDDL<Parser::CreateTableStmt>("table schema", schema_str)->execute(session);
  try {
    restoreTable(
        session, cat_->getMetadataForTable(table_name), archive_path, compression);
  } catch (...) {
    Parser::parseDDL<Parser::DropTableStmt>("statement",
                                            "DROP TABLE IF EXISTS " + table_name + ";")
        ->execute(session);
    throw;
  }
}