OmniSciDB  a5dc49c757
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
MetaConnect.java
Go to the documentation of this file.
1 /*
2  * Copyright 2022 HEAVY.AI, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 package com.mapd.metadata;
18 
19 import com.google.common.collect.ImmutableList;
20 import com.google.gson.Gson;
21 import com.google.gson.JsonArray;
22 import com.google.gson.JsonElement;
23 import com.google.gson.JsonObject;
29 
30 import org.apache.calcite.schema.Table;
31 import org.apache.thrift.TException;
32 import org.apache.thrift.protocol.TBinaryProtocol;
33 import org.apache.thrift.protocol.TProtocol;
34 import org.apache.thrift.transport.TServerSocket;
35 import org.apache.thrift.transport.TSocket;
36 import org.apache.thrift.transport.TTransport;
37 import org.apache.thrift.transport.TTransportException;
38 import org.slf4j.Logger;
39 import org.slf4j.LoggerFactory;
40 
41 import java.io.File;
42 import java.io.FileInputStream;
43 import java.io.IOException;
44 import java.sql.Connection;
45 import java.sql.DriverManager;
46 import java.sql.ResultSet;
47 import java.sql.SQLException;
48 import java.sql.Statement;
49 import java.util.ArrayList;
50 import java.util.HashSet;
51 import java.util.List;
52 import java.util.Map;
53 import java.util.Map.Entry;
54 import java.util.Set;
55 import java.util.concurrent.ConcurrentHashMap;
56 
57 import ai.heavy.thrift.server.Heavy;
58 import ai.heavy.thrift.server.TColumnType;
59 import ai.heavy.thrift.server.TDBException;
60 import ai.heavy.thrift.server.TDBInfo;
61 import ai.heavy.thrift.server.TDatumType;
62 import ai.heavy.thrift.server.TEncodingType;
63 import ai.heavy.thrift.server.TTableDetails;
64 import ai.heavy.thrift.server.TTypeInfo;
65 
66 public class MetaConnect {
67  final static Logger HEAVYDBLOGGER = LoggerFactory.getLogger(MetaConnect.class);
68  private final String dataDir;
69  private final String default_db;
70  private final HeavyDBUser currentUser;
71  private final int dbPort;
72  private Connection catConn;
73  private final HeavyDBParser parser;
74 
75  private static final int KBOOLEAN = 1;
76  private static final int KCHAR = 2;
77  private static final int KVARCHAR = 3;
78  private static final int KNUMERIC = 4;
79  private static final int KDECIMAL = 5;
80  private static final int KINT = 6;
81  private static final int KSMALLINT = 7;
82  private static final int KFLOAT = 8;
83  private static final int KDOUBLE = 9;
84  private static final int KTIME = 10;
85  private static final int KTIMESTAMP = 11;
86  private static final int KBIGINT = 12;
87  private static final int KTEXT = 13;
88  private static final int KDATE = 14;
89  private static final int KARRAY = 15;
90  private static final int KINTERVAL_DAY_TIME = 16;
91  private static final int KINTERVAL_YEAR_MONTH = 17;
92  private static final int KPOINT = 18;
93  private static final int KLINESTRING = 19;
94  private static final int KPOLYGON = 20;
95  private static final int KMULTIPOLYGON = 21;
96  private static final int KTINYINT = 22;
97  private static final int KMULTILINESTRING = 30;
98  private static final int KMULTIPOINT = 31;
99 
100  private static final String CATALOG_DIR_NAME = "catalogs";
101  private static volatile Map<String, Set<String>> DATABASE_TO_TABLES =
102  new ConcurrentHashMap<>();
103  private static volatile Map<List<String>, Table> DB_TABLE_DETAILS =
104  new ConcurrentHashMap<>();
106 
107  public MetaConnect(int dbPort,
108  String dataDir,
109  HeavyDBUser currentHeavyDBUser,
112  String db) {
113  this.dataDir = dataDir;
114  if (db != null) {
115  this.default_db = db;
116  } else {
117  if (currentHeavyDBUser != null) {
118  this.default_db = currentHeavyDBUser.getDB();
119  } else {
120  this.default_db = null;
121  }
122  }
123  this.currentUser = currentHeavyDBUser;
124  this.dbPort = dbPort;
125  this.parser = parser;
126  this.sock_transport_properties = skT;
127 
128  // check to see if we have a populated DATABASE_TO_TABLES structure
129  // first time in we need to make sure this gets populated
130  // It is OK to use a MetaConnect without a user
131  // but it should not attempt to populate the DB
132  if (currentUser != null && DATABASE_TO_TABLES.size() == 0) {
133  // get all databases
135  }
136  }
137 
138  public MetaConnect(int dbPort,
139  String dataDir,
140  HeavyDBUser currentHeavyDBUser,
143  this(dbPort, dataDir, currentHeavyDBUser, parser, skT, null);
144  }
145 
146  public List<String> getDatabases() {
147  List<String> dbList = new ArrayList<String>(DATABASE_TO_TABLES.size());
148  for (String db : DATABASE_TO_TABLES.keySet()) {
149  dbList.add(db);
150  }
151  return dbList;
152  }
153 
  /**
   * Opens a JDBC connection to the sqlite file backing {@code catalog} and
   * stores it in {@link #catConn}. Any failure (missing driver, unreadable
   * file) is logged and rethrown as a RuntimeException.
   */
  private void connectToCatalog(String catalog) {
    try {
      // Force-load the sqlite JDBC driver so DriverManager can find it.
      Class.forName("org.sqlite.JDBC");
    } catch (ClassNotFoundException ex) {
      String err = "Could not find class for metadata connection; DB: '" + catalog
              + "' data dir '" + dataDir + "', error was " + ex.getMessage();
      HEAVYDBLOGGER.error(err);
      throw new RuntimeException(err);
    }
    // The catalog file name is resolved case-insensitively on disk.
    String connectURL = "jdbc:sqlite:" + dataDir + "/" + CATALOG_DIR_NAME + "/"
            + getCatalogFileName(catalog);
    try {
      catConn = DriverManager.getConnection(connectURL);
    } catch (SQLException ex) {
      String err = "Could not establish a connection for metadata; DB: '" + catalog
              + "' data dir '" + dataDir + "', error was " + ex.getMessage();
      HEAVYDBLOGGER.error(err);
      throw new RuntimeException(err);
    }
    HEAVYDBLOGGER.debug("Opened database successfully");
  }
176 
177  String getCatalogFileName(String catalog) {
178  String path = dataDir + "/" + CATALOG_DIR_NAME;
179  File directory = new File(path);
180  if (!directory.isDirectory()) {
181  throw new RuntimeException("Catalog directory not found at: " + path);
182  }
183  for (File file : directory.listFiles()) {
184  if (file.getName().equalsIgnoreCase(catalog)) {
185  return file.getName();
186  }
187  }
188  throw new RuntimeException("Database file not found for: " + catalog);
189  }
190 
  /**
   * Closes the sqlite connection opened by {@link #connectToCatalog}.
   * A close failure is logged and rethrown as a RuntimeException.
   */
  private void disconnectFromCatalog() {
    try {
      catConn.close();
    } catch (SQLException ex) {
      String err = "Could not disconnect from metadata "
              + " data dir '" + dataDir + "', error was " + ex.getMessage();
      HEAVYDBLOGGER.error(err);
      throw new RuntimeException(err);
    }
  }
201 
202  private void connectToDBCatalog() {
204  }
205 
206  public Table getTable(String tableName) {
207  List<String> dbTable =
208  ImmutableList.of(default_db.toUpperCase(), tableName.toUpperCase());
209  Table cTable = DB_TABLE_DETAILS.get(dbTable);
210  if (cTable != null) {
211  HEAVYDBLOGGER.debug("Metaconnect DB " + default_db + " get table " + tableName
212  + " details " + cTable);
213  return cTable;
214  }
215 
216  TTableDetails td = get_table_details(tableName);
217 
218  if (td.getView_sql() == null || td.getView_sql().isEmpty()) {
219  HEAVYDBLOGGER.debug("Processing a table");
220  Table rTable = new HeavyDBTable(td);
221  DB_TABLE_DETAILS.putIfAbsent(dbTable, rTable);
222  HEAVYDBLOGGER.debug("Metaconnect DB " + default_db + " get table " + tableName
223  + " details " + rTable + " Not in buffer");
224  return rTable;
225  } else {
226  HEAVYDBLOGGER.debug("Processing a view");
227  Table rTable = new HeavyDBView(getViewSql(tableName), td, parser);
228  DB_TABLE_DETAILS.putIfAbsent(dbTable, rTable);
229  HEAVYDBLOGGER.debug("Metaconnect DB " + default_db + " get view " + tableName
230  + " details " + rTable + " Not in buffer");
231  return rTable;
232  }
233  }
234 
235  public Set<String> getTables() {
236  Set<String> mSet = DATABASE_TO_TABLES.get(default_db.toUpperCase());
237  if (mSet != null && mSet.size() > 0) {
238  HEAVYDBLOGGER.debug("Metaconnect DB getTables " + default_db + " tables " + mSet);
239  return mSet;
240  }
241 
242  if (dbPort == -1) {
243  // use sql
245  Set<String> ts = getTables_SQL();
247  DATABASE_TO_TABLES.put(default_db.toUpperCase(), ts);
248  HEAVYDBLOGGER.debug(
249  "Metaconnect DB getTables " + default_db + " tables " + ts + " from catDB");
250  return ts;
251  }
252  // use thrift direct to local server
253  try {
254  TProtocol protocol = null;
255  TTransport transport =
256  sock_transport_properties.openClientTransport("localhost", dbPort);
257  if (!transport.isOpen()) transport.open();
258  protocol = new TBinaryProtocol(transport);
259 
260  Heavy.Client client = new Heavy.Client(protocol);
261  List<String> tablesList =
262  client.get_tables_for_database(currentUser.getSession(), default_db);
263  Set<String> ts = new HashSet<String>(tablesList.size());
264  for (String tableName : tablesList) {
265  ts.add(tableName);
266  }
267 
268  transport.close();
269  DATABASE_TO_TABLES.put(default_db.toUpperCase(), ts);
270  HEAVYDBLOGGER.debug("Metaconnect DB getTables " + default_db + " tables " + ts
271  + " from server");
272  return ts;
273 
274  } catch (TTransportException ex) {
275  HEAVYDBLOGGER.error("TTransportException on port [" + dbPort + "]");
276  HEAVYDBLOGGER.error(ex.toString());
277  throw new RuntimeException(ex.toString());
278  } catch (TDBException ex) {
279  HEAVYDBLOGGER.error(ex.getError_msg());
280  throw new RuntimeException(ex.getError_msg());
281  } catch (TException ex) {
282  HEAVYDBLOGGER.error(ex.toString());
283  throw new RuntimeException(ex.toString());
284  }
285  }
286 
287  private Set<String> getTables_SQL() {
289  Set<String> tableSet = new HashSet<String>();
290  Statement stmt = null;
291  ResultSet rs = null;
292  String sqlText = "";
293  try {
294  stmt = catConn.createStatement();
295 
296  // get the tables
297  rs = stmt.executeQuery("SELECT name FROM mapd_tables ");
298  while (rs.next()) {
299  tableSet.add(rs.getString("name"));
300  /*--*/
301  HEAVYDBLOGGER.debug("Object name = " + rs.getString("name"));
302  }
303  rs.close();
304  stmt.close();
305 
306  } catch (Exception e) {
307  String err = "error trying to get all the tables, error was " + e.getMessage();
308  HEAVYDBLOGGER.error(err);
309  throw new RuntimeException(err);
310  }
312 
313  try {
314  // open temp table json file
315  final String filePath =
316  dataDir + "/" + CATALOG_DIR_NAME + "/" + default_db + "_temp_tables.json";
317  HEAVYDBLOGGER.debug("Opening temp table file at " + filePath);
318  String tempTablesJsonStr;
319  try {
320  File tempTablesFile = new File(filePath);
321  FileInputStream tempTablesStream = new FileInputStream(tempTablesFile);
322  byte[] data = new byte[(int) tempTablesFile.length()];
323  tempTablesStream.read(data);
324  tempTablesStream.close();
325 
326  tempTablesJsonStr = new String(data, "UTF-8");
327  } catch (java.io.FileNotFoundException e) {
328  return tableSet;
329  }
330 
331  Gson gson = new Gson();
332  JsonObject fileParentObject = gson.fromJson(tempTablesJsonStr, JsonObject.class);
333  for (Entry<String, JsonElement> member : fileParentObject.entrySet()) {
334  String tableName = member.getKey();
335  tableSet.add(tableName);
336  /*--*/
337  HEAVYDBLOGGER.debug("Temp table object name = " + tableName);
338  }
339 
340  } catch (Exception e) {
341  String err = "error trying to load temporary tables from json file, error was "
342  + e.getMessage();
343  HEAVYDBLOGGER.error(err);
344  throw new RuntimeException(err);
345  }
346 
347  return tableSet;
348  }
349 
350  public TTableDetails get_table_details(String tableName) {
351  if (dbPort == -1) {
352  // use sql
354  TTableDetails td = get_table_detail_SQL(tableName);
356  return td;
357  }
358  try {
359  // use thrift direct to local server
360  TProtocol protocol = null;
361 
362  TTransport transport =
363  sock_transport_properties.openClientTransport("localhost", dbPort);
364  if (!transport.isOpen()) transport.open();
365  protocol = new TBinaryProtocol(transport);
366 
367  Heavy.Client client = new Heavy.Client(protocol);
368  TTableDetails td = client.get_internal_table_details_for_database(
369  currentUser.getSession(), tableName, default_db);
370  transport.close();
371 
372  return td;
373  } catch (TTransportException ex) {
374  HEAVYDBLOGGER.error(ex.toString());
375  throw new RuntimeException(ex.toString());
376  } catch (TDBException ex) {
377  HEAVYDBLOGGER.error(ex.getError_msg());
378  throw new RuntimeException(ex.getError_msg());
379  } catch (TException ex) {
380  HEAVYDBLOGGER.error(ex.toString());
381  throw new RuntimeException(ex.toString());
382  }
383  }
384 
385  public static final int get_physical_cols(int type) {
386  switch (type) {
387  case KPOINT:
388  return 1; // coords
389  case KMULTIPOINT:
390  case KLINESTRING:
391  return 2; // coords, bounds
392  case KMULTILINESTRING:
393  return 3; // coords, linestring_sizes, bounds
394  case KPOLYGON:
395  return 3; // coords, ring_sizes, bounds
396  case KMULTIPOLYGON:
397  return 4; // coords, ring_sizes, poly_rings, bounds
398  default:
399  break;
400  }
401  return 0;
402  }
403 
404  public static final boolean is_geometry(int type) {
405  return type == KPOINT || type == KLINESTRING || type == KMULTILINESTRING
406  || type == KPOLYGON || type == KMULTIPOLYGON || type == KMULTIPOINT;
407  }
408 
  /**
   * Builds a TTableDetails for {@code tableName} by reading mapd_columns from
   * the sqlite catalog. Falls back to the temp-table JSON file when no table
   * id is found, marking the result as temporary. Assumes an open catalog
   * connection ({@code catConn}).
   */
  private TTableDetails get_table_detail_SQL(String tableName) {
    TTableDetails td = new TTableDetails();
    // Touch row_desc so it is initialized (non-null) before columns are added.
    td.getRow_descIterator();
    int id = getTableId(tableName);
    if (id == -1) {
      try {
        // need to mark it as temporary table
        TTableDetails tempTableTd = get_table_detail_JSON(tableName);
        tempTableTd.is_temporary = true;
        return tempTableTd;
      } catch (Exception e) {
        String err =
                "Table '" + tableName + "' does not exist for DB '" + default_db + "'";
        HEAVYDBLOGGER.error(err);
        throw new RuntimeException(err);
      }
    }

    // read data from table
    Statement stmt = null;
    ResultSet rs = null;
    try {
      stmt = catConn.createStatement();
      HEAVYDBLOGGER.debug("table id is " + id);
      HEAVYDBLOGGER.debug("table name is " + tableName);
      // id is an int from the catalog itself, so this String.format cannot
      // inject; deleted columns are filtered out in SQL.
      String query = String.format(
              "SELECT * FROM mapd_columns where tableid = %d and not is_deletedcol order by columnid;",
              id);
      HEAVYDBLOGGER.debug(query);
      rs = stmt.executeQuery(query);
      int skip_physical_cols = 0;
      while (rs.next()) {
        String colName = rs.getString("name");
        HEAVYDBLOGGER.debug("name = " + colName);
        int colType = rs.getInt("coltype");
        HEAVYDBLOGGER.debug("coltype = " + colType);
        int colSubType = rs.getInt("colsubtype");
        HEAVYDBLOGGER.debug("colsubtype = " + colSubType);
        int compression = rs.getInt("compression");
        HEAVYDBLOGGER.debug("compression = " + compression);
        int compression_param = rs.getInt("comp_param");
        HEAVYDBLOGGER.debug("comp_param = " + compression_param);
        int size = rs.getInt("size");
        HEAVYDBLOGGER.debug("size = " + size);
        int colDim = rs.getInt("coldim");
        HEAVYDBLOGGER.debug("coldim = " + colDim);
        int colScale = rs.getInt("colscale");
        HEAVYDBLOGGER.debug("colscale = " + colScale);
        boolean isNotNull = rs.getBoolean("is_notnull");
        HEAVYDBLOGGER.debug("is_notnull = " + isNotNull);
        boolean isSystemCol = rs.getBoolean("is_systemcol");
        HEAVYDBLOGGER.debug("is_systemcol = " + isSystemCol);
        boolean isVirtualCol = rs.getBoolean("is_virtualcol");
        HEAVYDBLOGGER.debug("is_vitrualcol = " + isVirtualCol);
        HEAVYDBLOGGER.debug("");
        TColumnType tct = new TColumnType();
        TTypeInfo tti = new TTypeInfo();
        TDatumType tdt;

        // Array columns carry their element type in colsubtype.
        if (colType == KARRAY) {
          tti.is_array = true;
          tdt = typeToThrift(colSubType);
        } else {
          tti.is_array = false;
          tdt = typeToThrift(colType);
        }

        tti.nullable = !isNotNull;
        tti.encoding = encodingToThrift(compression);
        tti.comp_param = compression_param;
        tti.size = size;
        tti.type = tdt;
        tti.scale = colScale;
        tti.precision = colDim;

        tct.col_name = colName;
        tct.col_type = tti;
        tct.is_system = isSystemCol;

        // A geo column is followed by hidden physical columns that must be
        // skipped; the logical geo column itself is always emitted.
        if (skip_physical_cols <= 0) skip_physical_cols = get_physical_cols(colType);
        if (is_geometry(colType) || skip_physical_cols-- <= 0) td.addToRow_desc(tct);
      }
    } catch (Exception e) {
      String err = "error trying to read from mapd_columns, error was " + e.getMessage();
      HEAVYDBLOGGER.error(err);
      throw new RuntimeException(err);
    } finally {
      if (rs != null) {
        try {
          rs.close();
        } catch (SQLException ex) {
          String err = "Could not close resultset, error was " + ex.getMessage();
          HEAVYDBLOGGER.error(err);
          throw new RuntimeException(err);
        }
      }
      if (stmt != null) {
        try {
          stmt.close();
        } catch (SQLException ex) {
          String err = "Could not close stmt, error was " + ex.getMessage();
          HEAVYDBLOGGER.error(err);
          throw new RuntimeException(err);
        }
      }
    }
    // Views additionally carry their defining SQL text.
    if (isView(tableName)) {
      td.setView_sqlIsSet(true);
      td.setView_sql(getViewSqlViaSql(id));
    }
    return td;
  }
521 
522  private TTableDetails get_table_detail_JSON(String tableName)
523  throws IOException, RuntimeException {
524  TTableDetails td = new TTableDetails();
525  td.getRow_descIterator();
526 
527  // open table json file
528  final String filePath =
529  dataDir + "/" + CATALOG_DIR_NAME + "/" + default_db + "_temp_tables.json";
530  HEAVYDBLOGGER.debug("Opening temp table file at " + filePath);
531 
532  String tempTablesJsonStr;
533  try {
534  File tempTablesFile = new File(filePath);
535  FileInputStream tempTablesStream = new FileInputStream(tempTablesFile);
536  byte[] data = new byte[(int) tempTablesFile.length()];
537  tempTablesStream.read(data);
538  tempTablesStream.close();
539 
540  tempTablesJsonStr = new String(data, "UTF-8");
541  } catch (java.io.FileNotFoundException e) {
542  throw new RuntimeException("Failed to read temporary tables file.");
543  }
544 
545  Gson gson = new Gson();
546  JsonObject fileParentObject = gson.fromJson(tempTablesJsonStr, JsonObject.class);
547  if (fileParentObject == null) {
548  throw new IOException("Malformed temporary tables file.");
549  }
550 
551  JsonObject tableObject = fileParentObject.getAsJsonObject(tableName);
552  if (tableObject == null) {
553  throw new RuntimeException(
554  "Failed to find table " + tableName + " in temporary tables file.");
555  }
556 
557  String jsonTableName = tableObject.get("name").getAsString();
558  assert (tableName == jsonTableName);
559  int id = tableObject.get("id").getAsInt();
560  HEAVYDBLOGGER.debug("table id is " + id);
561  HEAVYDBLOGGER.debug("table name is " + tableName);
562 
563  JsonArray jsonColumns = tableObject.getAsJsonArray("columns");
564  assert (jsonColumns != null);
565 
566  int skip_physical_cols = 0;
567  for (JsonElement columnElement : jsonColumns) {
568  JsonObject columnObject = columnElement.getAsJsonObject();
569 
570  String colName = columnObject.get("name").getAsString();
571  HEAVYDBLOGGER.debug("name = " + colName);
572  int colType = columnObject.get("coltype").getAsInt();
573  HEAVYDBLOGGER.debug("coltype = " + colType);
574  int colSubType = columnObject.get("colsubtype").getAsInt();
575  HEAVYDBLOGGER.debug("colsubtype = " + colSubType);
576  int compression = columnObject.get("compression").getAsInt();
577  HEAVYDBLOGGER.debug("compression = " + compression);
578  int compression_param = columnObject.get("comp_param").getAsInt();
579  HEAVYDBLOGGER.debug("comp_param = " + compression_param);
580  int size = columnObject.get("size").getAsInt();
581  HEAVYDBLOGGER.debug("size = " + size);
582  int colDim = columnObject.get("coldim").getAsInt();
583  HEAVYDBLOGGER.debug("coldim = " + colDim);
584  int colScale = columnObject.get("colscale").getAsInt();
585  HEAVYDBLOGGER.debug("colscale = " + colScale);
586  boolean isNotNull = columnObject.get("is_notnull").getAsBoolean();
587  HEAVYDBLOGGER.debug("is_notnull = " + isNotNull);
588  boolean isSystemCol = columnObject.get("is_systemcol").getAsBoolean();
589  HEAVYDBLOGGER.debug("is_systemcol = " + isSystemCol);
590  boolean isVirtualCol = columnObject.get("is_virtualcol").getAsBoolean();
591  HEAVYDBLOGGER.debug("is_vitrualcol = " + isVirtualCol);
592  boolean isDeletedCol = columnObject.get("is_deletedcol").getAsBoolean();
593  HEAVYDBLOGGER.debug("is_deletedcol = " + isDeletedCol);
594  HEAVYDBLOGGER.debug("");
595 
596  if (isDeletedCol) {
597  HEAVYDBLOGGER.debug("Skipping delete column.");
598  continue;
599  }
600 
601  TColumnType tct = new TColumnType();
602  TTypeInfo tti = new TTypeInfo();
603  TDatumType tdt;
604 
605  if (colType == KARRAY) {
606  tti.is_array = true;
607  tdt = typeToThrift(colSubType);
608  } else {
609  tti.is_array = false;
610  tdt = typeToThrift(colType);
611  }
612 
613  tti.nullable = !isNotNull;
614  tti.encoding = encodingToThrift(compression);
615  tti.comp_param = compression_param;
616  tti.size = size;
617  tti.type = tdt;
618  tti.scale = colScale;
619  tti.precision = colDim;
620 
621  tct.col_name = colName;
622  tct.col_type = tti;
623  tct.is_system = isSystemCol;
624 
625  if (skip_physical_cols <= 0) skip_physical_cols = get_physical_cols(colType);
626  if (is_geometry(colType) || skip_physical_cols-- <= 0) td.addToRow_desc(tct);
627  }
628 
629  return td;
630  }
631 
632  private int getTableId(String tableName) {
633  Statement stmt = null;
634  ResultSet rs = null;
635  int tableId = -1;
636  try {
637  stmt = catConn.createStatement();
638  rs = stmt.executeQuery(String.format(
639  "SELECT tableid FROM mapd_tables where name = '%s' COLLATE NOCASE;",
640  tableName));
641  while (rs.next()) {
642  tableId = rs.getInt("tableid");
643  HEAVYDBLOGGER.debug("tableId = " + tableId);
644  HEAVYDBLOGGER.debug("");
645  }
646  rs.close();
647  stmt.close();
648  } catch (Exception e) {
649  String err = "Error trying to read from metadata table mapd_tables;DB: "
650  + default_db + " data dir " + dataDir + ", error was " + e.getMessage();
651  HEAVYDBLOGGER.error(err);
652  throw new RuntimeException(err);
653  } finally {
654  if (rs != null) {
655  try {
656  rs.close();
657  } catch (SQLException ex) {
658  String err = "Could not close resultset, error was " + ex.getMessage();
659  HEAVYDBLOGGER.error(err);
660  throw new RuntimeException(err);
661  }
662  }
663  if (stmt != null) {
664  try {
665  stmt.close();
666  } catch (SQLException ex) {
667  String err = "Could not close stmt, error was " + ex.getMessage();
668  HEAVYDBLOGGER.error(err);
669  throw new RuntimeException(err);
670  }
671  }
672  }
673  return (tableId);
674  }
675 
676  private boolean isView(String tableName) {
677  Statement stmt;
678  ResultSet rs;
679  int viewFlag = 0;
680  try {
681  stmt = catConn.createStatement();
682  rs = stmt.executeQuery(String.format(
683  "SELECT isview FROM mapd_tables where name = '%s' COLLATE NOCASE;",
684  tableName));
685  while (rs.next()) {
686  viewFlag = rs.getInt("isview");
687  HEAVYDBLOGGER.debug("viewFlag = " + viewFlag);
688  HEAVYDBLOGGER.debug("");
689  }
690  rs.close();
691  stmt.close();
692  } catch (Exception e) {
693  String err = "error trying to read from mapd_views, error was " + e.getMessage();
694  HEAVYDBLOGGER.error(err);
695  throw new RuntimeException(err);
696  }
697  return (viewFlag == 1);
698  }
699 
700  private String getViewSql(String tableName) {
701  String sqlText;
702  if (dbPort == -1) {
703  // use sql
705  sqlText = getViewSqlViaSql(getTableId(tableName));
707  } else {
708  // use thrift direct to local server
709  try {
710  TProtocol protocol = null;
711 
712  TTransport transport =
713  sock_transport_properties.openClientTransport("localhost", dbPort);
714  if (!transport.isOpen()) transport.open();
715  protocol = new TBinaryProtocol(transport);
716 
717  Heavy.Client client = new Heavy.Client(protocol);
718  TTableDetails td = client.get_table_details_for_database(
719  currentUser.getSession(), tableName, default_db);
720  transport.close();
721 
722  sqlText = td.getView_sql();
723 
724  } catch (TTransportException ex) {
725  HEAVYDBLOGGER.error(ex.toString());
726  throw new RuntimeException(ex.toString());
727  } catch (TDBException ex) {
728  HEAVYDBLOGGER.error(ex.getError_msg());
729  throw new RuntimeException(ex.getError_msg());
730  } catch (TException ex) {
731  HEAVYDBLOGGER.error(ex.toString());
732  throw new RuntimeException(ex.toString());
733  }
734  }
735  /* return string without the sqlite's trailing semicolon */
736  if (sqlText.charAt(sqlText.length() - 1) == ';') {
737  return (sqlText.substring(0, sqlText.length() - 1));
738  } else {
739  return (sqlText);
740  }
741  }
742 
743  // we assume there is already a DB connection here
744  private String getViewSqlViaSql(int tableId) {
745  Statement stmt;
746  ResultSet rs;
747  String sqlText = "";
748  try {
749  stmt = catConn.createStatement();
750  rs = stmt.executeQuery(String.format(
751  "SELECT sql FROM mapd_views where tableid = '%s' COLLATE NOCASE;",
752  tableId));
753  while (rs.next()) {
754  sqlText = rs.getString("sql");
755  HEAVYDBLOGGER.debug("View definition = " + sqlText);
756  HEAVYDBLOGGER.debug("");
757  }
758  rs.close();
759  stmt.close();
760  } catch (Exception e) {
761  String err = "error trying to read from mapd_views, error was " + e.getMessage();
762  HEAVYDBLOGGER.error(err);
763  throw new RuntimeException(err);
764  }
765  if (sqlText == null || sqlText.length() == 0) {
766  String err = "No view text found";
767  HEAVYDBLOGGER.error(err);
768  throw new RuntimeException(err);
769  }
770  return sqlText;
771  }
772 
773  private TDatumType typeToThrift(int type) {
774  switch (type) {
775  case KBOOLEAN:
776  return TDatumType.BOOL;
777  case KTINYINT:
778  return TDatumType.TINYINT;
779  case KSMALLINT:
780  return TDatumType.SMALLINT;
781  case KINT:
782  return TDatumType.INT;
783  case KBIGINT:
784  return TDatumType.BIGINT;
785  case KFLOAT:
786  return TDatumType.FLOAT;
787  case KNUMERIC:
788  case KDECIMAL:
789  return TDatumType.DECIMAL;
790  case KDOUBLE:
791  return TDatumType.DOUBLE;
792  case KTEXT:
793  case KVARCHAR:
794  case KCHAR:
795  return TDatumType.STR;
796  case KTIME:
797  return TDatumType.TIME;
798  case KTIMESTAMP:
799  return TDatumType.TIMESTAMP;
800  case KDATE:
801  return TDatumType.DATE;
802  case KINTERVAL_DAY_TIME:
803  return TDatumType.INTERVAL_DAY_TIME;
805  return TDatumType.INTERVAL_YEAR_MONTH;
806  case KPOINT:
807  return TDatumType.POINT;
808  case KMULTIPOINT:
809  return TDatumType.MULTIPOINT;
810  case KLINESTRING:
811  return TDatumType.LINESTRING;
812  case KMULTILINESTRING:
813  return TDatumType.MULTILINESTRING;
814  case KPOLYGON:
815  return TDatumType.POLYGON;
816  case KMULTIPOLYGON:
817  return TDatumType.MULTIPOLYGON;
818  default:
819  return null;
820  }
821  }
822 
823  private TEncodingType encodingToThrift(int comp) {
824  switch (comp) {
825  case 0:
826  return TEncodingType.NONE;
827  case 1:
828  return TEncodingType.FIXED;
829  case 2:
830  return TEncodingType.RL;
831  case 3:
832  return TEncodingType.DIFF;
833  case 4:
834  return TEncodingType.DICT;
835  case 5:
836  return TEncodingType.SPARSE;
837  case 6:
838  return TEncodingType.GEOINT;
839  case 7:
840  return TEncodingType.DATE_IN_DAYS;
841  default:
842  return null;
843  }
844  }
845 
846  private void populateDatabases() {
847  // TODO 13 Mar 2021 MAT
848  // this probably has to come across from the server on first start up rather
849  // than lazy instantiation here
850  // as a user may not be able to see all schemas and this sets it for the life
851  // of the server.
852  // Proceeding this way as a WIP
853  if (dbPort == 0) {
854  // seems to be a condition that is expected
855  // for FSI testing
856  return;
857  }
858  if (dbPort == -1) {
859  // use sql
860  connectToCatalog("system_catalog"); // hardcoded sys catalog
861  Set<String> dbNames = getDatabases_SQL();
863  for (String dbName : dbNames) {
864  Set<String> ts = new HashSet<String>();
865  DATABASE_TO_TABLES.putIfAbsent(dbName.toUpperCase(), ts);
866  }
867  return;
868  }
869  // use thrift direct to local server
870  try {
871  TProtocol protocol = null;
872  TTransport transport =
873  sock_transport_properties.openClientTransport("localhost", dbPort);
874  if (!transport.isOpen()) transport.open();
875  protocol = new TBinaryProtocol(transport);
876 
877  Heavy.Client client = new Heavy.Client(protocol);
878 
879  List<TDBInfo> dbList = client.get_databases(currentUser.getSession());
880  for (TDBInfo dbInfo : dbList) {
881  Set<String> ts = new HashSet<String>();
882  DATABASE_TO_TABLES.putIfAbsent(dbInfo.db_name.toUpperCase(), ts);
883  }
884  transport.close();
885 
886  } catch (TTransportException ex) {
887  HEAVYDBLOGGER.error("TTransportException on port [" + dbPort + "]");
888  HEAVYDBLOGGER.error(ex.toString());
889  throw new RuntimeException(ex.toString());
890  } catch (TDBException ex) {
891  HEAVYDBLOGGER.error(ex.getError_msg());
892  throw new RuntimeException(ex.getError_msg());
893  } catch (TException ex) {
894  HEAVYDBLOGGER.error(ex.toString());
895  throw new RuntimeException(ex.toString());
896  }
897  }
898 
899  private Set<String> getDatabases_SQL() {
900  Set<String> dbSet = new HashSet<String>();
901  Statement stmt = null;
902  ResultSet rs = null;
903  String sqlText = "";
904  try {
905  stmt = catConn.createStatement();
906 
907  // get the tables
908  rs = stmt.executeQuery("SELECT name FROM mapd_databases ");
909  while (rs.next()) {
910  dbSet.add(rs.getString("name"));
911  /*--*/
912  HEAVYDBLOGGER.debug("Object name = " + rs.getString("name"));
913  }
914  rs.close();
915  stmt.close();
916 
917  } catch (Exception e) {
918  String err = "error trying to get all the databases, error was " + e.getMessage();
919  HEAVYDBLOGGER.error(err);
920  throw new RuntimeException(err);
921  }
922  return dbSet;
923  }
924 
925  public void updateMetaData(String schema, String table) {
926  // Check if table is specified, if not we are dropping an entire DB so need to
927  // remove all tables for that DB
928  if (table.equals("")) {
929  // Drop db and all tables
930  // iterate through all and remove matching schema
931  Set<List<String>> all = new HashSet<>(DB_TABLE_DETAILS.keySet());
932  for (List<String> keys : all) {
933  if (keys.get(0).equals(schema.toUpperCase())) {
934  HEAVYDBLOGGER.debug(
935  "removing all for schema " + keys.get(0) + " table " + keys.get(1));
936  DB_TABLE_DETAILS.remove(keys);
937  }
938  }
939  } else {
940  HEAVYDBLOGGER.debug("removing schema " + schema.toUpperCase() + " table "
941  + table.toUpperCase());
942  DB_TABLE_DETAILS.remove(
943  ImmutableList.of(schema.toUpperCase(), table.toUpperCase()));
944  }
945  // Invalidate views
946  Set<List<String>> all = new HashSet<>(DB_TABLE_DETAILS.keySet());
947  for (List<String> keys : all) {
948  if (keys.get(0).equals(schema.toUpperCase())) {
949  Table ttable = DB_TABLE_DETAILS.get(keys);
950  if (ttable instanceof HeavyDBView) {
951  HEAVYDBLOGGER.debug(
952  "removing view in schema " + keys.get(0) + " view " + keys.get(1));
953  DB_TABLE_DETAILS.remove(keys);
954  }
955  }
956  }
957  // Could be a removal or an add request for a DB
958  Set<String> mSet = DATABASE_TO_TABLES.get(schema.toUpperCase());
959  if (mSet != null) {
960  if (table.isEmpty()) {
961  // If table is not specified, then we are dropping an entire DB.
962  HEAVYDBLOGGER.debug("removing schema " + schema.toUpperCase());
963  DATABASE_TO_TABLES.remove(schema.toUpperCase());
964  } else {
965  mSet.clear();
966  }
967  } else {
968  // add a empty database descriptor for new DB, it will be lazily populated when
969  // required
970  Set<String> ts = new HashSet<String>();
971  DATABASE_TO_TABLES.putIfAbsent(schema.toUpperCase(), ts);
972  }
973  }
974 }
static volatile Map< String, Set< String > > DATABASE_TO_TABLES
Table getTable(String tableName)
static final int get_physical_cols(int type)
final HeavyDBUser currentUser
static final int KMULTIPOLYGON
static final int KMULTIPOINT
void updateMetaData(String schema, String table)
static final int KMULTILINESTRING
String getViewSql(String tableName)
static final boolean is_geometry(int type)
MetaConnect(int dbPort, String dataDir, HeavyDBUser currentHeavyDBUser, HeavyDBParser parser, SockTransportProperties skT)
String getViewSqlViaSql(int tableId)
TTableDetails get_table_details(String tableName)
static volatile Map< List< String >, Table > DB_TABLE_DETAILS
static final int KINTERVAL_YEAR_MONTH
final SockTransportProperties sock_transport_properties
static final int KLINESTRING
String getCatalogFileName(String catalog)
static final String CATALOG_DIR_NAME
void connectToCatalog(String catalog)
static final Logger HEAVYDBLOGGER
final HeavyDBParser parser
TTableDetails get_table_detail_SQL(String tableName)
TDatumType typeToThrift(int type)
Set< String > getDatabases_SQL()
int getTableId(String tableName)
MetaConnect(int dbPort, String dataDir, HeavyDBUser currentHeavyDBUser, HeavyDBParser parser, SockTransportProperties skT, String db)
TTableDetails get_table_detail_JSON(String tableName)
static final int KINTERVAL_DAY_TIME
boolean isView(String tableName)
TEncodingType encodingToThrift(int comp)