OmniSciDB  72c90bc290
com.mapd.utility.SQLImporter Class Reference

Static Public Member Functions

static void main (String[] args)
 

Protected Attributes

String session = null
 
Heavy.Client client = null
 

Package Functions

void doWork (String[] args)
 
void executeQuery ()
 

Package Attributes

Db_vendor_types vendor_types = null
 

Static Package Attributes

static final Logger LOGGER = LoggerFactory.getLogger(SQLImporter.class)
 

Private Member Functions

void run_init (Connection conn)
 
void help (Options options)
 
void checkDBTable (Connection otherdb_conn, ResultSetMetaData md) throws SQLException
 
void verifyColumnSignaturesMatch (Connection otherdb_conn, List< TColumnType > dstColumns, ResultSetMetaData srcColumns) throws SQLException
 
void createDBTable (Connection otherdb_conn, ResultSetMetaData metaData)
 
void createDBConnection ()
 
List< TColumnType > getColumnInfo (String tName)
 
boolean tableExists (String tName)
 
void executeDBCommand (String sql)
 
String getColType (int cType, int precision, int scale)
 
TColumn setupBinaryColumn (int i, ResultSetMetaData md, int bufferSize) throws SQLException
 
void setColValue (ResultSet rs, TColumn col, int columnType, int colNum, int scale, String colTypeName) throws SQLException
 
void resetBinaryColumn (int i, ResultSetMetaData md, int bufferSize, TColumn col) throws SQLException
 

Private Attributes

CommandLine cmd = null
 
DateTimeUtils dateTimeUtils
 

Detailed Description

Definition at line 321 of file SQLImporter.java.
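
SQLImporter is a command-line utility that streams the result set of a JDBC query from a source database into a HEAVY.AI (formerly OmniSciDB) table over Thrift, creating or verifying the target table as needed. For orientation, here is a minimal sketch of driving it programmatically; the long option spellings are assumptions inferred from the getOptionValue() keys in the listings below, so consult SQLImporter_args or the printed help for the authoritative flags.

// A minimal sketch (not part of the source): invoking the importer from Java.
// The option names below are assumptions based on the getOptionValue() keys
// shown on this page; a running HEAVY.AI server and a reachable source
// database are required for this to do anything useful.
public class ImportExample {
  public static void main(String[] ignored) {
    String[] args = {
        "--server", "localhost", "--port", "6274",
        "--user", "admin", "--passwd", "HyperInteractive",
        "--database", "omnisci",
        "--jdbcConnect", "jdbc:postgresql://srchost/srcdb",
        "--sourceUser", "src_user", "--sourcePasswd", "src_pass",
        "--targetTable", "imported_table",
        "--sqlStmt", "SELECT * FROM source_table",
        "--bufferSize", "10000"};
    com.mapd.utility.SQLImporter.main(args); // parses options, then doWork() -> executeQuery()
  }
}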

Member Function Documentation

void com.mapd.utility.SQLImporter.checkDBTable ( Connection  otherdb_conn,
ResultSetMetaData  md 
) throws SQLException
inline, private

Definition at line 530 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.cmd, com.mapd.utility.SQLImporter.createDBConnection(), com.mapd.utility.SQLImporter.createDBTable(), com.mapd.utility.SQLImporter.executeDBCommand(), com.mapd.utility.SQLImporter.getColumnInfo(), com.mapd.utility.SQLImporter.tableExists(), and com.mapd.utility.SQLImporter.verifyColumnSignaturesMatch().

Referenced by com.mapd.utility.SQLImporter.executeQuery().

531  {
533  String tName = cmd.getOptionValue("targetTable");
534 
535  if (tableExists(tName)) {
536  // check if we want to truncate
537  if (cmd.hasOption("truncate")) {
538  executeDBCommand("Drop table " + tName);
539  createDBTable(otherdb_conn, md);
540  } else {
541  List<TColumnType> columnInfo = getColumnInfo(tName);
542  verifyColumnSignaturesMatch(otherdb_conn, columnInfo, md);
543  }
544  } else {
545  createDBTable(otherdb_conn, md);
546  }
547  }


void com.mapd.utility.SQLImporter.createDBConnection ( )
inline, private

Definition at line 722 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.client, com.mapd.utility.SQLImporter.cmd, and com.mapd.utility.SQLImporter.session.

Referenced by com.mapd.utility.SQLImporter.checkDBTable().

722  {
723  TTransport transport = null;
724  TProtocol protocol = new TBinaryProtocol(transport);
725  int port = Integer.valueOf(cmd.getOptionValue("port", "6274"));
726  String server = cmd.getOptionValue("server", "localhost");
727  try {
728  // Uses default certificate stores.
729  boolean load_trust_store = cmd.hasOption("https");
730  SockTransportProperties skT = null;
731  if (cmd.hasOption("https")) {
732  skT = SockTransportProperties.getEncryptedClientDefaultTrustStore(
733  !cmd.hasOption("insecure"));
734  transport = skT.openHttpsClientTransport(server, port);
735  transport.open();
736  protocol = new TJSONProtocol(transport);
737  } else if (cmd.hasOption("http")) {
738  skT = SockTransportProperties.getUnencryptedClient();
739  transport = skT.openHttpClientTransport(server, port);
740  protocol = new TJSONProtocol(transport);
741  } else {
742  skT = SockTransportProperties.getUnencryptedClient();
743  transport = skT.openClientTransport(server, port);
744  transport.open();
745  protocol = new TBinaryProtocol(transport);
746  }
747 
748  client = new Heavy.Client(protocol);
749  // This if will be useless until PKI signon
750  if (cmd.hasOption("user")) {
751  session = client.connect(cmd.getOptionValue("user", "admin"),
752  cmd.getOptionValue("passwd", "HyperInteractive"),
753  cmd.getOptionValue("database", "omnisci"));
754  }
755  LOGGER.debug("Connected session is " + session);
756 
757  } catch (TTransportException ex) {
758  LOGGER.error("Connection failed - " + ex.toString());
759  exit(1);
760  } catch (TDBException ex) {
761  LOGGER.error("Connection failed - " + ex.getError_msg());
762  exit(2);
763  } catch (TException ex) {
764  LOGGER.error("Connection failed - " + ex.toString());
765  exit(3);
766  } catch (Exception ex) {
767  LOGGER.error("General exception - " + ex.toString());
768  exit(4);
769  }
770  }
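
For quick experiments outside the importer, the plain (non-TLS, non-HTTP) branch above boils down to a handful of Thrift calls. The sketch below is an assumption-laden condensation, not the importer's own code: the package of the generated bindings (shown here as ai.heavy.thrift.server) may differ in your build, and it uses a raw TSocket instead of SockTransportProperties.

// Condensed, unencrypted-transport sketch of the connection logic above.
// Assumption: the generated Thrift client lives in ai.heavy.thrift.server;
// adjust the import to match your Thrift code generation.
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
import ai.heavy.thrift.server.Heavy;

public class ConnectSketch {
  public static void main(String[] args) throws Exception {
    TTransport transport = new TSocket("localhost", 6274); // plain TCP, binary protocol
    transport.open();
    Heavy.Client client = new Heavy.Client(new TBinaryProtocol(transport));
    String session = client.connect("admin", "HyperInteractive", "omnisci");
    try {
      System.out.println("Connected, session = " + session);
    } finally {
      client.disconnect(session); // always release the server-side session
      transport.close();
    }
  }
}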


void com.mapd.utility.SQLImporter.createDBTable ( Connection  otherdb_conn,
ResultSetMetaData  metaData 
)
inline, private

Definition at line 680 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.cmd, com.mapd.utility.SQLImporter.executeDBCommand(), and com.mapd.utility.SQLImporter.getColType().

Referenced by com.mapd.utility.SQLImporter.checkDBTable().

680  {
681  StringBuilder sb = new StringBuilder();
682  sb.append("Create table ").append(cmd.getOptionValue("targetTable")).append("(");
683 
684  // Now iterate the metadata
685  try {
686  for (int i = 1; i <= metaData.getColumnCount(); i++) {
687  if (i > 1) {
688  sb.append(",");
689  }
690  LOGGER.debug("Column name is " + metaData.getColumnName(i));
691  LOGGER.debug("Column type is " + metaData.getColumnTypeName(i));
692  LOGGER.debug("Column type is " + metaData.getColumnType(i));
693 
694  sb.append(metaData.getColumnName(i)).append(" ");
695  int col_type = metaData.getColumnType(i);
696  if (col_type == java.sql.Types.OTHER) {
697  Db_vendor_types.GisType type =
698  vendor_types.find_gis_type(otherdb_conn, metaData, i);
699  sb.append(Db_vendor_types.gis_type_to_str(type));
700  } else {
701  sb.append(getColType(metaData.getColumnType(i),
702  metaData.getPrecision(i),
703  metaData.getScale(i)));
704  }
705  }
706  sb.append(")");
707 
708  if (Integer.valueOf(cmd.getOptionValue("fragmentSize", "0")) > 0) {
709  sb.append(" with (fragment_size = ");
710  sb.append(cmd.getOptionValue("fragmentSize", "0"));
711  sb.append(")");
712  }
713 
714  } catch (SQLException ex) {
715  LOGGER.error("Error processing the metadata - " + ex.toString());
716  exit(1);
717  }
718 
719  executeDBCommand(sb.toString());
720  }
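
For illustration only (with invented column names): a source result set with columns id INTEGER, name VARCHAR(50) and amount DECIMAL(10,2), imported with fragmentSize set to 32000, would produce a DDL string roughly like Create table imported_table(id INTEGER,name TEXT ENCODING DICT,amount DECIMAL(10,2)) with (fragment_size = 32000), which is then passed to executeDBCommand().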


void com.mapd.utility.SQLImporter.doWork ( String[]  args)
inline, package

Definition at line 337 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.cmd and com.mapd.utility.SQLImporter.executeQuery().

337  {
338  // create Options object
339 
340  SQLImporter_args s_args = new SQLImporter_args();
341 
342  try {
343  cmd = s_args.parse(args);
344  } catch (ParseException ex) {
345  LOGGER.error(ex.getLocalizedMessage());
346  s_args.printHelpMessage();
347  exit(0);
348  }
349  executeQuery();
350  }


void com.mapd.utility.SQLImporter.executeDBCommand ( String  sql)
inline, private

Definition at line 807 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.session.

Referenced by com.mapd.utility.SQLImporter.checkDBTable(), and com.mapd.utility.SQLImporter.createDBTable().

807  {
808  LOGGER.info("Run Command - " + sql);
809 
810  try {
811  TQueryResult sqlResult = client.sql_execute(session, sql + ";", true, null, -1, -1);
812  } catch (TDBException ex) {
813  LOGGER.error("SQL Execute failed - " + ex.getError_msg());
814  exit(1);
815  } catch (TException ex) {
816  LOGGER.error("SQL Execute failed - " + ex.toString());
817  exit(1);
818  }
819  }


void com.mapd.utility.SQLImporter.executeQuery ( )
inline, package

Definition at line 352 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.checkDBTable(), com.mapd.utility.SQLImporter.cmd, com.mapd.utility.db_vendors.Db_vendor_types.isAutoCommitDisabledRequired(), com.mapd.utility.SQLImporter.resetBinaryColumn(), com.mapd.utility.SQLImporter.run_init(), com.mapd.utility.SQLImporter.session, com.mapd.utility.SQLImporter.setColValue(), com.mapd.utility.SQLImporter.setupBinaryColumn(), and com.mapd.utility.SQLImporter.vendor_types.

Referenced by com.mapd.utility.SQLImporter.doWork().

352  {
353  Connection conn = null;
354  Statement stmt = null;
355 
356  long totalTime = 0;
357 
358  try {
359  // Open a connection
360  if (cmd.hasOption("nlj")) {
361  LOGGER.info("Connecting to source database.");
362  } else {
363  LOGGER.info("Connecting to database url :" + cmd.getOptionValue("jdbcConnect"));
364  }
365  conn = DriverManager.getConnection(cmd.getOptionValue("jdbcConnect"),
366  cmd.getOptionValue("sourceUser"),
367  cmd.getOptionValue("sourcePasswd"));
368  vendor_types = Db_vendor_types.Db_vendor_factory(cmd.getOptionValue("jdbcConnect"));
369  long startTime = System.currentTimeMillis();
370 
 371  // run the init file script on the source JDBC connection if present
372  if (cmd.hasOption("initializeFile")) {
373  run_init(conn);
374  }
375 
 376  try {
 377  if (vendor_types.isAutoCommitDisabledRequired()) {
 378  conn.setAutoCommit(false);
 379  }
380  } catch (SQLException se) {
381  LOGGER.warn(
382  "SQLException when attempting to setAutoCommit to false, jdbc driver probably doesnt support it. Error is "
383  + se.toString());
384  }
385 
386  // Execute a query
387  stmt = conn.createStatement();
388 
389  int bufferSize = Integer.valueOf(cmd.getOptionValue("bufferSize", "10000"));
 390  // set the JDBC fetch buffer size to reduce the number of records being moved to
 391  // Java from Postgres
392  stmt.setFetchSize(bufferSize);
393  long timer;
394 
395  ResultSet rs = stmt.executeQuery(cmd.getOptionValue("sqlStmt"));
396 
397  // check if table already exists and is compatible in HEAVYAI with the query
398  // metadata
399  ResultSetMetaData md = rs.getMetaData();
400  checkDBTable(conn, md);
401 
402  timer = System.currentTimeMillis();
403 
404  long resultCount = 0;
405  int bufferCount = 0;
406  long total = 0;
407 
408  List<TColumn> cols = new ArrayList(md.getColumnCount());
409  for (int i = 1; i <= md.getColumnCount(); i++) {
410  TColumn col = setupBinaryColumn(i, md, bufferSize);
411  cols.add(col);
412  }
413 
414  // read data from old DB
415  while (rs.next()) {
416  for (int i = 1; i <= md.getColumnCount(); i++) {
417  setColValue(rs,
418  cols.get(i - 1),
419  md.getColumnType(i),
420  i,
421  md.getScale(i),
422  md.getColumnTypeName(i));
423  }
424  resultCount++;
425  bufferCount++;
426  if (bufferCount == bufferSize) {
427  bufferCount = 0;
428  // send the buffer to HEAVY.AI
429  client.load_table_binary_columnar(
430  session, cmd.getOptionValue("targetTable"), cols, null);
431  // recreate columnar store for use
432  for (int i = 1; i <= md.getColumnCount(); i++) {
433  resetBinaryColumn(i, md, bufferSize, cols.get(i - 1));
434  }
435 
436  if (resultCount % 100000 == 0) {
437  LOGGER.info("Imported " + resultCount + " records");
438  }
439  }
440  }
441  if (bufferCount > 0) {
442  // send the LAST buffer to HEAVY.AI
443  client.load_table_binary_columnar(
444  session, cmd.getOptionValue("targetTable"), cols, null);
445  bufferCount = 0;
446  }
447 
448  LOGGER.info("result set count is " + resultCount + " read time is "
449  + (System.currentTimeMillis() - timer) + "ms");
450 
451  // Clean-up environment
452  rs.close();
453  stmt.close();
454  conn.close();
455 
456  totalTime = System.currentTimeMillis() - startTime;
457  } catch (SQLException se) {
458  LOGGER.error("SQLException - " + se.toString());
459  se.printStackTrace();
460  } catch (TDBException ex) {
461  LOGGER.error("TDBException - " + ex.getError_msg());
462  ex.printStackTrace();
463  } catch (TException ex) {
464  LOGGER.error("TException failed - " + ex.toString());
465  ex.printStackTrace();
466  } finally {
467  // finally block used to close resources
468  try {
469  if (stmt != null) {
470  stmt.close();
471  }
472  } catch (SQLException se2) {
473  } // nothing we can do
474  try {
475  if (conn != null) {
476  conn.close();
477  }
478  } catch (SQLException se) {
479  LOGGER.error("SQlException in close - " + se.toString());
480  se.printStackTrace();
481  }
482  try {
483  if (session != null) {
484  client.disconnect(session);
485  }
486  } catch (TDBException ex) {
487  LOGGER.error("TDBException - in finalization " + ex.getError_msg());
488  ex.printStackTrace();
489  } catch (TException ex) {
490  LOGGER.error("TException - in finalization" + ex.toString());
491  ex.printStackTrace();
492  }
493  }
494  }
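
As a concrete illustration: with the default bufferSize of 10000, a hypothetical 25,000-row source result set triggers load_table_binary_columnar three times, two full 10,000-row batches inside the read loop and one final 5,000-row batch after it, with every column buffer cleared via resetBinaryColumn between batches.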


String com.mapd.utility.SQLImporter.getColType ( int  cType,
int  precision,
int  scale 
)
inline, private

Definition at line 821 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.createDBTable().

821  {
 822  // Note - if cType is OTHER, an earlier call will have been made
 823  // to try to work out the db vendor's specific type.
824  if (precision > 19) {
825  precision = 19;
826  }
827  if (scale > 19) {
828  scale = 18;
829  }
830  switch (cType) {
831  case java.sql.Types.TINYINT:
832  return ("TINYINT");
833  case java.sql.Types.SMALLINT:
834  return ("SMALLINT");
835  case java.sql.Types.INTEGER:
836  return ("INTEGER");
837  case java.sql.Types.BIGINT:
838  return ("BIGINT");
839  case java.sql.Types.FLOAT:
840  return ("FLOAT");
841  case java.sql.Types.DECIMAL:
842  return ("DECIMAL(" + precision + "," + scale + ")");
843  case java.sql.Types.DOUBLE:
844  return ("DOUBLE");
845  case java.sql.Types.REAL:
846  return ("REAL");
847  case java.sql.Types.NUMERIC:
848  return ("NUMERIC(" + precision + "," + scale + ")");
849  case java.sql.Types.TIME:
850  return ("TIME");
851  case java.sql.Types.TIMESTAMP:
852  return ("TIMESTAMP");
853  case java.sql.Types.DATE:
854  return ("DATE");
855  case java.sql.Types.BOOLEAN:
856  case java.sql.Types
 857  .BIT: // deal with postgres treating boolean as bit... this will bite me
858  return ("BOOLEAN");
859  case java.sql.Types.NVARCHAR:
860  case java.sql.Types.VARCHAR:
861  case java.sql.Types.NCHAR:
862  case java.sql.Types.CHAR:
863  case java.sql.Types.LONGVARCHAR:
864  case java.sql.Types.LONGNVARCHAR:
865  return ("TEXT ENCODING DICT");
866  default:
867  throw new AssertionError("Column type " + cType + " not Supported");
868  }
869  }
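
For example, a hypothetical source NUMERIC(30,25) column is clamped by the precision and scale caps above and mapped to NUMERIC(19,18), every character type in the CHAR/VARCHAR/NVARCHAR family becomes TEXT ENCODING DICT, and an unsupported JDBC type raises an AssertionError.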


List<TColumnType> com.mapd.utility.SQLImporter.getColumnInfo ( String  tName)
inline, private

Definition at line 772 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.session.

Referenced by com.mapd.utility.SQLImporter.checkDBTable().

772  {
773  LOGGER.debug("Getting columns for " + tName);
774  List<TColumnType> row_descriptor = null;
775  try {
776  TTableDetails table_details = client.get_table_details(session, tName);
777  row_descriptor = table_details.row_desc;
778  } catch (TDBException ex) {
779  LOGGER.error("column check failed - " + ex.getError_msg());
780  exit(3);
781  } catch (TException ex) {
782  LOGGER.error("column check failed - " + ex.toString());
783  exit(3);
784  }
785  return row_descriptor;
786  }


void com.mapd.utility.SQLImporter.help ( Options  options)
inline, private

Definition at line 523 of file SQLImporter.java.

523  {
524  // automatically generate the help statement
525  HelpFormatter formatter = new HelpFormatter();
526  formatter.setOptionComparator(null); // get options in the order they are created
527  formatter.printHelp("SQLImporter", options);
528  }
static void com.mapd.utility.SQLImporter.main ( String[]  args)
inline, static

Definition at line 332 of file SQLImporter.java.


332  {
333  SQLImporter sq = new SQLImporter();
334  sq.doWork(args);
335  }
void com.mapd.utility.SQLImporter.resetBinaryColumn ( int  i,
ResultSetMetaData  md,
int  bufferSize,
TColumn  col 
) throws SQLException
inline, private

Definition at line 1043 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.executeQuery().

1044  {
1045  col.nulls.clear();
1046 
1047  switch (md.getColumnType(i)) {
1048  case java.sql.Types.TINYINT:
1049  case java.sql.Types.SMALLINT:
1050  case java.sql.Types.INTEGER:
1051  case java.sql.Types.BIGINT:
1052  case java.sql.Types.TIME:
1053  case java.sql.Types.TIMESTAMP:
1054  case java.sql.Types
 1055  .BIT: // deal with postgres treating boolean as bit... this will bite me
1056  case java.sql.Types.BOOLEAN:
1057  case java.sql.Types.DATE:
1058  case java.sql.Types.DECIMAL:
1059  case java.sql.Types.NUMERIC:
1060  col.data.int_col.clear();
1061  break;
1062 
1063  case java.sql.Types.FLOAT:
1064  case java.sql.Types.DOUBLE:
1065  case java.sql.Types.REAL:
1066  col.data.real_col.clear();
1067  break;
1068 
1069  case java.sql.Types.NVARCHAR:
1070  case java.sql.Types.VARCHAR:
1071  case java.sql.Types.NCHAR:
1072  case java.sql.Types.CHAR:
1073  case java.sql.Types.LONGVARCHAR:
1074  case java.sql.Types.LONGNVARCHAR:
1075  case java.sql.Types.OTHER:
1076  col.data.str_col.clear();
1077  break;
1078  default:
1079  throw new AssertionError("Column type " + md.getColumnType(i) + " not Supported");
1080  }
1081  }


void com.mapd.utility.SQLImporter.run_init ( Connection  conn)
inline, private

Definition at line 496 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.cmd.

Referenced by com.mapd.utility.SQLImporter.executeQuery().

496  {
497  // attempt to open file
498  String line = "";
499  try {
500  BufferedReader reader =
501  new BufferedReader(new FileReader(cmd.getOptionValue("initializeFile")));
502  Statement stmt = conn.createStatement();
503  while ((line = reader.readLine()) != null) {
504  if (line.isEmpty()) {
505  continue;
506  }
507  LOGGER.info("Running : " + line);
508  stmt.execute(line);
509  }
510  stmt.close();
511  reader.close();
512  } catch (IOException e) {
513  LOGGER.error("Exception occurred trying to read initialize file: "
514  + cmd.getOptionValue("initFile"));
515  exit(1);
516  } catch (SQLException e) {
517  LOGGER.error(
518  "Exception occurred trying to execute initialize file entry : " + line);
519  exit(1);
520  }
521  }
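
The initialize file is executed one line at a time against the source JDBC connection, so it should contain exactly one complete SQL statement per line (blank lines are skipped). A hypothetical example is a file whose lines create the staging views that the sqlStmt query later selects from.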


void com.mapd.utility.SQLImporter.setColValue ( ResultSet  rs,
TColumn  col,
int  columnType,
int  colNum,
int  scale,
String  colTypeName 
) throws SQLException
inline, private

Definition at line 917 of file SQLImporter.java.


Referenced by com.mapd.utility.SQLImporter.executeQuery().

922  {
923  switch (columnType) {
924  case java.sql.Types
 925  .BIT: // deal with postgres treating boolean as bit... this will bite me
926  case java.sql.Types.BOOLEAN:
927  Boolean b = rs.getBoolean(colNum);
928  if (rs.wasNull()) {
929  col.nulls.add(Boolean.TRUE);
930  col.data.int_col.add(0L);
931  } else {
932  col.nulls.add(Boolean.FALSE);
933  col.data.int_col.add(b ? 1L : 0L);
934  }
935  break;
936 
937  case java.sql.Types.DECIMAL:
938  case java.sql.Types.NUMERIC:
939  BigDecimal bd = rs.getBigDecimal(colNum);
940  if (rs.wasNull()) {
941  col.nulls.add(Boolean.TRUE);
942  col.data.int_col.add(0L);
943  } else {
944  col.nulls.add(Boolean.FALSE);
945  col.data.int_col.add(bd.multiply(new BigDecimal(pow(10L, scale))).longValue());
946  }
947  break;
948 
949  case java.sql.Types.TINYINT:
950  case java.sql.Types.SMALLINT:
951  case java.sql.Types.INTEGER:
952  case java.sql.Types.BIGINT:
953  Long l = rs.getLong(colNum);
954  if (rs.wasNull()) {
955  col.nulls.add(Boolean.TRUE);
956  col.data.int_col.add(new Long(0));
957  } else {
958  col.nulls.add(Boolean.FALSE);
959  col.data.int_col.add(l);
960  }
961  break;
962 
963  case java.sql.Types.TIME:
964  Time t = rs.getTime(colNum);
965  if (rs.wasNull()) {
966  col.nulls.add(Boolean.TRUE);
967  col.data.int_col.add(0L);
968 
969  } else {
970  col.data.int_col.add(dateTimeUtils.getSecondsFromMilliseconds(t.getTime()));
971  col.nulls.add(Boolean.FALSE);
972  }
973 
974  break;
975  case java.sql.Types.TIMESTAMP:
976  Timestamp ts = rs.getTimestamp(colNum);
977  if (rs.wasNull()) {
978  col.nulls.add(Boolean.TRUE);
979  col.data.int_col.add(0L);
980 
981  } else {
982  col.data.int_col.add(dateTimeUtils.getSecondsFromMilliseconds(ts.getTime()));
983  col.nulls.add(Boolean.FALSE);
984  }
985 
986  break;
987  case java.sql.Types.DATE:
988  Date d = rs.getDate(colNum);
989  if (rs.wasNull()) {
990  col.nulls.add(Boolean.TRUE);
991  col.data.int_col.add(0L);
992 
993  } else {
994  col.data.int_col.add(dateTimeUtils.getSecondsFromMilliseconds(d.getTime()));
995  col.nulls.add(Boolean.FALSE);
996  }
997  break;
998  case java.sql.Types.FLOAT:
999  case java.sql.Types.DOUBLE:
1000  case java.sql.Types.REAL:
1001  Double db = rs.getDouble(colNum);
1002  if (rs.wasNull()) {
1003  col.nulls.add(Boolean.TRUE);
1004  col.data.real_col.add(new Double(0));
1005 
1006  } else {
1007  col.nulls.add(Boolean.FALSE);
1008  col.data.real_col.add(db);
1009  }
1010  break;
1011 
1012  case java.sql.Types.NVARCHAR:
1013  case java.sql.Types.VARCHAR:
1014  case java.sql.Types.NCHAR:
1015  case java.sql.Types.CHAR:
1016  case java.sql.Types.LONGVARCHAR:
1017  case java.sql.Types.LONGNVARCHAR:
1018  String strVal = rs.getString(colNum);
1019  if (rs.wasNull()) {
1020  col.nulls.add(Boolean.TRUE);
1021  col.data.str_col.add("");
1022 
1023  } else {
1024  col.data.str_col.add(strVal);
1025  col.nulls.add(Boolean.FALSE);
1026  }
1027  break;
1028  case java.sql.Types.OTHER:
1029  Object objVal = rs.getObject(colNum);
1030  if (rs.wasNull()) {
1031  col.nulls.add(Boolean.TRUE);
1032  col.data.str_col.add("");
1033  } else {
1034  col.data.str_col.add(vendor_types.get_wkt(rs, colNum, colTypeName));
1035  col.nulls.add(Boolean.FALSE);
1036  }
1037  break;
1038  default:
1039  throw new AssertionError("Column type " + columnType + " not Supported");
1040  }
1041  }
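
For example, a DECIMAL value of 12.34 read from a column with scale 2 is stored in int_col as 1234 (the value multiplied by 10^scale) with Boolean.FALSE appended to nulls, while a SQL NULL in the same column stores 0 and Boolean.TRUE instead.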


TColumn com.mapd.utility.SQLImporter.setupBinaryColumn ( int  i,
ResultSetMetaData  md,
int  bufferSize 
) throws SQLException
inline, private

Definition at line 871 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.executeQuery().

872  {
873  TColumn col = new TColumn();
874 
875  col.nulls = new ArrayList<Boolean>(bufferSize);
876 
877  col.data = new TColumnData();
878 
879  switch (md.getColumnType(i)) {
880  case java.sql.Types.TINYINT:
881  case java.sql.Types.SMALLINT:
882  case java.sql.Types.INTEGER:
883  case java.sql.Types.BIGINT:
884  case java.sql.Types.TIME:
885  case java.sql.Types.TIMESTAMP:
886  case java.sql.Types
 887  .BIT: // deal with postgres treating boolean as bit... this will bite me
888  case java.sql.Types.BOOLEAN:
889  case java.sql.Types.DATE:
890  case java.sql.Types.DECIMAL:
891  case java.sql.Types.NUMERIC:
892  col.data.int_col = new ArrayList<Long>(bufferSize);
893  break;
894 
895  case java.sql.Types.FLOAT:
896  case java.sql.Types.DOUBLE:
897  case java.sql.Types.REAL:
898  col.data.real_col = new ArrayList<Double>(bufferSize);
899  break;
900 
901  case java.sql.Types.NVARCHAR:
902  case java.sql.Types.VARCHAR:
903  case java.sql.Types.NCHAR:
904  case java.sql.Types.CHAR:
905  case java.sql.Types.LONGVARCHAR:
906  case java.sql.Types.LONGNVARCHAR:
907  case java.sql.Types.OTHER:
908  col.data.str_col = new ArrayList<String>(bufferSize);
909  break;
910 
911  default:
912  throw new AssertionError("Column type " + md.getColumnType(i) + " not Supported");
913  }
914  return col;
915  }


boolean com.mapd.utility.SQLImporter.tableExists ( String  tName)
inline, private

Definition at line 788 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.session.

Referenced by com.mapd.utility.SQLImporter.checkDBTable().

788  {
789  LOGGER.debug("Check for table " + tName);
790  try {
791  List<String> recv_get_tables = client.get_tables(session);
792  for (String s : recv_get_tables) {
793  if (s.equals(tName)) {
794  return true;
795  }
796  }
797  } catch (TDBException ex) {
798  LOGGER.error("Table check failed - " + ex.getError_msg());
799  exit(3);
800  } catch (TException ex) {
801  LOGGER.error("Table check failed - " + ex.toString());
802  exit(3);
803  }
804  return false;
805  }


void com.mapd.utility.SQLImporter.verifyColumnSignaturesMatch ( Connection  otherdb_conn,
List< TColumnType >  dstColumns,
ResultSetMetaData  srcColumns 
) throws SQLException
inline, private

Definition at line 549 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.cmd.

Referenced by com.mapd.utility.SQLImporter.checkDBTable().

551  {
552  if (srcColumns.getColumnCount() != dstColumns.size()) {
553  LOGGER.error("Table sizes do not match: Destination " + dstColumns.size()
554  + " versus Source " + srcColumns.getColumnCount());
555  exit(1);
556  }
557  for (int i = 1; i <= dstColumns.size(); ++i) {
558  if (!dstColumns.get(i - 1).getCol_name().equalsIgnoreCase(
559  srcColumns.getColumnName(i))) {
560  LOGGER.error(
561  "Destination table does not have matching column in same order for column number "
562  + i + " destination column name is " + dstColumns.get(i - 1).col_name
563  + " versus target column " + srcColumns.getColumnName(i));
564  exit(1);
565  }
566  TDatumType dstType = dstColumns.get(i - 1).getCol_type().getType();
567  int dstPrecision = dstColumns.get(i - 1).getCol_type().getPrecision();
568  int dstScale = dstColumns.get(i - 1).getCol_type().getScale();
569  int srcType = srcColumns.getColumnType(i);
570  int srcPrecision = srcColumns.getPrecision(i);
571  int srcScale = srcColumns.getScale(i);
572 
573  boolean match = false;
574  switch (srcType) {
575  case java.sql.Types.TINYINT:
576  match |= dstType == TDatumType.TINYINT;
577  // NOTE: it's okay to import smaller type to a bigger one,
578  // so we just fall through and try to match the next type.
579  // But the order of case statements is important here!
580  case java.sql.Types.SMALLINT:
581  match |= dstType == TDatumType.SMALLINT;
582  case java.sql.Types.INTEGER:
583  match |= dstType == TDatumType.INT;
584  case java.sql.Types.BIGINT:
585  match |= dstType == TDatumType.BIGINT;
586  if (cmd.hasOption("AllowIntegerNarrowing")) {
587  match |= dstType == TDatumType.TINYINT || dstType == TDatumType.SMALLINT
588  || dstType == TDatumType.INT;
589  }
590  break;
591  case java.sql.Types.DECIMAL:
592  case java.sql.Types.NUMERIC:
593  match = dstType == TDatumType.DECIMAL && dstPrecision == srcPrecision
594  && dstScale == srcScale;
595  break;
596  case java.sql.Types.FLOAT:
597  case java.sql.Types.REAL:
598  match |= dstType == TDatumType.FLOAT;
599  // Fall through and try double
600  case java.sql.Types.DOUBLE:
601  match |= dstType == TDatumType.DOUBLE;
602  if (cmd.hasOption("AllowDoubleToFloat")) {
603  match |= dstType == TDatumType.FLOAT;
604  }
605  break;
606  case java.sql.Types.TIME:
607  match = dstType == TDatumType.TIME;
608  break;
609  case java.sql.Types.TIMESTAMP:
610  match = dstType == TDatumType.TIMESTAMP;
611  break;
612  case java.sql.Types.DATE:
613  match = dstType == TDatumType.DATE;
614  break;
615  case java.sql.Types.BOOLEAN:
616  case java.sql.Types
617  .BIT: // deal with postgres treating boolean as bit... this will bite me
618  match = dstType == TDatumType.BOOL;
619  break;
620  case java.sql.Types.NVARCHAR:
621  case java.sql.Types.VARCHAR:
622  case java.sql.Types.NCHAR:
623  case java.sql.Types.CHAR:
624  case java.sql.Types.LONGVARCHAR:
625  case java.sql.Types.LONGNVARCHAR:
626  match = (dstType == TDatumType.STR || dstType == TDatumType.POINT
627  || dstType == TDatumType.POLYGON || dstType == TDatumType.MULTIPOLYGON
628  || dstType == TDatumType.LINESTRING
629  || dstType == TDatumType.MULTILINESTRING
630  || dstType == TDatumType.MULTIPOINT);
631  break;
632  case java.sql.Types.OTHER:
 633  // NOTE: I ignore subtypes (geography vs geometry vs none) here just because
634  // it makes no difference for OmniSciDB at the moment
635  Db_vendor_types.GisType gisType =
636  vendor_types.find_gis_type(otherdb_conn, srcColumns, i);
637  if (gisType.srid != dstScale) {
638  match = false;
639  break;
640  }
641  switch (dstType) {
642  case POINT:
643  match = gisType.type.equalsIgnoreCase("POINT");
644  break;
645  case MULTIPOINT:
646  match = gisType.type.equalsIgnoreCase("MULTIPOINT");
647  break;
648  case LINESTRING:
649  match = gisType.type.equalsIgnoreCase("LINESTRING");
650  break;
651  case MULTILINESTRING:
652  match = gisType.type.equalsIgnoreCase("MULTILINESTRING");
653  break;
654  case POLYGON:
655  match = gisType.type.equalsIgnoreCase("POLYGON");
656  break;
657  case MULTIPOLYGON:
658  match = gisType.type.equalsIgnoreCase("MULTIPOLYGON");
659  break;
660  default:
661  LOGGER.error("Column type " + JDBCType.valueOf(srcType).getName()
662  + " not Supported");
663  exit(1);
664  }
665  break;
666  default:
667  LOGGER.error("Column type " + JDBCType.valueOf(srcType).getName()
668  + " not Supported");
669  exit(1);
670  }
671  if (!match) {
672  LOGGER.error("Source and destination types for column "
673  + srcColumns.getColumnName(i)
674  + " do not match. Please make sure that type, precision and scale are exactly the same");
675  exit(1);
676  }
677  }
678  }
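
Because the integer cases fall through, a source SMALLINT column is accepted when the destination column is SMALLINT, INT, or BIGINT; the reverse direction (for example a source BIGINT into a destination INT) is only accepted when AllowIntegerNarrowing is set, and a source DOUBLE into a destination FLOAT only when AllowDoubleToFloat is set. DECIMAL/NUMERIC columns must also match precision and scale exactly, and geo columns must match both the GIS type and the SRID (which is compared against the destination column's scale).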


Member Data Documentation

Heavy.Client com.mapd.utility.SQLImporter.client = null
protected

Definition at line 323 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.createDBConnection().

DateTimeUtils com.mapd.utility.SQLImporter.dateTimeUtils
private
Initial value:
= (milliseconds) -> {
return milliseconds / 1000;
}

Definition at line 326 of file SQLImporter.java.

final Logger com.mapd.utility.SQLImporter.LOGGER = LoggerFactory.getLogger(SQLImporter.class)
static, package

Definition at line 325 of file SQLImporter.java.

String com.mapd.utility.SQLImporter.session = null
protected

Definition at line 322 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.createDBConnection(), com.mapd.utility.SQLImporter.executeDBCommand(), com.mapd.utility.SQLImporter.executeQuery(), com.mapd.utility.SQLImporter.getColumnInfo(), and com.mapd.utility.SQLImporter.tableExists().

Db_vendor_types com.mapd.utility.SQLImporter.vendor_types = null
package

Definition at line 330 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.executeQuery().


The documentation for this class was generated from the following file:
SQLImporter.java