OmniSciDB  085a039ca4
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
com.mapd.utility.SQLImporter Class Reference
+ Collaboration diagram for com.mapd.utility.SQLImporter:

Static Public Member Functions

static void main (String[] args)
 

Protected Attributes

String session = null
 
Heavy.Client client = null
 

Package Functions

void doWork (String[] args)
 
void executeQuery ()
 

Package Attributes

Db_vendor_types vendor_types = null
 

Static Package Attributes

static final Logger LOGGER = LoggerFactory.getLogger(SQLImporter.class)
 

Private Member Functions

void run_init (Connection conn)
 
void help (Options options)
 
void checkDBTable (Connection otherdb_conn, ResultSetMetaData md) throws SQLException
 
void verifyColumnSignaturesMatch (Connection otherdb_conn, List< TColumnType > dstColumns, ResultSetMetaData srcColumns) throws SQLException
 
void createDBTable (Connection otherdb_conn, ResultSetMetaData metaData)
 
void createDBConnection ()
 
List< TColumnType > getColumnInfo (String tName)
 
boolean tableExists (String tName)
 
void executeDBCommand (String sql)
 
String getColType (int cType, int precision, int scale)
 
TColumn setupBinaryColumn (int i, ResultSetMetaData md, int bufferSize) throws SQLException
 
void setColValue (ResultSet rs, TColumn col, int columnType, int colNum, int scale, String colTypeName) throws SQLException
 
void resetBinaryColumn (int i, ResultSetMetaData md, int bufferSize, TColumn col) throws SQLException
 

Private Attributes

CommandLine cmd = null
 
DateTimeUtils dateTimeUtils
 

Detailed Description

Definition at line 322 of file SQLImporter.java.

Member Function Documentation

void com.mapd.utility.SQLImporter.checkDBTable ( Connection  otherdb_conn,
ResultSetMetaData  md 
) throws SQLException
inlineprivate

Definition at line 545 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.cmd, com.mapd.utility.SQLImporter.createDBConnection(), com.mapd.utility.SQLImporter.createDBTable(), com.mapd.utility.SQLImporter.executeDBCommand(), com.mapd.utility.SQLImporter.getColumnInfo(), com.mapd.utility.SQLImporter.tableExists(), and com.mapd.utility.SQLImporter.verifyColumnSignaturesMatch().

Referenced by com.mapd.utility.SQLImporter.executeQuery().

546  {
548  String tName = cmd.getOptionValue("targetTable");
549 
550  if (tableExists(tName)) {
551  // check if we want to truncate
552  if (cmd.hasOption("truncate")) {
553  executeDBCommand("Drop table " + tName);
554  createDBTable(otherdb_conn, md);
555  } else {
556  List<TColumnType> columnInfo = getColumnInfo(tName);
557  verifyColumnSignaturesMatch(otherdb_conn, columnInfo, md);
558  }
559  } else {
560  createDBTable(otherdb_conn, md);
561  }
562  }
void createDBTable(Connection otherdb_conn, ResultSetMetaData metaData)
void executeDBCommand(String sql)
List< TColumnType > getColumnInfo(String tName)
void verifyColumnSignaturesMatch(Connection otherdb_conn, List< TColumnType > dstColumns, ResultSetMetaData srcColumns)
boolean tableExists(String tName)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.createDBConnection ( )
inlineprivate

Definition at line 729 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.client, com.mapd.utility.SQLImporter.cmd, and com.mapd.utility.SQLImporter.session.

Referenced by com.mapd.utility.SQLImporter.checkDBTable().

729  {
730  TTransport transport = null;
731  TProtocol protocol = new TBinaryProtocol(transport);
732  int port = Integer.valueOf(cmd.getOptionValue("port", "6274"));
733  String server = cmd.getOptionValue("server", "localhost");
734  try {
735  // Uses default certificate stores.
736  boolean load_trust_store = cmd.hasOption("https");
737  SockTransportProperties skT = null;
738  if (cmd.hasOption("https")) {
739  skT = SockTransportProperties.getEncryptedClientDefaultTrustStore(
740  !cmd.hasOption("insecure"));
741  transport = skT.openHttpsClientTransport(server, port);
742  transport.open();
743  protocol = new TJSONProtocol(transport);
744  } else if (cmd.hasOption("http")) {
745  skT = SockTransportProperties.getUnencryptedClient();
746  transport = skT.openHttpClientTransport(server, port);
747  protocol = new TJSONProtocol(transport);
748  } else {
749  skT = SockTransportProperties.getUnencryptedClient();
750  transport = skT.openClientTransport(server, port);
751  transport.open();
752  protocol = new TBinaryProtocol(transport);
753  }
754 
755  client = new Heavy.Client(protocol);
756  // This if will be useless until PKI signon
757  if (cmd.hasOption("user")) {
758  session = client.connect(cmd.getOptionValue("user", "admin"),
759  cmd.getOptionValue("passwd", "HyperInteractive"),
760  cmd.getOptionValue("database", "omnisci"));
761  }
762  LOGGER.debug("Connected session is " + session);
763 
764  } catch (TTransportException ex) {
765  LOGGER.error("Connection failed - " + ex.toString());
766  exit(1);
767  } catch (TDBException ex) {
768  LOGGER.error("Connection failed - " + ex.getError_msg());
769  exit(2);
770  } catch (TException ex) {
771  LOGGER.error("Connection failed - " + ex.toString());
772  exit(3);
773  } catch (Exception ex) {
774  LOGGER.error("General exception - " + ex.toString());
775  exit(4);
776  }
777  }

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.createDBTable ( Connection  otherdb_conn,
ResultSetMetaData  metaData 
)
inlineprivate

Definition at line 687 of file SQLImporter.java.

References File_Namespace.append(), com.mapd.utility.SQLImporter.cmd, com.mapd.utility.SQLImporter.executeDBCommand(), com.mapd.utility.SQLImporter.getColType(), Integer, and run_benchmark_import.type.

Referenced by com.mapd.utility.SQLImporter.checkDBTable().

687  {
688  StringBuilder sb = new StringBuilder();
689  sb.append("Create table ").append(cmd.getOptionValue("targetTable")).append("(");
690 
691  // Now iterate the metadata
692  try {
693  for (int i = 1; i <= metaData.getColumnCount(); i++) {
694  if (i > 1) {
695  sb.append(",");
696  }
697  LOGGER.debug("Column name is " + metaData.getColumnName(i));
698  LOGGER.debug("Column type is " + metaData.getColumnTypeName(i));
699  LOGGER.debug("Column type is " + metaData.getColumnType(i));
700 
701  sb.append(metaData.getColumnName(i)).append(" ");
702  int col_type = metaData.getColumnType(i);
703  if (col_type == java.sql.Types.OTHER) {
704  Db_vendor_types.GisType type =
705  vendor_types.find_gis_type(otherdb_conn, metaData, i);
706  sb.append(Db_vendor_types.gis_type_to_str(type));
707  } else {
708  sb.append(getColType(metaData.getColumnType(i),
709  metaData.getPrecision(i),
710  metaData.getScale(i)));
711  }
712  }
713  sb.append(")");
714 
715  if (Integer.valueOf(cmd.getOptionValue("fragmentSize", "0")) > 0) {
716  sb.append(" with (fragment_size = ");
717  sb.append(cmd.getOptionValue("fragmentSize", "0"));
718  sb.append(")");
719  }
720 
721  } catch (SQLException ex) {
722  LOGGER.error("Error processing the metadata - " + ex.toString());
723  exit(1);
724  }
725 
726  executeDBCommand(sb.toString());
727  }
size_t append(FILE *f, const size_t size, const int8_t *buf)
Appends the specified number of bytes to the end of the file f from buf.
Definition: File.cpp:168
String getColType(int cType, int precision, int scale)
void executeDBCommand(String sql)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.doWork ( String[]  args)
inlinepackage

Definition at line 338 of file SQLImporter.java.

References run_benchmark_import.args, com.mapd.utility.SQLImporter.cmd, and com.mapd.utility.SQLImporter.executeQuery().

338  {
339  // create Options object
340 
341  SQLImporter_args s_args = new SQLImporter_args();
342 
343  try {
344  cmd = s_args.parse(args);
345  } catch (ParseException ex) {
346  LOGGER.error(ex.getLocalizedMessage());
347  s_args.printHelpMessage();
348  exit(0);
349  }
350  executeQuery();
351  }

+ Here is the call graph for this function:

void com.mapd.utility.SQLImporter.executeDBCommand ( String  sql)
inlineprivate

Definition at line 814 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.session.

Referenced by com.mapd.utility.SQLImporter.checkDBTable(), and com.mapd.utility.SQLImporter.createDBTable().

814  {
815  LOGGER.info("Run Command - " + sql);
816 
817  try {
818  TQueryResult sqlResult = client.sql_execute(session, sql + ";", true, null, -1, -1);
819  } catch (TDBException ex) {
820  LOGGER.error("SQL Execute failed - " + ex.getError_msg());
821  exit(1);
822  } catch (TException ex) {
823  LOGGER.error("SQL Execute failed - " + ex.toString());
824  exit(1);
825  }
826  }

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.executeQuery ( )
inlinepackage

Definition at line 353 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.checkDBTable(), com.mapd.utility.SQLImporter.cmd, com.mapd.utility.db_vendors.Db_vendor_types.isAutoCommitDisabledRequired(), com.mapd.utility.SQLImporter.resetBinaryColumn(), com.mapd.utility.SQLImporter.run_init(), com.mapd.utility.SQLImporter.session, com.mapd.utility.SQLImporter.setColValue(), com.mapd.utility.SQLImporter.setupBinaryColumn(), and com.mapd.utility.SQLImporter.vendor_types.

Referenced by com.mapd.utility.SQLImporter.doWork().

353  {
354  Connection conn = null;
355  Statement stmt = null;
356 
357  long totalTime = 0;
358 
359  try {
360  // Open a connection
361  LOGGER.info("Connecting to database url :" + cmd.getOptionValue("jdbcConnect"));
362  conn = DriverManager.getConnection(cmd.getOptionValue("jdbcConnect"),
363  cmd.getOptionValue("sourceUser"),
364  cmd.getOptionValue("sourcePasswd"));
365  vendor_types = Db_vendor_types.Db_vendor_factory(cmd.getOptionValue("jdbcConnect"));
366  long startTime = System.currentTimeMillis();
367 
368  // run init file script on targe DB if present
369  if (cmd.hasOption("initializeFile")) {
370  run_init(conn);
371  }
372 
373  try {
375  conn.setAutoCommit(false);
376  }
377  } catch (SQLException se) {
378  LOGGER.warn(
379  "SQLException when attempting to setAutoCommit to false, jdbc driver probably doesnt support it. Error is "
380  + se.toString());
381  }
382 
383  // Execute a query
384  stmt = conn.createStatement();
385 
386  int bufferSize = Integer.valueOf(cmd.getOptionValue("bufferSize", "10000"));
387  // set the jdbc fetch buffer size to reduce the amount of records being moved to
388  // java from postgress
389  stmt.setFetchSize(bufferSize);
390  long timer;
391 
392  ResultSet rs = stmt.executeQuery(cmd.getOptionValue("sqlStmt"));
393 
394  // check if table already exists and is compatible in HEAVYAI with the query
395  // metadata
396  ResultSetMetaData md = rs.getMetaData();
397  checkDBTable(conn, md);
398 
399  timer = System.currentTimeMillis();
400 
401  long resultCount = 0;
402  int bufferCount = 0;
403  long total = 0;
404 
405  List<TColumn> cols = new ArrayList(md.getColumnCount());
406  for (int i = 1; i <= md.getColumnCount(); i++) {
407  TColumn col = setupBinaryColumn(i, md, bufferSize);
408  cols.add(col);
409  }
410 
411  boolean assignRenderGroups = !cmd.hasOption("noPolyRenderGroups");
412 
413  // read data from old DB
414  while (rs.next()) {
415  for (int i = 1; i <= md.getColumnCount(); i++) {
416  setColValue(rs,
417  cols.get(i - 1),
418  md.getColumnType(i),
419  i,
420  md.getScale(i),
421  md.getColumnTypeName(i));
422  }
423  resultCount++;
424  bufferCount++;
425  if (bufferCount == bufferSize) {
426  bufferCount = 0;
427  // send the buffer to HEAVY.AI
428  if (assignRenderGroups) {
429  client.load_table_binary_columnar_polys(
430  session, cmd.getOptionValue("targetTable"), cols, null, true);
431  } else {
432  client.load_table_binary_columnar(
433  session, cmd.getOptionValue("targetTable"), cols, null);
434  }
435  // recreate columnar store for use
436  for (int i = 1; i <= md.getColumnCount(); i++) {
437  resetBinaryColumn(i, md, bufferSize, cols.get(i - 1));
438  }
439 
440  if (resultCount % 100000 == 0) {
441  LOGGER.info("Imported " + resultCount + " records");
442  }
443  }
444  }
445  if (bufferCount > 0) {
446  // send the LAST buffer to HEAVY.AI
447  if (assignRenderGroups) {
448  client.load_table_binary_columnar_polys(
449  session, cmd.getOptionValue("targetTable"), cols, null, true);
450  } else {
451  client.load_table_binary_columnar(
452  session, cmd.getOptionValue("targetTable"), cols, null);
453  }
454  bufferCount = 0;
455  }
456 
457  // dump render group assignment data immediately
458  if (assignRenderGroups) {
459  client.load_table_binary_columnar_polys(
460  session, cmd.getOptionValue("targetTable"), null, null, false);
461  }
462 
463  LOGGER.info("result set count is " + resultCount + " read time is "
464  + (System.currentTimeMillis() - timer) + "ms");
465 
466  // Clean-up environment
467  rs.close();
468  stmt.close();
469  conn.close();
470 
471  totalTime = System.currentTimeMillis() - startTime;
472  } catch (SQLException se) {
473  LOGGER.error("SQLException - " + se.toString());
474  se.printStackTrace();
475  } catch (TDBException ex) {
476  LOGGER.error("TDBException - " + ex.getError_msg());
477  ex.printStackTrace();
478  } catch (TException ex) {
479  LOGGER.error("TException failed - " + ex.toString());
480  ex.printStackTrace();
481  } finally {
482  // finally block used to close resources
483  try {
484  if (stmt != null) {
485  stmt.close();
486  }
487  } catch (SQLException se2) {
488  } // nothing we can do
489  try {
490  if (conn != null) {
491  conn.close();
492  }
493  } catch (SQLException se) {
494  LOGGER.error("SQlException in close - " + se.toString());
495  se.printStackTrace();
496  }
497  try {
498  if (session != null) {
499  client.disconnect(session);
500  }
501  } catch (TDBException ex) {
502  LOGGER.error("TDBException - in finalization " + ex.getError_msg());
503  ex.printStackTrace();
504  } catch (TException ex) {
505  LOGGER.error("TException - in finalization" + ex.toString());
506  ex.printStackTrace();
507  }
508  }
509  }
void resetBinaryColumn(int i, ResultSetMetaData md, int bufferSize, TColumn col)
void setColValue(ResultSet rs, TColumn col, int columnType, int colNum, int scale, String colTypeName)
void checkDBTable(Connection otherdb_conn, ResultSetMetaData md)
TColumn setupBinaryColumn(int i, ResultSetMetaData md, int bufferSize)
void run_init(Connection conn)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

String com.mapd.utility.SQLImporter.getColType ( int  cType,
int  precision,
int  scale 
)
inlineprivate

Definition at line 828 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.createDBTable().

828  {
829  // Note - if cType is OTHER a earlier call will have been made
830  // to try and work out the db vendors specific type.
831  if (precision > 19) {
832  precision = 19;
833  }
834  if (scale > 19) {
835  scale = 18;
836  }
837  switch (cType) {
838  case java.sql.Types.TINYINT:
839  return ("TINYINT");
840  case java.sql.Types.SMALLINT:
841  return ("SMALLINT");
842  case java.sql.Types.INTEGER:
843  return ("INTEGER");
844  case java.sql.Types.BIGINT:
845  return ("BIGINT");
846  case java.sql.Types.FLOAT:
847  return ("FLOAT");
848  case java.sql.Types.DECIMAL:
849  return ("DECIMAL(" + precision + "," + scale + ")");
850  case java.sql.Types.DOUBLE:
851  return ("DOUBLE");
852  case java.sql.Types.REAL:
853  return ("REAL");
854  case java.sql.Types.NUMERIC:
855  return ("NUMERIC(" + precision + "," + scale + ")");
856  case java.sql.Types.TIME:
857  return ("TIME");
858  case java.sql.Types.TIMESTAMP:
859  return ("TIMESTAMP");
860  case java.sql.Types.DATE:
861  return ("DATE");
862  case java.sql.Types.BOOLEAN:
863  case java.sql.Types
864  .BIT: // deal with postgress treating boolean as bit... this will bite me
865  return ("BOOLEAN");
866  case java.sql.Types.NVARCHAR:
867  case java.sql.Types.VARCHAR:
868  case java.sql.Types.NCHAR:
869  case java.sql.Types.CHAR:
870  case java.sql.Types.LONGVARCHAR:
871  case java.sql.Types.LONGNVARCHAR:
872  return ("TEXT ENCODING DICT");
873  default:
874  throw new AssertionError("Column type " + cType + " not Supported");
875  }
876  }

+ Here is the caller graph for this function:

List<TColumnType> com.mapd.utility.SQLImporter.getColumnInfo ( String  tName)
inlineprivate

Definition at line 779 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.session.

Referenced by com.mapd.utility.SQLImporter.checkDBTable().

779  {
780  LOGGER.debug("Getting columns for " + tName);
781  List<TColumnType> row_descriptor = null;
782  try {
783  TTableDetails table_details = client.get_table_details(session, tName);
784  row_descriptor = table_details.row_desc;
785  } catch (TDBException ex) {
786  LOGGER.error("column check failed - " + ex.getError_msg());
787  exit(3);
788  } catch (TException ex) {
789  LOGGER.error("column check failed - " + ex.toString());
790  exit(3);
791  }
792  return row_descriptor;
793  }

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.help ( Options  options)
inlineprivate

Definition at line 538 of file SQLImporter.java.

538  {
539  // automatically generate the help statement
540  HelpFormatter formatter = new HelpFormatter();
541  formatter.setOptionComparator(null); // get options in the order they are created
542  formatter.printHelp("SQLImporter", options);
543  }
static void com.mapd.utility.SQLImporter.main ( String[]  args)
inlinestatic

Definition at line 333 of file SQLImporter.java.

References run_benchmark_import.args.

333  {
334  SQLImporter sq = new SQLImporter();
335  sq.doWork(args);
336  }
void com.mapd.utility.SQLImporter.resetBinaryColumn ( int  i,
ResultSetMetaData  md,
int  bufferSize,
TColumn  col 
) throws SQLException
inlineprivate

Definition at line 1050 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.executeQuery().

1051  {
1052  col.nulls.clear();
1053 
1054  switch (md.getColumnType(i)) {
1055  case java.sql.Types.TINYINT:
1056  case java.sql.Types.SMALLINT:
1057  case java.sql.Types.INTEGER:
1058  case java.sql.Types.BIGINT:
1059  case java.sql.Types.TIME:
1060  case java.sql.Types.TIMESTAMP:
1061  case java.sql.Types
1062  .BIT: // deal with postgress treating boolean as bit... this will bite me
1063  case java.sql.Types.BOOLEAN:
1064  case java.sql.Types.DATE:
1065  case java.sql.Types.DECIMAL:
1066  case java.sql.Types.NUMERIC:
1067  col.data.int_col.clear();
1068  break;
1069 
1070  case java.sql.Types.FLOAT:
1071  case java.sql.Types.DOUBLE:
1072  case java.sql.Types.REAL:
1073  col.data.real_col.clear();
1074  break;
1075 
1076  case java.sql.Types.NVARCHAR:
1077  case java.sql.Types.VARCHAR:
1078  case java.sql.Types.NCHAR:
1079  case java.sql.Types.CHAR:
1080  case java.sql.Types.LONGVARCHAR:
1081  case java.sql.Types.LONGNVARCHAR:
1082  case java.sql.Types.OTHER:
1083  col.data.str_col.clear();
1084  break;
1085  default:
1086  throw new AssertionError("Column type " + md.getColumnType(i) + " not Supported");
1087  }
1088  }

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.run_init ( Connection  conn)
inlineprivate

Definition at line 511 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.cmd, and parse_ast.line.

Referenced by com.mapd.utility.SQLImporter.executeQuery().

511  {
512  // attempt to open file
513  String line = "";
514  try {
515  BufferedReader reader =
516  new BufferedReader(new FileReader(cmd.getOptionValue("initializeFile")));
517  Statement stmt = conn.createStatement();
518  while ((line = reader.readLine()) != null) {
519  if (line.isEmpty()) {
520  continue;
521  }
522  LOGGER.info("Running : " + line);
523  stmt.execute(line);
524  }
525  stmt.close();
526  reader.close();
527  } catch (IOException e) {
528  LOGGER.error("Exception occurred trying to read initialize file: "
529  + cmd.getOptionValue("initFile"));
530  exit(1);
531  } catch (SQLException e) {
532  LOGGER.error(
533  "Exception occurred trying to execute initialize file entry : " + line);
534  exit(1);
535  }
536  }
tuple line
Definition: parse_ast.py:10

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.setColValue ( ResultSet  rs,
TColumn  col,
int  columnType,
int  colNum,
int  scale,
String  colTypeName 
) throws SQLException
inlineprivate

Definition at line 924 of file SQLImporter.java.

References heavydb.dtypes.Date, Double, and heavydb.dtypes.Time.

Referenced by com.mapd.utility.SQLImporter.executeQuery().

929  {
930  switch (columnType) {
931  case java.sql.Types
932  .BIT: // deal with postgress treating boolean as bit... this will bite me
933  case java.sql.Types.BOOLEAN:
934  Boolean b = rs.getBoolean(colNum);
935  if (rs.wasNull()) {
936  col.nulls.add(Boolean.TRUE);
937  col.data.int_col.add(0L);
938  } else {
939  col.nulls.add(Boolean.FALSE);
940  col.data.int_col.add(b ? 1L : 0L);
941  }
942  break;
943 
944  case java.sql.Types.DECIMAL:
945  case java.sql.Types.NUMERIC:
946  BigDecimal bd = rs.getBigDecimal(colNum);
947  if (rs.wasNull()) {
948  col.nulls.add(Boolean.TRUE);
949  col.data.int_col.add(0L);
950  } else {
951  col.nulls.add(Boolean.FALSE);
952  col.data.int_col.add(bd.multiply(new BigDecimal(pow(10L, scale))).longValue());
953  }
954  break;
955 
956  case java.sql.Types.TINYINT:
957  case java.sql.Types.SMALLINT:
958  case java.sql.Types.INTEGER:
959  case java.sql.Types.BIGINT:
960  Long l = rs.getLong(colNum);
961  if (rs.wasNull()) {
962  col.nulls.add(Boolean.TRUE);
963  col.data.int_col.add(new Long(0));
964  } else {
965  col.nulls.add(Boolean.FALSE);
966  col.data.int_col.add(l);
967  }
968  break;
969 
970  case java.sql.Types.TIME:
971  Time t = rs.getTime(colNum);
972  if (rs.wasNull()) {
973  col.nulls.add(Boolean.TRUE);
974  col.data.int_col.add(0L);
975 
976  } else {
977  col.data.int_col.add(dateTimeUtils.getSecondsFromMilliseconds(t.getTime()));
978  col.nulls.add(Boolean.FALSE);
979  }
980 
981  break;
982  case java.sql.Types.TIMESTAMP:
983  Timestamp ts = rs.getTimestamp(colNum);
984  if (rs.wasNull()) {
985  col.nulls.add(Boolean.TRUE);
986  col.data.int_col.add(0L);
987 
988  } else {
989  col.data.int_col.add(dateTimeUtils.getSecondsFromMilliseconds(ts.getTime()));
990  col.nulls.add(Boolean.FALSE);
991  }
992 
993  break;
994  case java.sql.Types.DATE:
995  Date d = rs.getDate(colNum);
996  if (rs.wasNull()) {
997  col.nulls.add(Boolean.TRUE);
998  col.data.int_col.add(0L);
999 
1000  } else {
1001  col.data.int_col.add(dateTimeUtils.getSecondsFromMilliseconds(d.getTime()));
1002  col.nulls.add(Boolean.FALSE);
1003  }
1004  break;
1005  case java.sql.Types.FLOAT:
1006  case java.sql.Types.DOUBLE:
1007  case java.sql.Types.REAL:
1008  Double db = rs.getDouble(colNum);
1009  if (rs.wasNull()) {
1010  col.nulls.add(Boolean.TRUE);
1011  col.data.real_col.add(new Double(0));
1012 
1013  } else {
1014  col.nulls.add(Boolean.FALSE);
1015  col.data.real_col.add(db);
1016  }
1017  break;
1018 
1019  case java.sql.Types.NVARCHAR:
1020  case java.sql.Types.VARCHAR:
1021  case java.sql.Types.NCHAR:
1022  case java.sql.Types.CHAR:
1023  case java.sql.Types.LONGVARCHAR:
1024  case java.sql.Types.LONGNVARCHAR:
1025  String strVal = rs.getString(colNum);
1026  if (rs.wasNull()) {
1027  col.nulls.add(Boolean.TRUE);
1028  col.data.str_col.add("");
1029 
1030  } else {
1031  col.data.str_col.add(strVal);
1032  col.nulls.add(Boolean.FALSE);
1033  }
1034  break;
1035  case java.sql.Types.OTHER:
1036  Object objVal = rs.getObject(colNum);
1037  if (rs.wasNull()) {
1038  col.nulls.add(Boolean.TRUE);
1039  col.data.str_col.add("");
1040  } else {
1041  col.data.str_col.add(vendor_types.get_wkt(rs, colNum, colTypeName));
1042  col.nulls.add(Boolean.FALSE);
1043  }
1044  break;
1045  default:
1046  throw new AssertionError("Column type " + columnType + " not Supported");
1047  }
1048  }

+ Here is the caller graph for this function:

TColumn com.mapd.utility.SQLImporter.setupBinaryColumn ( int  i,
ResultSetMetaData  md,
int  bufferSize 
) throws SQLException
inlineprivate

Definition at line 878 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.executeQuery().

879  {
880  TColumn col = new TColumn();
881 
882  col.nulls = new ArrayList<Boolean>(bufferSize);
883 
884  col.data = new TColumnData();
885 
886  switch (md.getColumnType(i)) {
887  case java.sql.Types.TINYINT:
888  case java.sql.Types.SMALLINT:
889  case java.sql.Types.INTEGER:
890  case java.sql.Types.BIGINT:
891  case java.sql.Types.TIME:
892  case java.sql.Types.TIMESTAMP:
893  case java.sql.Types
894  .BIT: // deal with postgress treating boolean as bit... this will bite me
895  case java.sql.Types.BOOLEAN:
896  case java.sql.Types.DATE:
897  case java.sql.Types.DECIMAL:
898  case java.sql.Types.NUMERIC:
899  col.data.int_col = new ArrayList<Long>(bufferSize);
900  break;
901 
902  case java.sql.Types.FLOAT:
903  case java.sql.Types.DOUBLE:
904  case java.sql.Types.REAL:
905  col.data.real_col = new ArrayList<Double>(bufferSize);
906  break;
907 
908  case java.sql.Types.NVARCHAR:
909  case java.sql.Types.VARCHAR:
910  case java.sql.Types.NCHAR:
911  case java.sql.Types.CHAR:
912  case java.sql.Types.LONGVARCHAR:
913  case java.sql.Types.LONGNVARCHAR:
914  case java.sql.Types.OTHER:
915  col.data.str_col = new ArrayList<String>(bufferSize);
916  break;
917 
918  default:
919  throw new AssertionError("Column type " + md.getColumnType(i) + " not Supported");
920  }
921  return col;
922  }

+ Here is the caller graph for this function:

boolean com.mapd.utility.SQLImporter.tableExists ( String  tName)
inlineprivate

Definition at line 795 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.session.

Referenced by com.mapd.utility.SQLImporter.checkDBTable().

795  {
796  LOGGER.debug("Check for table " + tName);
797  try {
798  List<String> recv_get_tables = client.get_tables(session);
799  for (String s : recv_get_tables) {
800  if (s.equals(tName)) {
801  return true;
802  }
803  }
804  } catch (TDBException ex) {
805  LOGGER.error("Table check failed - " + ex.getError_msg());
806  exit(3);
807  } catch (TException ex) {
808  LOGGER.error("Table check failed - " + ex.toString());
809  exit(3);
810  }
811  return false;
812  }

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.verifyColumnSignaturesMatch ( Connection  otherdb_conn,
List< TColumnType >  dstColumns,
ResultSetMetaData  srcColumns 
) throws SQLException
inlineprivate

Definition at line 564 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.cmd.

Referenced by com.mapd.utility.SQLImporter.checkDBTable().

// NOTE(review): Doxygen listing of SQLImporter.verifyColumnSignaturesMatch.
// Validates that each destination column matches the corresponding source
// (JDBC) column by position, name (case-insensitive) and type. Any mismatch
// is logged and the whole process terminates via exit(1) — there is no
// recovery path.
566  {
567  if (srcColumns.getColumnCount() != dstColumns.size()) {
568  LOGGER.error("Table sizes do not match: Destination " + dstColumns.size()
569  + " versus Source " + srcColumns.getColumnCount());
570  exit(1);
571  }
// JDBC ResultSetMetaData columns are 1-based; dstColumns is 0-based.
572  for (int i = 1; i <= dstColumns.size(); ++i) {
573  if (!dstColumns.get(i - 1).getCol_name().equalsIgnoreCase(
574  srcColumns.getColumnName(i))) {
575  LOGGER.error(
576  "Destination table does not have matching column in same order for column number "
577  + i + " destination column name is " + dstColumns.get(i - 1).col_name
578  + " versus target column " + srcColumns.getColumnName(i));
579  exit(1);
580  }
581  TDatumType dstType = dstColumns.get(i - 1).getCol_type().getType();
582  int dstPrecision = dstColumns.get(i - 1).getCol_type().getPrecision();
583  int dstScale = dstColumns.get(i - 1).getCol_type().getScale();
584  int srcType = srcColumns.getColumnType(i);
585  int srcPrecision = srcColumns.getPrecision(i);
586  int srcScale = srcColumns.getScale(i);
587 
588  boolean match = false;
// The integer cases below fall through DELIBERATELY: a smaller source type
// may land in any equal-or-wider destination type. Do not add break
// statements or reorder these cases.
589  switch (srcType) {
590  case java.sql.Types.TINYINT:
591  match |= dstType == TDatumType.TINYINT;
592  // NOTE: it's okay to import smaller type to a bigger one,
593  // so we just fall through and try to match the next type.
594  // But the order of case statements is important here!
595  case java.sql.Types.SMALLINT:
596  match |= dstType == TDatumType.SMALLINT;
597  case java.sql.Types.INTEGER:
598  match |= dstType == TDatumType.INT;
599  case java.sql.Types.BIGINT:
600  match |= dstType == TDatumType.BIGINT;
// Opt-in narrowing in the other direction, gated by a command-line flag.
601  if (cmd.hasOption("AllowIntegerNarrowing")) {
602  match |= dstType == TDatumType.TINYINT || dstType == TDatumType.SMALLINT
603  || dstType == TDatumType.INT;
604  }
605  break;
606  case java.sql.Types.DECIMAL:
607  case java.sql.Types.NUMERIC:
// DECIMAL/NUMERIC require an exact precision AND scale match.
608  match = dstType == TDatumType.DECIMAL && dstPrecision == srcPrecision
609  && dstScale == srcScale;
610  break;
611  case java.sql.Types.FLOAT:
612  case java.sql.Types.REAL:
613  match |= dstType == TDatumType.FLOAT;
614  // Fall through and try double
615  case java.sql.Types.DOUBLE:
616  match |= dstType == TDatumType.DOUBLE;
617  if (cmd.hasOption("AllowDoubleToFloat")) {
618  match |= dstType == TDatumType.FLOAT;
619  }
620  break;
621  case java.sql.Types.TIME:
622  match = dstType == TDatumType.TIME;
623  break;
624  case java.sql.Types.TIMESTAMP:
625  match = dstType == TDatumType.TIMESTAMP;
626  break;
627  case java.sql.Types.DATE:
628  match = dstType == TDatumType.DATE;
629  break;
630  case java.sql.Types.BOOLEAN:
631  case java.sql.Types
632  .BIT: // deal with postgres treating boolean as bit... this will bite me
633  match = dstType == TDatumType.BOOL;
634  break;
635  case java.sql.Types.NVARCHAR:
636  case java.sql.Types.VARCHAR:
637  case java.sql.Types.NCHAR:
638  case java.sql.Types.CHAR:
639  case java.sql.Types.LONGVARCHAR:
640  case java.sql.Types.LONGNVARCHAR:
// Geo destination types are accepted for string sources — presumably the
// strings carry WKT geometry; TODO confirm against the import path.
641  match = (dstType == TDatumType.STR || dstType == TDatumType.POINT
642  || dstType == TDatumType.POLYGON || dstType == TDatumType.MULTIPOLYGON
643  || dstType == TDatumType.LINESTRING);
644  break;
645  case java.sql.Types.OTHER:
646  // NOTE: I ignore subtypes (geography vs geometry vs none) here just because
647  // it makes no difference for OmniSciDB at the moment
648  Db_vendor_types.GisType gisType =
649  vendor_types.find_gis_type(otherdb_conn, srcColumns, i);
// NOTE(review): the destination scale slot appears to encode the SRID for
// geo columns (it is compared against gisType.srid) — confirm.
650  if (gisType.srid != dstScale) {
651  match = false;
652  break;
653  }
654  switch (dstType) {
655  case POINT:
656  match = gisType.type.equalsIgnoreCase("POINT");
657  break;
658  case LINESTRING:
659  match = gisType.type.equalsIgnoreCase("LINESTRING");
660  break;
661  case POLYGON:
662  match = gisType.type.equalsIgnoreCase("POLYGON");
663  break;
664  case MULTIPOLYGON:
665  match = gisType.type.equalsIgnoreCase("MULTIPOLYGON");
666  break;
667  default:
668  LOGGER.error("Column type " + JDBCType.valueOf(srcType).getName()
669  + " not Supported");
670  exit(1);
671  }
672  break;
673  default:
674  LOGGER.error("Column type " + JDBCType.valueOf(srcType).getName()
675  + " not Supported");
676  exit(1);
677  }
678  if (!match) {
679  LOGGER.error("Source and destination types for column "
680  + srcColumns.getColumnName(i)
681  + " do not match. Please make sure that type, precision and scale are exactly the same");
682  exit(1);
683  }
684  }
685  }

+ Here is the caller graph for this function:

Member Data Documentation

Heavy.Client com.mapd.utility.SQLImporter.client = null
protected

Definition at line 324 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.createDBConnection().

DateTimeUtils com.mapd.utility.SQLImporter.dateTimeUtils
private
Initial value:
= (milliseconds) -> {
return milliseconds / 1000;
}

Definition at line 327 of file SQLImporter.java.

final Logger com.mapd.utility.SQLImporter.LOGGER = LoggerFactory.getLogger(SQLImporter.class)
staticpackage

Definition at line 326 of file SQLImporter.java.

String com.mapd.utility.SQLImporter.session = null
protected

Definition at line 323 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.createDBConnection(), com.mapd.utility.SQLImporter.executeDBCommand(), com.mapd.utility.SQLImporter.executeQuery(), com.mapd.utility.SQLImporter.getColumnInfo(), heavydb.thrift.Heavy.disconnect_args.read(), heavydb.thrift.Heavy.switch_database_args.read(), heavydb.thrift.Heavy.clone_session_args.read(), heavydb.thrift.Heavy.get_server_status_args.read(), heavydb.thrift.Heavy.get_status_args.read(), heavydb.thrift.Heavy.get_hardware_info_args.read(), heavydb.thrift.Heavy.get_tables_args.read(), heavydb.thrift.Heavy.get_tables_for_database_args.read(), heavydb.thrift.Heavy.get_physical_tables_args.read(), heavydb.thrift.Heavy.get_views_args.read(), heavydb.thrift.Heavy.get_tables_meta_args.read(), heavydb.thrift.Heavy.get_table_details_args.read(), heavydb.thrift.Heavy.get_table_details_for_database_args.read(), heavydb.thrift.Heavy.get_internal_table_details_args.read(), heavydb.thrift.Heavy.get_internal_table_details_for_database_args.read(), heavydb.thrift.Heavy.get_users_args.read(), heavydb.thrift.Heavy.get_databases_args.read(), heavydb.thrift.Heavy.start_heap_profile_args.read(), heavydb.thrift.Heavy.stop_heap_profile_args.read(), heavydb.thrift.Heavy.get_heap_profile_args.read(), heavydb.thrift.Heavy.get_memory_args.read(), heavydb.thrift.Heavy.clear_cpu_memory_args.read(), heavydb.thrift.Heavy.clear_gpu_memory_args.read(), heavydb.thrift.Heavy.set_table_epoch_args.read(), heavydb.thrift.Heavy.set_table_epoch_by_name_args.read(), heavydb.thrift.Heavy.get_table_epoch_args.read(), heavydb.thrift.Heavy.get_table_epoch_by_name_args.read(), heavydb.thrift.Heavy.get_table_epochs_args.read(), heavydb.thrift.Heavy.set_table_epochs_args.read(), heavydb.thrift.Heavy.get_session_info_args.read(), heavydb.thrift.Heavy.get_queries_info_args.read(), heavydb.thrift.Heavy.set_leaf_info_args.read(), heavydb.thrift.Heavy.sql_execute_args.read(), heavydb.thrift.Heavy.sql_execute_df_args.read(), 
heavydb.thrift.Heavy.sql_execute_gdf_args.read(), heavydb.thrift.Heavy.deallocate_df_args.read(), heavydb.thrift.Heavy.sql_validate_args.read(), heavydb.thrift.Heavy.get_completion_hints_args.read(), heavydb.thrift.Heavy.set_execution_mode_args.read(), heavydb.thrift.Heavy.render_vega_args.read(), heavydb.thrift.Heavy.get_result_row_for_pixel_args.read(), heavydb.thrift.Heavy.create_custom_expression_args.read(), heavydb.thrift.Heavy.get_custom_expressions_args.read(), heavydb.thrift.Heavy.update_custom_expression_args.read(), heavydb.thrift.Heavy.delete_custom_expressions_args.read(), heavydb.thrift.Heavy.get_dashboard_args.read(), heavydb.thrift.Heavy.get_dashboards_args.read(), heavydb.thrift.Heavy.create_dashboard_args.read(), heavydb.thrift.Heavy.replace_dashboard_args.read(), heavydb.thrift.Heavy.delete_dashboard_args.read(), heavydb.thrift.Heavy.share_dashboards_args.read(), heavydb.thrift.Heavy.delete_dashboards_args.read(), heavydb.thrift.Heavy.share_dashboard_args.read(), heavydb.thrift.Heavy.unshare_dashboard_args.read(), heavydb.thrift.Heavy.unshare_dashboards_args.read(), heavydb.thrift.Heavy.get_dashboard_grantees_args.read(), heavydb.thrift.Heavy.get_link_view_args.read(), heavydb.thrift.Heavy.create_link_args.read(), heavydb.thrift.Heavy.load_table_binary_args.read(), heavydb.thrift.Heavy.load_table_binary_columnar_args.read(), heavydb.thrift.Heavy.load_table_binary_columnar_polys_args.read(), heavydb.thrift.Heavy.load_table_binary_arrow_args.read(), heavydb.thrift.Heavy.load_table_args.read(), heavydb.thrift.Heavy.detect_column_types_args.read(), heavydb.thrift.Heavy.create_table_args.read(), heavydb.thrift.Heavy.import_table_args.read(), heavydb.thrift.Heavy.import_geo_table_args.read(), heavydb.thrift.Heavy.import_table_status_args.read(), heavydb.thrift.Heavy.get_first_geo_file_in_archive_args.read(), heavydb.thrift.Heavy.get_all_files_in_archive_args.read(), heavydb.thrift.Heavy.get_layers_in_geo_file_args.read(), 
heavydb.thrift.Heavy.query_get_outer_fragment_count_args.read(), heavydb.thrift.Heavy.check_table_consistency_args.read(), heavydb.thrift.Heavy.start_render_query_args.read(), heavydb.thrift.Heavy.insert_data_args.read(), heavydb.thrift.Heavy.insert_chunks_args.read(), heavydb.thrift.Heavy.checkpoint_args.read(), heavydb.thrift.Heavy.get_roles_args.read(), heavydb.thrift.Heavy.get_db_objects_for_grantee_args.read(), heavydb.thrift.Heavy.get_db_object_privs_args.read(), heavydb.thrift.Heavy.get_all_roles_for_user_args.read(), heavydb.thrift.Heavy.get_all_effective_roles_for_user_args.read(), heavydb.thrift.Heavy.has_role_args.read(), heavydb.thrift.Heavy.has_object_privilege_args.read(), heavydb.thrift.Heavy.set_license_key_args.read(), heavydb.thrift.Heavy.get_license_claims_args.read(), heavydb.thrift.Heavy.get_device_parameters_args.read(), heavydb.thrift.Heavy.register_runtime_extension_functions_args.read(), heavydb.thrift.Heavy.get_table_function_names_args.read(), heavydb.thrift.Heavy.get_runtime_table_function_names_args.read(), heavydb.thrift.Heavy.get_table_function_details_args.read(), com.mapd.utility.SQLImporter.tableExists(), heavydb.thrift.Heavy.disconnect_args.write(), heavydb.thrift.Heavy.switch_database_args.write(), heavydb.thrift.Heavy.clone_session_args.write(), heavydb.thrift.Heavy.get_server_status_args.write(), heavydb.thrift.Heavy.get_status_args.write(), heavydb.thrift.Heavy.get_hardware_info_args.write(), heavydb.thrift.Heavy.get_tables_args.write(), heavydb.thrift.Heavy.get_tables_for_database_args.write(), heavydb.thrift.Heavy.get_physical_tables_args.write(), heavydb.thrift.Heavy.get_views_args.write(), heavydb.thrift.Heavy.get_tables_meta_args.write(), heavydb.thrift.Heavy.get_table_details_args.write(), heavydb.thrift.Heavy.get_table_details_for_database_args.write(), heavydb.thrift.Heavy.get_internal_table_details_args.write(), heavydb.thrift.Heavy.get_internal_table_details_for_database_args.write(), 
heavydb.thrift.Heavy.get_users_args.write(), heavydb.thrift.Heavy.get_databases_args.write(), heavydb.thrift.Heavy.start_heap_profile_args.write(), heavydb.thrift.Heavy.stop_heap_profile_args.write(), heavydb.thrift.Heavy.get_heap_profile_args.write(), heavydb.thrift.Heavy.get_memory_args.write(), heavydb.thrift.Heavy.clear_cpu_memory_args.write(), heavydb.thrift.Heavy.clear_gpu_memory_args.write(), heavydb.thrift.Heavy.set_table_epoch_args.write(), heavydb.thrift.Heavy.set_table_epoch_by_name_args.write(), heavydb.thrift.Heavy.get_table_epoch_args.write(), heavydb.thrift.Heavy.get_table_epoch_by_name_args.write(), heavydb.thrift.Heavy.get_table_epochs_args.write(), heavydb.thrift.Heavy.set_table_epochs_args.write(), heavydb.thrift.Heavy.get_session_info_args.write(), heavydb.thrift.Heavy.get_queries_info_args.write(), heavydb.thrift.Heavy.set_leaf_info_args.write(), heavydb.thrift.Heavy.sql_execute_args.write(), heavydb.thrift.Heavy.sql_execute_df_args.write(), heavydb.thrift.Heavy.sql_execute_gdf_args.write(), heavydb.thrift.Heavy.deallocate_df_args.write(), heavydb.thrift.Heavy.sql_validate_args.write(), heavydb.thrift.Heavy.get_completion_hints_args.write(), heavydb.thrift.Heavy.set_execution_mode_args.write(), heavydb.thrift.Heavy.render_vega_args.write(), heavydb.thrift.Heavy.get_result_row_for_pixel_args.write(), heavydb.thrift.Heavy.create_custom_expression_args.write(), heavydb.thrift.Heavy.get_custom_expressions_args.write(), heavydb.thrift.Heavy.update_custom_expression_args.write(), heavydb.thrift.Heavy.delete_custom_expressions_args.write(), heavydb.thrift.Heavy.get_dashboard_args.write(), heavydb.thrift.Heavy.get_dashboards_args.write(), heavydb.thrift.Heavy.create_dashboard_args.write(), heavydb.thrift.Heavy.replace_dashboard_args.write(), heavydb.thrift.Heavy.delete_dashboard_args.write(), heavydb.thrift.Heavy.share_dashboards_args.write(), heavydb.thrift.Heavy.delete_dashboards_args.write(), heavydb.thrift.Heavy.share_dashboard_args.write(), 
heavydb.thrift.Heavy.unshare_dashboard_args.write(), heavydb.thrift.Heavy.unshare_dashboards_args.write(), heavydb.thrift.Heavy.get_dashboard_grantees_args.write(), heavydb.thrift.Heavy.get_link_view_args.write(), heavydb.thrift.Heavy.create_link_args.write(), heavydb.thrift.Heavy.load_table_binary_args.write(), heavydb.thrift.Heavy.load_table_binary_columnar_args.write(), heavydb.thrift.Heavy.load_table_binary_columnar_polys_args.write(), heavydb.thrift.Heavy.load_table_binary_arrow_args.write(), heavydb.thrift.Heavy.load_table_args.write(), heavydb.thrift.Heavy.detect_column_types_args.write(), heavydb.thrift.Heavy.create_table_args.write(), heavydb.thrift.Heavy.import_table_args.write(), heavydb.thrift.Heavy.import_geo_table_args.write(), heavydb.thrift.Heavy.import_table_status_args.write(), heavydb.thrift.Heavy.get_first_geo_file_in_archive_args.write(), heavydb.thrift.Heavy.get_all_files_in_archive_args.write(), heavydb.thrift.Heavy.get_layers_in_geo_file_args.write(), heavydb.thrift.Heavy.query_get_outer_fragment_count_args.write(), heavydb.thrift.Heavy.check_table_consistency_args.write(), heavydb.thrift.Heavy.start_render_query_args.write(), heavydb.thrift.Heavy.insert_data_args.write(), heavydb.thrift.Heavy.insert_chunks_args.write(), heavydb.thrift.Heavy.checkpoint_args.write(), heavydb.thrift.Heavy.get_roles_args.write(), heavydb.thrift.Heavy.get_db_objects_for_grantee_args.write(), heavydb.thrift.Heavy.get_db_object_privs_args.write(), heavydb.thrift.Heavy.get_all_roles_for_user_args.write(), heavydb.thrift.Heavy.get_all_effective_roles_for_user_args.write(), heavydb.thrift.Heavy.has_role_args.write(), heavydb.thrift.Heavy.has_object_privilege_args.write(), heavydb.thrift.Heavy.set_license_key_args.write(), heavydb.thrift.Heavy.get_license_claims_args.write(), heavydb.thrift.Heavy.get_device_parameters_args.write(), heavydb.thrift.Heavy.register_runtime_extension_functions_args.write(), heavydb.thrift.Heavy.get_table_function_names_args.write(), 
heavydb.thrift.Heavy.get_runtime_table_function_names_args.write(), and heavydb.thrift.Heavy.get_table_function_details_args.write().

Db_vendor_types com.mapd.utility.SQLImporter.vendor_types = null
package

Definition at line 331 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.executeQuery().


The documentation for this class was generated from the following file: