OmniSciDB  343343d194
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
com.mapd.utility.SQLImporter Class Reference
+ Collaboration diagram for com.mapd.utility.SQLImporter:

Static Public Member Functions

static void main (String[] args)
 

Protected Attributes

String session = null
 
MapD.Client client = null
 

Package Functions

void doWork (String[] args)
 
void executeQuery ()
 

Package Attributes

Db_vendor_types vendor_types = null
 

Static Package Attributes

static final Logger LOGGER = LoggerFactory.getLogger(SQLImporter.class)
 

Private Member Functions

void run_init (Connection conn)
 
void help (Options options)
 
void checkMapDTable (Connection otherdb_conn, ResultSetMetaData md) throws SQLException
 
void createMapDTable (Connection otherdb_conn, ResultSetMetaData metaData)
 
void createMapDConnection ()
 
List< TColumnType > getColumnInfo (String tName)
 
boolean tableExists (String tName)
 
void executeMapDCommand (String sql)
 
String getColType (int cType, int precision, int scale)
 
TColumn setupBinaryColumn (int i, ResultSetMetaData md, int bufferSize) throws SQLException
 
void setColValue (ResultSet rs, TColumn col, int columnType, int colNum, int scale, String colTypeName) throws SQLException
 
void resetBinaryColumn (int i, ResultSetMetaData md, int bufferSize, TColumn col) throws SQLException
 

Private Attributes

CommandLine cmd = null
 
DateTimeUtils dateTimeUtils
 

Detailed Description

Definition at line 307 of file SQLImporter.java.

Member Function Documentation

void com.mapd.utility.SQLImporter.checkMapDTable ( Connection  otherdb_conn,
ResultSetMetaData  md 
) throws SQLException
inlineprivate

Definition at line 499 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.cmd, com.mapd.utility.SQLImporter.createMapDConnection(), com.mapd.utility.SQLImporter.createMapDTable(), com.mapd.utility.SQLImporter.executeMapDCommand(), com.mapd.utility.SQLImporter.getColumnInfo(), Experimental.String, and com.mapd.utility.SQLImporter.tableExists().

Referenced by com.mapd.utility.SQLImporter.executeQuery().

500  {
502  String tName = cmd.getOptionValue("targetTable");
503 
504  if (tableExists(tName)) {
505  // check if we want to truncate
506  if (cmd.hasOption("truncate")) {
507  executeMapDCommand("Drop table " + tName);
508  createMapDTable(otherdb_conn, md);
509  } else {
510  List<TColumnType> columnInfo = getColumnInfo(tName);
511  // table exists lets check it has same number of columns
512 
513  if (md.getColumnCount() != columnInfo.size()) {
514  LOGGER.error("Table sizes do not match - OmniSci " + columnInfo.size()
515  + " versus Select " + md.getColumnCount());
516  exit(1);
517  }
518  // table exists lets check it is same layout - check names will do for now
519  // Note weird start from 1 and reduce index by one is due to sql metadata
520  // beginning with 1 not 0
521  for (int colNum = 1; colNum <= columnInfo.size(); colNum++) {
522  if (!columnInfo.get(colNum - 1)
523  .col_name.equalsIgnoreCase(md.getColumnName(colNum))) {
524  LOGGER.error(
525  "OmniSci Table does not have matching column in same order for column number"
526  + colNum + " OmniSci column name is "
527  + columnInfo.get(colNum - 1).col_name + " versus Select "
528  + md.getColumnName(colNum));
529  exit(1);
530  }
531  }
532  }
533  } else {
534  createMapDTable(otherdb_conn, md);
535  }
536  }
List< TColumnType > getColumnInfo(String tName)
void createMapDTable(Connection otherdb_conn, ResultSetMetaData metaData)
boolean tableExists(String tName)
void executeMapDCommand(String sql)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.createMapDConnection ( )
inlineprivate

Definition at line 580 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.client, com.mapd.utility.SQLImporter.cmd, com.mapd.utility.SQLImporter.session, and Experimental.String.

Referenced by com.mapd.utility.SQLImporter.checkMapDTable().

580  {
581  TTransport transport = null;
582  TProtocol protocol = new TBinaryProtocol(transport);
583  int port = Integer.valueOf(cmd.getOptionValue("port", "6274"));
584  String server = cmd.getOptionValue("server", "localhost");
585  try {
586  // Uses default certificate stores.
587  boolean load_trust_store = cmd.hasOption("https");
588  SockTransportProperties skT = null;
589  if (cmd.hasOption("https")) {
590  skT = new SockTransportProperties(load_trust_store & !cmd.hasOption("insecure"));
591  transport = skT.openHttpsClientTransport(server, port);
592  transport.open();
593  protocol = new TJSONProtocol(transport);
594  } else if (cmd.hasOption("http")) {
595  skT = new SockTransportProperties(load_trust_store);
596  transport = skT.openHttpClientTransport(server, port);
597  protocol = new TJSONProtocol(transport);
598  } else {
599  skT = new SockTransportProperties(load_trust_store);
600  transport = skT.openClientTransport(server, port);
601  transport.open();
602  protocol = new TBinaryProtocol(transport);
603  }
604 
605  client = new MapD.Client(protocol);
606  // This if will be useless until PKI signon
607  if (cmd.hasOption("user")) {
608  session = client.connect(cmd.getOptionValue("user", "admin"),
609  cmd.getOptionValue("passwd", "HyperInteractive"),
610  cmd.getOptionValue("database", "omnisci"));
611  }
612  LOGGER.debug("Connected session is " + session);
613 
614  } catch (TTransportException ex) {
615  LOGGER.error("Connection failed - " + ex.toString());
616  exit(1);
617  } catch (TMapDException ex) {
618  LOGGER.error("Connection failed - " + ex.toString());
619  exit(2);
620  } catch (TException ex) {
621  LOGGER.error("Connection failed - " + ex.toString());
622  exit(3);
623  } catch (Exception ex) {
624  LOGGER.error("General exception - " + ex.toString());
625  exit(4);
626  }
627  }

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.createMapDTable ( Connection  otherdb_conn,
ResultSetMetaData  metaData 
)
inlineprivate

Definition at line 538 of file SQLImporter.java.

References File_Namespace.append(), com.mapd.utility.SQLImporter.cmd, com.mapd.utility.SQLImporter.executeMapDCommand(), com.mapd.utility.SQLImporter.getColType(), and Integer.

Referenced by com.mapd.utility.SQLImporter.checkMapDTable().

538  {
539  StringBuilder sb = new StringBuilder();
540  sb.append("Create table ").append(cmd.getOptionValue("targetTable")).append("(");
541 
542  // Now iterate the metadata
543  try {
544  for (int i = 1; i <= metaData.getColumnCount(); i++) {
545  if (i > 1) {
546  sb.append(",");
547  }
548  LOGGER.debug("Column name is " + metaData.getColumnName(i));
549  LOGGER.debug("Column type is " + metaData.getColumnTypeName(i));
550  LOGGER.debug("Column type is " + metaData.getColumnType(i));
551 
552  sb.append(metaData.getColumnName(i)).append(" ");
553  int col_type = metaData.getColumnType(i);
554  if (col_type == java.sql.Types.OTHER) {
555  sb.append(vendor_types.find_gis_type(otherdb_conn,
556  metaData.getColumnName(i),
557  metaData.getColumnTypeName(i)));
558  } else {
559  sb.append(getColType(metaData.getColumnType(i),
560  metaData.getPrecision(i),
561  metaData.getScale(i)));
562  }
563  }
564  sb.append(")");
565 
566  if (Integer.valueOf(cmd.getOptionValue("fragmentSize", "0")) > 0) {
567  sb.append(" with (fragment_size = ");
568  sb.append(cmd.getOptionValue("fragmentSize", "0"));
569  sb.append(")");
570  }
571 
572  } catch (SQLException ex) {
573  LOGGER.error("Error processing the metadata - " + ex.toString());
574  exit(1);
575  }
576 
577  executeMapDCommand(sb.toString());
578  }
String getColType(int cType, int precision, int scale)
size_t append(FILE *f, const size_t size, int8_t *buf)
Appends the specified number of bytes to the end of the file f from buf.
Definition: File.cpp:136
void executeMapDCommand(String sql)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.doWork ( String[]  args)
inlinepackage

Definition at line 323 of file SQLImporter.java.

References run-benchmark-import.args, com.mapd.utility.SQLImporter.cmd, and com.mapd.utility.SQLImporter.executeQuery().

323  {
324  // create Options object
325 
326  SQLImporter_args s_args = new SQLImporter_args();
327 
328  try {
329  cmd = s_args.parse(args);
330  } catch (ParseException ex) {
331  LOGGER.error(ex.getLocalizedMessage());
332  s_args.printHelpMessage();
333  exit(0);
334  }
335  executeQuery();
336  }

+ Here is the call graph for this function:

void com.mapd.utility.SQLImporter.executeMapDCommand ( String  sql)
inlineprivate

Definition at line 664 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.session, and sql().

Referenced by com.mapd.utility.SQLImporter.checkMapDTable(), and com.mapd.utility.SQLImporter.createMapDTable().

664  {
665  LOGGER.info(" run comamnd :" + sql);
666 
667  try {
668  TQueryResult sqlResult = client.sql_execute(session, sql + ";", true, null, -1, -1);
669  } catch (TMapDException ex) {
670  LOGGER.error("SQL Execute failed - " + ex.toString());
671  exit(1);
672  } catch (TException ex) {
673  LOGGER.error("SQL Execute failed - " + ex.toString());
674  exit(1);
675  }
676  }
auto sql(std::string_view sql_stmts)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.executeQuery ( )
inlinepackage

Definition at line 338 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.checkMapDTable(), com.mapd.utility.SQLImporter.cmd, com.mapd.utility.SQLImporter.resetBinaryColumn(), com.mapd.utility.SQLImporter.run_init(), com.mapd.utility.SQLImporter.session, com.mapd.utility.SQLImporter.setColValue(), com.mapd.utility.SQLImporter.setupBinaryColumn(), and com.mapd.utility.SQLImporter.vendor_types.

Referenced by com.mapd.utility.SQLImporter.doWork().

338  {
339  Connection conn = null;
340  Statement stmt = null;
341 
342  long totalTime = 0;
343 
344  try {
345  // Open a connection
346  LOGGER.info("Connecting to database url :" + cmd.getOptionValue("jdbcConnect"));
347  conn = DriverManager.getConnection(cmd.getOptionValue("jdbcConnect"),
348  cmd.getOptionValue("sourceUser"),
349  cmd.getOptionValue("sourcePasswd"));
350  vendor_types = Db_vendor_types.Db_vendor_factory(cmd.getOptionValue("jdbcConnect"));
351  long startTime = System.currentTimeMillis();
352 
353  // run init file script on target DB if present
354  if (cmd.hasOption("initializeFile")) {
355  run_init(conn);
356  }
357 
358  // set autocommit off to allow postgres to not load all results
359  try {
360  conn.setAutoCommit(false);
361  } catch (SQLException se) {
362  LOGGER.warn(
363  "SQLException when attempting to setAutoCommit to false, jdbc driver probably doesnt support it. Error is "
364  + se.toString());
365  }
366 
367  // Execute a query
368  stmt = conn.createStatement();
369 
370  int bufferSize = Integer.valueOf(cmd.getOptionValue("bufferSize", "10000"));
371  // set the jdbc fetch buffer size to reduce the number of records being moved to
372  // java from postgres
373  stmt.setFetchSize(bufferSize);
374  long timer;
375 
376  ResultSet rs = stmt.executeQuery(cmd.getOptionValue("sqlStmt"));
377 
378  // check if table already exists and is compatible in OmniSci with the query
379  // metadata
380  ResultSetMetaData md = rs.getMetaData();
381  checkMapDTable(conn, md);
382 
383  timer = System.currentTimeMillis();
384 
385  long resultCount = 0;
386  int bufferCount = 0;
387  long total = 0;
388 
389  List<TColumn> cols = new ArrayList(md.getColumnCount());
390  for (int i = 1; i <= md.getColumnCount(); i++) {
391  TColumn col = setupBinaryColumn(i, md, bufferSize);
392  cols.add(col);
393  }
394 
395  // read data from old DB
396  while (rs.next()) {
397  for (int i = 1; i <= md.getColumnCount(); i++) {
398  setColValue(rs,
399  cols.get(i - 1),
400  md.getColumnType(i),
401  i,
402  md.getScale(i),
403  md.getColumnTypeName(i));
404  }
405  resultCount++;
406  bufferCount++;
407  if (bufferCount == bufferSize) {
408  bufferCount = 0;
409  // send the buffer to mapD
410  client.load_table_binary_columnar(
411  session, cmd.getOptionValue("targetTable"), cols); // old
412  // recreate columnar store for use
413  for (int i = 1; i <= md.getColumnCount(); i++) {
414  resetBinaryColumn(i, md, bufferSize, cols.get(i - 1));
415  }
416 
417  if (resultCount % 100000 == 0) {
418  LOGGER.info("Imported " + resultCount + " records");
419  }
420  }
421  }
422  if (bufferCount > 0) {
423  // send the LAST buffer to mapD
424  client.load_table_binary_columnar(
425  session, cmd.getOptionValue("targetTable"), cols);
426  bufferCount = 0;
427  }
428  LOGGER.info("result set count is " + resultCount + " read time is "
429  + (System.currentTimeMillis() - timer) + "ms");
430 
431  // Clean-up environment
432  rs.close();
433  stmt.close();
434 
435  totalTime = System.currentTimeMillis() - startTime;
436  conn.close();
437  } catch (SQLException se) {
438  LOGGER.error("SQLException - " + se.toString());
439  se.printStackTrace();
440  } catch (TMapDException ex) {
441  LOGGER.error("TMapDException - " + ex.toString());
442  ex.printStackTrace();
443  } catch (TException ex) {
444  LOGGER.error("TException failed - " + ex.toString());
445  ex.printStackTrace();
446  } finally {
447  // finally block used to close resources
448  try {
449  if (stmt != null) {
450  stmt.close();
451  }
452  } catch (SQLException se2) {
453  } // nothing we can do
454  try {
455  if (conn != null) {
456  conn.close();
457  }
458  } catch (SQLException se) {
459  LOGGER.error("SQlException in close - " + se.toString());
460  se.printStackTrace();
461  } // end finally try
462  } // end try
463  }
void resetBinaryColumn(int i, ResultSetMetaData md, int bufferSize, TColumn col)
void checkMapDTable(Connection otherdb_conn, ResultSetMetaData md)
void setColValue(ResultSet rs, TColumn col, int columnType, int colNum, int scale, String colTypeName)
TColumn setupBinaryColumn(int i, ResultSetMetaData md, int bufferSize)
void run_init(Connection conn)

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

String com.mapd.utility.SQLImporter.getColType ( int  cType,
int  precision,
int  scale 
)
inlineprivate

Definition at line 678 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.createMapDTable().

678  {
679  // Note - if cType is OTHER an earlier call will have been made
680  // to try and work out the db vendors specific type.
681  if (precision > 19) {
682  precision = 19;
683  }
684  if (scale > 19) {
685  scale = 18;
686  }
687  switch (cType) {
688  case java.sql.Types.TINYINT:
689  return ("TINYINT");
690  case java.sql.Types.SMALLINT:
691  return ("SMALLINT");
692  case java.sql.Types.INTEGER:
693  return ("INTEGER");
694  case java.sql.Types.BIGINT:
695  return ("BIGINT");
696  case java.sql.Types.FLOAT:
697  return ("FLOAT");
698  case java.sql.Types.DECIMAL:
699  return ("DECIMAL(" + precision + "," + scale + ")");
700  case java.sql.Types.DOUBLE:
701  return ("DOUBLE");
702  case java.sql.Types.REAL:
703  return ("REAL");
704  case java.sql.Types.NUMERIC:
705  return ("NUMERIC(" + precision + "," + scale + ")");
706  case java.sql.Types.TIME:
707  return ("TIME");
708  case java.sql.Types.TIMESTAMP:
709  return ("TIMESTAMP");
710  case java.sql.Types.DATE:
711  return ("DATE");
712  case java.sql.Types.BOOLEAN:
713  case java.sql.Types
714  .BIT: // deal with postgress treating boolean as bit... this will bite me
715  return ("BOOLEAN");
716  case java.sql.Types.NVARCHAR:
717  case java.sql.Types.VARCHAR:
718  case java.sql.Types.NCHAR:
719  case java.sql.Types.CHAR:
720  case java.sql.Types.LONGVARCHAR:
721  case java.sql.Types.LONGNVARCHAR:
722  return ("TEXT ENCODING DICT");
723  default:
724  throw new AssertionError("Column type " + cType + " not Supported");
725  }
726  }

+ Here is the caller graph for this function:

List<TColumnType> com.mapd.utility.SQLImporter.getColumnInfo ( String  tName)
inlineprivate

Definition at line 629 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.session.

Referenced by com.mapd.utility.SQLImporter.checkMapDTable().

629  {
630  LOGGER.debug("Getting columns for " + tName);
631  List<TColumnType> row_descriptor = null;
632  try {
633  TTableDetails table_details = client.get_table_details(session, tName);
634  row_descriptor = table_details.row_desc;
635  } catch (TMapDException ex) {
636  LOGGER.error("column check failed - " + ex.toString());
637  exit(3);
638  } catch (TException ex) {
639  LOGGER.error("column check failed - " + ex.toString());
640  exit(3);
641  }
642  return row_descriptor;
643  }

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.help ( Options  options)
inlineprivate

Definition at line 492 of file SQLImporter.java.

492  {
493  // automatically generate the help statement
494  HelpFormatter formatter = new HelpFormatter();
495  formatter.setOptionComparator(null); // get options in the order they are created
496  formatter.printHelp("SQLImporter", options);
497  }
static void com.mapd.utility.SQLImporter.main ( String[]  args)
inlinestatic

Definition at line 318 of file SQLImporter.java.

References run-benchmark-import.args.

318  {
319  SQLImporter sq = new SQLImporter();
320  sq.doWork(args);
321  }
void com.mapd.utility.SQLImporter.resetBinaryColumn ( int  i,
ResultSetMetaData  md,
int  bufferSize,
TColumn  col 
) throws SQLException
inlineprivate

Definition at line 899 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.executeQuery().

900  {
901  col.nulls.clear();
902 
903  switch (md.getColumnType(i)) {
904  case java.sql.Types.TINYINT:
905  case java.sql.Types.SMALLINT:
906  case java.sql.Types.INTEGER:
907  case java.sql.Types.BIGINT:
908  case java.sql.Types.TIME:
909  case java.sql.Types.TIMESTAMP:
910  case java.sql.Types
911  .BIT: // deal with postgress treating boolean as bit... this will bite me
912  case java.sql.Types.BOOLEAN:
913  case java.sql.Types.DATE:
914  case java.sql.Types.DECIMAL:
915  case java.sql.Types.NUMERIC:
916  col.data.int_col.clear();
917  break;
918 
919  case java.sql.Types.FLOAT:
920  case java.sql.Types.DOUBLE:
921  case java.sql.Types.REAL:
922  col.data.real_col.clear();
923  break;
924 
925  case java.sql.Types.NVARCHAR:
926  case java.sql.Types.VARCHAR:
927  case java.sql.Types.NCHAR:
928  case java.sql.Types.CHAR:
929  case java.sql.Types.LONGVARCHAR:
930  case java.sql.Types.LONGNVARCHAR:
931  col.data.str_col.clear();
932  break;
933 
934  // Handle WKT for geo columns
935  case java.sql.Types.OTHER:
936  col.data.str_col.clear();
937  break;
938 
939  default:
940  throw new AssertionError("Column type " + md.getColumnType(i) + " not Supported");
941  }
942  }

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.run_init ( Connection  conn)
inlineprivate

Definition at line 465 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.cmd, and Experimental.String.

Referenced by com.mapd.utility.SQLImporter.executeQuery().

465  {
466  // attempt to open file
467  String line = "";
468  try {
469  BufferedReader reader =
470  new BufferedReader(new FileReader(cmd.getOptionValue("initializeFile")));
471  Statement stmt = conn.createStatement();
472  while ((line = reader.readLine()) != null) {
473  if (line.isEmpty()) {
474  continue;
475  }
476  LOGGER.info("Running : " + line);
477  stmt.execute(line);
478  }
479  stmt.close();
480  reader.close();
481  } catch (IOException e) {
482  LOGGER.error("Exception occurred trying to read initialize file: "
483  + cmd.getOptionValue("initFile"));
484  exit(1);
485  } catch (SQLException e) {
486  LOGGER.error(
487  "Exception occurred trying to execute initialize file entry : " + line);
488  exit(1);
489  }
490  }

+ Here is the caller graph for this function:

void com.mapd.utility.SQLImporter.setColValue ( ResultSet  rs,
TColumn  col,
int  columnType,
int  colNum,
int  scale,
String  colTypeName 
) throws SQLException
inlineprivate

Definition at line 774 of file SQLImporter.java.

References anonymous_namespace{ImportTest.cpp}.d(), Double, and Experimental.String.

Referenced by com.mapd.utility.SQLImporter.executeQuery().

779  {
780  switch (columnType) {
781  case java.sql.Types
782  .BIT: // deal with postgress treating boolean as bit... this will bite me
783  case java.sql.Types.BOOLEAN:
784  Boolean b = rs.getBoolean(colNum);
785  if (rs.wasNull()) {
786  col.nulls.add(Boolean.TRUE);
787  col.data.int_col.add(0L);
788  } else {
789  col.nulls.add(Boolean.FALSE);
790  col.data.int_col.add(b ? 1L : 0L);
791  }
792  break;
793 
794  case java.sql.Types.DECIMAL:
795  case java.sql.Types.NUMERIC:
796  BigDecimal bd = rs.getBigDecimal(colNum);
797  if (rs.wasNull()) {
798  col.nulls.add(Boolean.TRUE);
799  col.data.int_col.add(0L);
800  } else {
801  col.nulls.add(Boolean.FALSE);
802  col.data.int_col.add(bd.multiply(new BigDecimal(pow(10L, scale))).longValue());
803  }
804  break;
805 
806  case java.sql.Types.TINYINT:
807  case java.sql.Types.SMALLINT:
808  case java.sql.Types.INTEGER:
809  case java.sql.Types.BIGINT:
810  Long l = rs.getLong(colNum);
811  if (rs.wasNull()) {
812  col.nulls.add(Boolean.TRUE);
813  col.data.int_col.add(new Long(0));
814  } else {
815  col.nulls.add(Boolean.FALSE);
816  col.data.int_col.add(l);
817  }
818  break;
819 
820  case java.sql.Types.TIME:
821  Time t = rs.getTime(colNum);
822  if (rs.wasNull()) {
823  col.nulls.add(Boolean.TRUE);
824  col.data.int_col.add(0L);
825 
826  } else {
827  col.data.int_col.add(dateTimeUtils.getSecondsFromMilliseconds(t.getTime()));
828  col.nulls.add(Boolean.FALSE);
829  }
830 
831  break;
832  case java.sql.Types.TIMESTAMP:
833  Timestamp ts = rs.getTimestamp(colNum);
834  if (rs.wasNull()) {
835  col.nulls.add(Boolean.TRUE);
836  col.data.int_col.add(0L);
837 
838  } else {
839  col.data.int_col.add(dateTimeUtils.getSecondsFromMilliseconds(ts.getTime()));
840  col.nulls.add(Boolean.FALSE);
841  }
842 
843  break;
844  case java.sql.Types.DATE:
845  Date d = rs.getDate(colNum);
846  if (rs.wasNull()) {
847  col.nulls.add(Boolean.TRUE);
848  col.data.int_col.add(0L);
849 
850  } else {
851  col.data.int_col.add(dateTimeUtils.getSecondsFromMilliseconds(d.getTime()));
852  col.nulls.add(Boolean.FALSE);
853  }
854  break;
855  case java.sql.Types.FLOAT:
856  case java.sql.Types.DOUBLE:
857  case java.sql.Types.REAL:
858  Double db = rs.getDouble(colNum);
859  if (rs.wasNull()) {
860  col.nulls.add(Boolean.TRUE);
861  col.data.real_col.add(new Double(0));
862 
863  } else {
864  col.nulls.add(Boolean.FALSE);
865  col.data.real_col.add(db);
866  }
867  break;
868 
869  case java.sql.Types.NVARCHAR:
870  case java.sql.Types.VARCHAR:
871  case java.sql.Types.NCHAR:
872  case java.sql.Types.CHAR:
873  case java.sql.Types.LONGVARCHAR:
874  case java.sql.Types.LONGNVARCHAR:
875  String strVal = rs.getString(colNum);
876  if (rs.wasNull()) {
877  col.nulls.add(Boolean.TRUE);
878  col.data.str_col.add("");
879 
880  } else {
881  col.data.str_col.add(strVal);
882  col.nulls.add(Boolean.FALSE);
883  }
884  break;
885  case java.sql.Types.OTHER:
886  if (rs.wasNull()) {
887  col.nulls.add(Boolean.TRUE);
888  col.data.str_col.add("");
889  } else {
890  col.data.str_col.add(vendor_types.get_wkt(rs, colNum, colTypeName));
891  col.nulls.add(Boolean.FALSE);
892  }
893  break;
894  default:
895  throw new AssertionError("Column type " + columnType + " not Supported");
896  }
897  }
void d(const SQLTypes expected_type, const std::string &str)
Definition: ImportTest.cpp:289

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

TColumn com.mapd.utility.SQLImporter.setupBinaryColumn ( int  i,
ResultSetMetaData  md,
int  bufferSize 
) throws SQLException
inlineprivate

Definition at line 728 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.executeQuery().

729  {
730  TColumn col = new TColumn();
731 
732  col.nulls = new ArrayList<Boolean>(bufferSize);
733 
734  col.data = new TColumnData();
735 
736  switch (md.getColumnType(i)) {
737  case java.sql.Types.TINYINT:
738  case java.sql.Types.SMALLINT:
739  case java.sql.Types.INTEGER:
740  case java.sql.Types.BIGINT:
741  case java.sql.Types.TIME:
742  case java.sql.Types.TIMESTAMP:
743  case java.sql.Types
744  .BIT: // deal with postgress treating boolean as bit... this will bite me
745  case java.sql.Types.BOOLEAN:
746  case java.sql.Types.DATE:
747  case java.sql.Types.DECIMAL:
748  case java.sql.Types.NUMERIC:
749  col.data.int_col = new ArrayList<Long>(bufferSize);
750  break;
751 
752  case java.sql.Types.FLOAT:
753  case java.sql.Types.DOUBLE:
754  case java.sql.Types.REAL:
755  col.data.real_col = new ArrayList<Double>(bufferSize);
756  break;
757 
758  case java.sql.Types.NVARCHAR:
759  case java.sql.Types.VARCHAR:
760  case java.sql.Types.NCHAR:
761  case java.sql.Types.CHAR:
762  case java.sql.Types.LONGVARCHAR:
763  case java.sql.Types.LONGNVARCHAR:
764  case java.sql.Types.OTHER:
765  col.data.str_col = new ArrayList<String>(bufferSize);
766  break;
767 
768  default:
769  throw new AssertionError("Column type " + md.getColumnType(i) + " not Supported");
770  }
771  return col;
772  }

+ Here is the caller graph for this function:

boolean com.mapd.utility.SQLImporter.tableExists ( String  tName)
inlineprivate

Definition at line 645 of file SQLImporter.java.

References com.mapd.utility.SQLImporter.session, and Experimental.String.

Referenced by com.mapd.utility.SQLImporter.checkMapDTable().

645  {
646  LOGGER.debug("Check for table " + tName);
647  try {
648  List<String> recv_get_tables = client.get_tables(session);
649  for (String s : recv_get_tables) {
650  if (s.equals(tName)) {
651  return true;
652  }
653  }
654  } catch (TMapDException ex) {
655  LOGGER.error("Table check failed - " + ex.toString());
656  exit(3);
657  } catch (TException ex) {
658  LOGGER.error("Table check failed - " + ex.toString());
659  exit(3);
660  }
661  return false;
662  }

+ Here is the caller graph for this function:

Member Data Documentation

MapD.Client com.mapd.utility.SQLImporter.client = null
protected

Definition at line 309 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.createMapDConnection().

DateTimeUtils com.mapd.utility.SQLImporter.dateTimeUtils
private
Initial value:
= (milliseconds) -> {
return milliseconds / 1000;
}

Definition at line 312 of file SQLImporter.java.

final Logger com.mapd.utility.SQLImporter.LOGGER = LoggerFactory.getLogger(SQLImporter.class)
staticpackage

Definition at line 311 of file SQLImporter.java.

Db_vendor_types com.mapd.utility.SQLImporter.vendor_types = null
package

Definition at line 316 of file SQLImporter.java.

Referenced by com.mapd.utility.SQLImporter.executeQuery().


The documentation for this class was generated from the following file: