OmniSciDB  343343d194
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
run-benchmark Namespace Reference

Functions

def get_connection
 
def validate_query_file
 
def execute_query
 
def calculate_query_times
 
def get_mem_usage
 
def json_format_handler
 

Variables

tuple parser = ArgumentParser()
 
tuple optional = parser._action_groups.pop()
 
tuple required = parser.add_argument_group("required arguments")
 
string action = "store_true"
 
string help = "Suppress script outuput "
 
string dest = "passwd"
 
string default = "HyperInteractive"
 
 type = int,
 
tuple args = parser.parse_args()
 
 source_db_user = args.user
 
 source_db_passwd = args.passwd
 
 source_db_server = args.server
 
 source_db_port = args.port
 
 source_db_name = args.name
 
 source_table = args.table
 
 label = args.label
 
 queries_dir = args.queries_dir
 
tuple iterations = int(args.iterations)
 
 gpu_count = args.gpu_count
 
 gpu_name = args.gpu_name
 
 no_gather_conn_gpu_info = args.no_gather_conn_gpu_info
 
 gather_nvml_gpu_info = args.gather_nvml_gpu_info
 
 no_gather_nvml_gpu_info = args.no_gather_nvml_gpu_info
 
 machine_name = args.machine_name
 
 machine_uname = args.machine_uname
 
tuple destinations = args.destination.split(",")
 
 valid_destination_set = True
 
 dest_db_user = args.dest_user
 
 dest_db_passwd = args.dest_passwd
 
 dest_db_server = args.dest_server
 
 dest_db_port = args.dest_port
 
 dest_db_name = args.dest_name
 
 dest_table = args.dest_table
 
 dest_table_schema_file = args.dest_table_schema_file
 
 output_file_json = args.output_file_json
 
 output_file_jenkins = args.output_file_jenkins
 
 output_tag_jenkins = args.output_tag_jenkins
 
tuple con
 
tuple run_guid = str(uuid.uuid4())
 
tuple run_timestamp = datetime.datetime.now()
 
tuple run_connection = str(con)
 
string run_driver = ""
 
tuple run_version = con._client.get_version()
 
tuple run_version_short = run_version.split("-")
 
tuple conn_machine_name = re.search(r"@(.*?):", run_connection)
 
 conn_gpu_count = None
 
 source_db_gpu_count = None
 
 source_db_gpu_mem = None
 
string source_db_gpu_driver_ver = ""
 
string source_db_gpu_name = ""
 
tuple conn_hardware_info = con._client.get_hardware_info(con._session)
 
tuple handle = pynvml.nvmlDeviceGetHandleByIndex(i)
 
tuple local_uname = os.uname()
 
 run_machine_name = machine_name
 
 run_machine_uname = machine_uname
 
list query_list = []
 
tuple query_mapdql = query_filepath.read()
 
list query_id = query["name"]
 
list query_results = []
 
tuple query_total_start_time = timeit.default_timer()
 
tuple pre_query_cpu_mem_usage = get_mem_usage(con=con, mem_type="cpu")
 
tuple pre_query_gpu_mem_usage = get_mem_usage(con=con, mem_type="gpu")
 
tuple query_result
 
tuple post_query_cpu_mem_usage = get_mem_usage(con=con, mem_type="cpu")
 
tuple post_query_gpu_mem_usage = get_mem_usage(con=con, mem_type="gpu")
 
tuple query_cpu_mem_usage
 
tuple query_gpu_mem_usage
 
string query_error_info = ""
 
tuple first_execution_time = round(query_result["execution_time"], 1)
 
tuple first_connect_time = round(query_result["connect_time"], 1)
 
tuple first_results_iter_time
 
tuple first_total_time
 
 first_cpu_mem_usage = query_cpu_mem_usage
 
 first_gpu_mem_usage = query_gpu_mem_usage
 
tuple query_total_elapsed_time
 
list result_count = query_result["result_count"]
 
tuple query_times
 
 succesful_query_list = query_list
 
tuple query_list_json = json.dumps(query_list, default=json_format_handler, indent=2)
 
tuple results_df = DataFrame(query_results)
 
tuple dest_con
 
tuple tables = dest_con.get_tables()
 
tuple create_table_sql = table_schema.read()
 
tuple res = dest_con.execute(create_table_sql)
 
 preserve_index = False,
 
int chunk_size_bytes = 0
 
 col_names_from_schema = True,
 
tuple file_json_open = open(output_file_json, "w")
 
list jenkins_bench_results = []
 
tuple jenkins_bench_json
 
tuple file_jenkins_open = open(output_file_jenkins, "w")
 

Function Documentation

def run-benchmark.calculate_query_times (   kwargs)
  Calculates aggregate query times from all iteration times

  Kwargs:
    total_times(list): List of total time calculations
    execution_times(list): List of execution_time calculations
    results_iter_times(list): List of results_iter_time calculations
    connect_times(list): List of connect_time calculations

  Returns:
    query_execution(dict): Query times
    False(bool): The query failed. Exception should be logged.

Definition at line 151 of file run-benchmark.py.

def calculate_query_times(**kwargs):
    """
    Calculates aggregate query times from all iteration times

    Kwargs:
      total_times(list): List of total time calculations
      execution_times(list): List of execution_time calculations
      results_iter_times(list): List of results_iter_time calculations
      connect_times(list): List of connect_time calculations

    Returns:
      query_execution(dict): Query times, keyed "<metric>_avg", "<metric>_min",
        "<metric>_max", "<metric>_85" for each metric, plus
        "execution_time_25" and "execution_time_std".
    """
    # Build the avg/min/max/85th-percentile aggregates from one loop instead
    # of repeating the round(...) boilerplate once per metric (the previous
    # version spelled out all 16 entries by hand).
    stats = {}
    for metric in (
        "total_times",
        "execution_times",
        "connect_times",
        "results_iter_times",
    ):
        times = kwargs[metric]
        key = metric[:-1]  # "total_times" -> "total_time", etc.
        stats[key + "_avg"] = round(numpy.mean(times), 1)
        stats[key + "_min"] = round(numpy.min(times), 1)
        stats[key + "_max"] = round(numpy.max(times), 1)
        stats[key + "_85"] = round(numpy.percentile(times, 85), 1)
    # Execution time additionally gets a 25th percentile and a std-dev.
    stats["execution_time_25"] = round(
        numpy.percentile(kwargs["execution_times"], 25), 1
    )
    stats["execution_time_std"] = round(
        numpy.std(kwargs["execution_times"]), 1
    )
    return stats
200 
def calculate_query_times
def run-benchmark.execute_query (   kwargs)
  Executes a query against the connected db using pymapd
  https://pymapd.readthedocs.io/en/latest/usage.html#querying

  Kwargs:
    query_name(str): Name of query
    query_mapdql(str): Query to run
    iteration(int): Iteration number

  Returns:
    query_execution(dict):::
      result_count(int): Number of results returned
      execution_time(float): Time (in ms) that pymapd reports
                             backend spent on query.
      connect_time(float): Time (in ms) for overhead of query, calculated
                           by subtracting backend execution time
                           from time spent on the execution function.
      results_iter_time(float): Time (in ms) it took for
                                pymapd.fetchone() to iterate through all
                                of the results.
      total_time(float): Time (in ms) from adding all above times.
    False(bool): The query failed. Exception should be logged.

Definition at line 70 of file run-benchmark.py.

70 
def execute_query(**kwargs):
    """
    Executes a query against the connected db using pymapd
    https://pymapd.readthedocs.io/en/latest/usage.html#querying

    Kwargs:
      query_name(str): Name of query
      query_mapdql(str): Query to run
      iteration(int): Iteration number

    Returns:
      query_execution(dict):::
        result_count(int): Number of results returned
        execution_time(float): Time (in ms) that pymapd reports
                               backend spent on query.
        connect_time(float): Time (in ms) for overhead of query, calculated
                             by subtracting backend execution time
                             from time spent on the execution function.
        results_iter_time(float): Time (in ms) it took for
                                  pymapd.fetchone() to iterate through all
                                  of the results.
        total_time(float): Time (in ms) from adding all above times.
      False(bool): The query failed. Exception should be logged.
    """
    start_time = timeit.default_timer()
    try:
        # Run the query.
        # NOTE(review): this uses the module-level connection `con`, not a
        # kwarg, unlike get_mem_usage — confirm that is intentional.
        query_result = con.execute(kwargs["query_mapdql"])
        logging.debug(
            "Completed iteration "
            + str(kwargs["iteration"])
            + " of query "
            + kwargs["query_name"]
        )
    except (pymapd.exceptions.ProgrammingError, pymapd.exceptions.Error):
        logging.exception(
            "Error running query "
            + kwargs["query_name"]
            + " during iteration "
            + str(kwargs["iteration"])
        )
        return False

    # Calculate times: backend execution time comes from pymapd; everything
    # else in the wall-clock elapsed time is attributed to "connect" overhead.
    query_elapsed_time = (timeit.default_timer() - start_time) * 1000
    execution_time = query_result._result.execution_time_ms
    connect_time = round((query_elapsed_time - execution_time), 1)

    # Iterate through each result from the query, timing the fetch loop.
    logging.debug(
        "Counting results from query "  # fixed: was missing trailing space
        + kwargs["query_name"]
        + " iteration "
        + str(kwargs["iteration"])
    )
    result_count = 0
    start_time = timeit.default_timer()
    while query_result.fetchone():
        result_count += 1
    results_iter_time = round(
        ((timeit.default_timer() - start_time) * 1000), 1
    )

    query_execution = {
        "result_count": result_count,
        "execution_time": execution_time,
        "connect_time": connect_time,
        "results_iter_time": results_iter_time,
        "total_time": execution_time + connect_time + results_iter_time,
    }
    logging.debug(
        "Execution results for query "  # fixed: was missing trailing space
        + kwargs["query_name"]
        + " iteration "
        + str(kwargs["iteration"])
        + ": "
        + str(query_execution)
    )
    return query_execution
150 
def run-benchmark.get_connection (   kwargs)
  Connects to the db using pymapd
  https://pymapd.readthedocs.io/en/latest/usage.html#connecting

  Kwargs:
    db_user(str): DB username
    db_passwd(str): DB password
    db_server(str): DB host
    db_port(int): DB port
    db_name(str): DB name

  Returns:
    con(class): Connection class
    False(bool): The connection failed. Exception should be logged.

Definition at line 16 of file run-benchmark.py.

16 
def get_connection(**kwargs):
    """
    Connects to the db using pymapd
    https://pymapd.readthedocs.io/en/latest/usage.html#connecting

    Kwargs:
      db_user(str): DB username
      db_passwd(str): DB password
      db_server(str): DB host
      db_port(int): DB port
      db_name(str): DB name

    Returns:
      con(class): Connection class
      False(bool): The connection failed. Exception should be logged.
    """
    try:
        logging.debug("Connecting to mapd db...")
        connection = pymapd.connect(
            user=kwargs["db_user"],
            password=kwargs["db_passwd"],
            host=kwargs["db_server"],
            port=kwargs["db_port"],
            dbname=kwargs["db_name"],
        )
    except (pymapd.exceptions.OperationalError, pymapd.exceptions.Error):
        # Any connection-level failure is logged and signalled via False.
        logging.exception("Error connecting to database.")
        return False
    logging.info("Succesfully connected to mapd db")
    return connection
47 
def run-benchmark.get_mem_usage (   kwargs)
  Calculates memory statistics from mapd_server _client.get_memory call

  Kwargs:
    con(class 'pymapd.connection.Connection'): Mapd connection
    mem_type(str): [gpu, cpu] Type of memory to gather metrics for

  Returns:
    ramusage(dict):::
      usedram(float): Amount of memory (in MB) used
      freeram(float): Amount of memory (in MB) free
      totalallocated(float): Total amount of memory (in MB) allocated
      errormessage(str): Error if returned by get_memory call
      rawdata(list): Raw data returned from get_memory call

Definition at line 201 of file run-benchmark.py.

def get_mem_usage(**kwargs):
    """
    Calculates memory statistics from mapd_server _client.get_memory call

    Kwargs:
      con(class 'pymapd.connection.Connection'): Mapd connection
      mem_type(str): [gpu, cpu] Type of memory to gather metrics for

    Returns:
      ramusage(dict):::
        usedram(float): Amount of memory (in MB) used
        freeram(float): Amount of memory (in MB) free
        totalallocated(float): Total amount of memory (in MB) allocated
        errormessage(str): Error if returned by get_memory call
    """
    # Initialize up front so the except branch can always record the error
    # message; previously a failure before `ramusage = {}` raised NameError.
    ramusage = {}
    try:
        # Fix: query the connection passed in kwargs rather than the
        # module-global `con` — callers may pass a different connection.
        con_mem_data_list = kwargs["con"]._client.get_memory(
            session=kwargs["con"]._session, memory_level=kwargs["mem_type"]
        )
        usedram = 0
        freeram = 0
        for con_mem_data in con_mem_data_list:
            page_size = con_mem_data.page_size
            node_memory_data_list = con_mem_data.node_memory_data
            for node_memory_data in node_memory_data_list:
                # Each node entry contributes num_pages * page_size bytes,
                # counted as free or used depending on its flag.
                ram = node_memory_data.num_pages * page_size
                is_free = node_memory_data.is_free
                if is_free:
                    freeram += ram
                else:
                    usedram += ram
        totalallocated = usedram + freeram
        if totalallocated > 0:
            # Convert bytes to MB only when something is allocated.
            totalallocated = round(totalallocated / 1024 / 1024, 1)
            usedram = round(usedram / 1024 / 1024, 1)
            freeram = round(freeram / 1024 / 1024, 1)
        ramusage["usedram"] = usedram
        ramusage["freeram"] = freeram
        ramusage["totalallocated"] = totalallocated
        ramusage["errormessage"] = ""
    except Exception as e:
        errormessage = "Get memory failed with error: " + str(e)
        logging.error(errormessage)
        ramusage["errormessage"] = errormessage
    return ramusage
249 
def run-benchmark.json_format_handler (   x)

Definition at line 250 of file run-benchmark.py.

def json_format_handler(x):
    """`default` hook for json.dumps: serialize datetimes and numpy ints."""
    # Dispatch table of (type, converter) pairs instead of an if-chain.
    converters = (
        (datetime.datetime, lambda v: v.isoformat()),
        (numpy.int64, int),
    )
    for matched_type, convert in converters:
        if isinstance(x, matched_type):
            return convert(x)
    raise TypeError("Unknown type")
258 
259 
# Parse input parameters
def run-benchmark.validate_query_file (   kwargs)
  Validates query file. Currently only checks the query file name

  Kwargs:
    query_filename(str): Name of query file

  Returns:
    True(bool): Query successfully validated
    False(bool): Query failed validation

Definition at line 48 of file run-benchmark.py.

48 
def validate_query_file(**kwargs):
    """
    Validates query file. Currently only checks the query file name

    Kwargs:
      query_filename(str): Name of query file

    Returns:
      True(bool): Query successfully validated
      False(bool): Query failed validation
    """
    filename = kwargs["query_filename"]
    # Only ".sql" files are accepted; anything else is logged and skipped.
    if filename.endswith(".sql"):
        return True
    logging.warning(
        "Query filename "
        + filename
        + ' is invalid - does not end in ".sql". Skipping'
    )
    return False
69 
def validate_query_file

Variable Documentation

string run-benchmark.action = "store_true"

Definition at line 270 of file run-benchmark.py.

tuple run-benchmark.args = parser.parse_args()

Definition at line 461 of file run-benchmark.py.

int run-benchmark.chunk_size_bytes = 0

Definition at line 910 of file run-benchmark.py.

run-benchmark.col_names_from_schema = True,

Definition at line 911 of file run-benchmark.py.

tuple run-benchmark.con
Initial value:
2  db_user=source_db_user,
3  db_passwd=source_db_passwd,
4  db_server=source_db_server,
5  db_port=source_db_port,
6  db_name=source_db_name,
7 )

Definition at line 536 of file run-benchmark.py.

list run-benchmark.conn_gpu_count = None

Definition at line 560 of file run-benchmark.py.

tuple run-benchmark.conn_hardware_info = con._client.get_hardware_info(con._session)

Definition at line 577 of file run-benchmark.py.

tuple run-benchmark.conn_machine_name = re.search(r"@(.*?):", run_connection)

Definition at line 558 of file run-benchmark.py.

tuple run-benchmark.create_table_sql = table_schema.read()

Definition at line 891 of file run-benchmark.py.

string run-benchmark.default = "HyperInteractive"

Definition at line 280 of file run-benchmark.py.

string run-benchmark.dest = "passwd"

Definition at line 279 of file run-benchmark.py.

tuple run-benchmark.dest_con
Initial value:
2  db_user=dest_db_user,
3  db_passwd=dest_db_passwd,
4  db_server=dest_db_server,
5  db_port=dest_db_port,
6  db_name=dest_db_name,
7  )

Definition at line 873 of file run-benchmark.py.

run-benchmark.dest_db_name = args.dest_name

Definition at line 503 of file run-benchmark.py.

run-benchmark.dest_db_passwd = args.dest_passwd

Definition at line 495 of file run-benchmark.py.

run-benchmark.dest_db_port = args.dest_port

Definition at line 502 of file run-benchmark.py.

run-benchmark.dest_db_server = args.dest_server

Definition at line 501 of file run-benchmark.py.

run-benchmark.dest_db_user = args.dest_user

Definition at line 494 of file run-benchmark.py.

run-benchmark.dest_table = args.dest_table

Definition at line 504 of file run-benchmark.py.

run-benchmark.dest_table_schema_file = args.dest_table_schema_file

Definition at line 505 of file run-benchmark.py.

tuple run-benchmark.destinations = args.destination.split(",")

Definition at line 491 of file run-benchmark.py.

tuple run-benchmark.file_jenkins_open = open(output_file_jenkins, "w")

Definition at line 954 of file run-benchmark.py.

tuple run-benchmark.file_json_open = open(output_file_json, "w")

Definition at line 917 of file run-benchmark.py.

tuple run-benchmark.first_connect_time = round(query_result["connect_time"], 1)

Definition at line 711 of file run-benchmark.py.

run-benchmark.first_cpu_mem_usage = query_cpu_mem_usage

Definition at line 720 of file run-benchmark.py.

tuple run-benchmark.first_execution_time = round(query_result["execution_time"], 1)

Definition at line 710 of file run-benchmark.py.

run-benchmark.first_gpu_mem_usage = query_gpu_mem_usage

Definition at line 721 of file run-benchmark.py.

tuple run-benchmark.first_results_iter_time
Initial value:
1 = round(
2  query_result["results_iter_time"], 1
3  )

Definition at line 712 of file run-benchmark.py.

tuple run-benchmark.first_total_time
Initial value:
1 = (
2  first_execution_time
3  + first_connect_time
4  + first_results_iter_time
5  )

Definition at line 715 of file run-benchmark.py.

run-benchmark.gather_nvml_gpu_info = args.gather_nvml_gpu_info

Definition at line 487 of file run-benchmark.py.

run-benchmark.gpu_count = args.gpu_count

Definition at line 484 of file run-benchmark.py.

Referenced by get_available_gpus().

run-benchmark.gpu_name = args.gpu_name

Definition at line 485 of file run-benchmark.py.

tuple run-benchmark.handle = pynvml.nvmlDeviceGetHandleByIndex(i)

Definition at line 613 of file run-benchmark.py.

Referenced by add_window_pending_output(), apply_window_pending_outputs_double(), apply_window_pending_outputs_float(), apply_window_pending_outputs_float_columnar(), anonymous_namespace{WindowContext.cpp}.apply_window_pending_outputs_int(), apply_window_pending_outputs_int16(), apply_window_pending_outputs_int32(), apply_window_pending_outputs_int64(), and apply_window_pending_outputs_int8().

string run-benchmark.help = "Suppress script outuput "

Definition at line 271 of file run-benchmark.py.

tuple run-benchmark.iterations = int(args.iterations)

Definition at line 479 of file run-benchmark.py.

Referenced by com.mapd.bench.Benchmark.doWork(), com.mapd.bench.BenchmarkCloud.doWork(), com.mapd.bench.Benchmark.executeQuery(), and com.mapd.bench.BenchmarkCloud.executeQuery().

tuple run-benchmark.jenkins_bench_json
Initial value:
1 = json.dumps(
2  {
3  "groups": [
4  {
5  "name": source_table + output_tag_jenkins,
6  "description": "Source table: " + source_table,
7  "tests": jenkins_bench_results,
8  }
9  ]
10  }
11  )

Definition at line 941 of file run-benchmark.py.

list run-benchmark.jenkins_bench_results = []

Definition at line 923 of file run-benchmark.py.

run-benchmark.label = args.label

Definition at line 474 of file run-benchmark.py.

tuple run-benchmark.local_uname = os.uname()

Definition at line 622 of file run-benchmark.py.

run-benchmark.machine_name = args.machine_name

Definition at line 489 of file run-benchmark.py.

run-benchmark.machine_uname = args.machine_uname

Definition at line 490 of file run-benchmark.py.

run-benchmark.no_gather_conn_gpu_info = args.no_gather_conn_gpu_info

Definition at line 486 of file run-benchmark.py.

run-benchmark.no_gather_nvml_gpu_info = args.no_gather_nvml_gpu_info

Definition at line 488 of file run-benchmark.py.

tuple run-benchmark.optional = parser._action_groups.pop()

Definition at line 261 of file run-benchmark.py.

run-benchmark.output_file_jenkins = args.output_file_jenkins

Definition at line 528 of file run-benchmark.py.

run-benchmark.output_file_json = args.output_file_json

Definition at line 515 of file run-benchmark.py.

run-benchmark.output_tag_jenkins = args.output_tag_jenkins

Definition at line 529 of file run-benchmark.py.

tuple run-benchmark.parser = ArgumentParser()

Definition at line 260 of file run-benchmark.py.

tuple run-benchmark.post_query_cpu_mem_usage = get_mem_usage(con=con, mem_type="cpu")

Definition at line 691 of file run-benchmark.py.

tuple run-benchmark.post_query_gpu_mem_usage = get_mem_usage(con=con, mem_type="gpu")

Definition at line 693 of file run-benchmark.py.

tuple run-benchmark.pre_query_cpu_mem_usage = get_mem_usage(con=con, mem_type="cpu")

Definition at line 674 of file run-benchmark.py.

tuple run-benchmark.pre_query_gpu_mem_usage = get_mem_usage(con=con, mem_type="gpu")

Definition at line 676 of file run-benchmark.py.

run-benchmark.preserve_index = False,

Definition at line 909 of file run-benchmark.py.

tuple run-benchmark.queries_dir = args.queries_dir

Definition at line 476 of file run-benchmark.py.

tuple run-benchmark.query_cpu_mem_usage
Initial value:
1 = round(
2  post_query_cpu_mem_usage["usedram"]
3  - pre_query_cpu_mem_usage["usedram"],
4  1,
5  )

Definition at line 695 of file run-benchmark.py.

string run-benchmark.query_error_info = ""

Definition at line 707 of file run-benchmark.py.

tuple run-benchmark.query_gpu_mem_usage
Initial value:
1 = round(
2  post_query_gpu_mem_usage["usedram"]
3  - pre_query_gpu_mem_usage["usedram"],
4  1,
5  )

Definition at line 700 of file run-benchmark.py.

list run-benchmark.query_id = query["name"]

Definition at line 663 of file run-benchmark.py.

Referenced by MapDHandler.broadcast_serialized_rows().

list run-benchmark.query_list = []

Definition at line 639 of file run-benchmark.py.

tuple run-benchmark.query_list_json = json.dumps(query_list, default=json_format_handler, indent=2)

Definition at line 864 of file run-benchmark.py.

tuple run-benchmark.query_mapdql = query_filepath.read()

Definition at line 649 of file run-benchmark.py.

tuple run-benchmark.query_result
Initial value:
2  query_name=query["name"],
3  query_mapdql=query["mapdql"],
4  iteration=iteration,
5  )

Definition at line 684 of file run-benchmark.py.

list run-benchmark.query_results = []

Definition at line 666 of file run-benchmark.py.

tuple run-benchmark.query_times
Initial value:
2  total_times=total_times,
3  execution_times=execution_times,
4  connect_times=connect_times,
5  results_iter_times=results_iter_times,
6  )
def calculate_query_times

Definition at line 778 of file run-benchmark.py.

tuple run-benchmark.query_total_elapsed_time
Initial value:
1 = round(
2  ((timeit.default_timer() - query_total_start_time) * 1000), 1
3  )

Definition at line 756 of file run-benchmark.py.

tuple run-benchmark.query_total_start_time = timeit.default_timer()

Definition at line 670 of file run-benchmark.py.

run-benchmark.required = parser.add_argument_group("required arguments")

Definition at line 262 of file run-benchmark.py.

tuple run-benchmark.res = dest_con.execute(create_table_sql)

Definition at line 900 of file run-benchmark.py.

list run-benchmark.result_count = query_result["result_count"]

Definition at line 774 of file run-benchmark.py.

Referenced by TEST(), com.omnisci.jdbc.OmniSciDatabaseMetaDataTest.tst02_omnisci_table(), com.omnisci.jdbc.OmniSciDatabaseMetaDataTest.tst03_omnisci_table(), com.omnisci.jdbc.OmniSciDatabaseMetaDataTest.tst04_omnisci_table(), com.omnisci.jdbc.OmniSciDatabaseMetaDataTest.tst05_user_table(), com.omnisci.jdbc.OmniSciDatabaseMetaDataTest.tst06_user_table(), com.omnisci.jdbc.OmniSciDatabaseMetaDataTest.tst07_user_table(), com.omnisci.jdbc.OmniSciDatabaseMetaDataTest.tst08_user_table(), com.omnisci.jdbc.OmniSciDatabaseMetaDataTest.tst09_user_table(), com.omnisci.jdbc.OmniSciDatabaseMetaDataTest.tst10_omnisci_table(), and com.omnisci.jdbc.OmniSciDatabaseMetaDataTest.tst11_user2_table().

tuple run-benchmark.results_df = DataFrame(query_results)

Definition at line 870 of file run-benchmark.py.

tuple run-benchmark.run_connection = str(con)

Definition at line 550 of file run-benchmark.py.

string run-benchmark.run_driver = ""

Definition at line 552 of file run-benchmark.py.

tuple run-benchmark.run_guid = str(uuid.uuid4())

Definition at line 547 of file run-benchmark.py.

tuple run-benchmark.run_machine_name = machine_name

Definition at line 624 of file run-benchmark.py.

string run-benchmark.run_machine_uname = machine_uname

Definition at line 631 of file run-benchmark.py.

tuple run-benchmark.run_timestamp = datetime.datetime.now()

Definition at line 549 of file run-benchmark.py.

tuple run-benchmark.run_version = con._client.get_version()

Definition at line 553 of file run-benchmark.py.

run-benchmark.run_version_short = run_version.split("-")

Definition at line 555 of file run-benchmark.py.

run-benchmark.source_db_gpu_count = None

Definition at line 561 of file run-benchmark.py.

tuple run-benchmark.source_db_gpu_driver_ver = ""

Definition at line 563 of file run-benchmark.py.

tuple run-benchmark.source_db_gpu_mem = None

Definition at line 562 of file run-benchmark.py.

tuple run-benchmark.source_db_gpu_name = ""

Definition at line 564 of file run-benchmark.py.

run-benchmark.source_db_name = args.name

Definition at line 472 of file run-benchmark.py.

run-benchmark.source_db_passwd = args.passwd

Definition at line 469 of file run-benchmark.py.

run-benchmark.source_db_port = args.port

Definition at line 471 of file run-benchmark.py.

run-benchmark.source_db_server = args.server

Definition at line 470 of file run-benchmark.py.

run-benchmark.source_db_user = args.user

Definition at line 468 of file run-benchmark.py.

run-benchmark.source_table = args.table

Definition at line 473 of file run-benchmark.py.

run-benchmark.succesful_query_list = query_list

Definition at line 855 of file run-benchmark.py.

tuple run-benchmark.tables = dest_con.get_tables()

Definition at line 883 of file run-benchmark.py.

run-benchmark.type = int,

Definition at line 294 of file run-benchmark.py.

run-benchmark.valid_destination_set = True

Definition at line 493 of file run-benchmark.py.