OmniSciDB 5ade3759e0
run-benchmark.py File Reference


Namespaces

 run-benchmark
 

Functions

def run-benchmark.get_connection (kwargs)
 
def run-benchmark.validate_query_file (kwargs)
 
def run-benchmark.execute_query (kwargs)
 
def run-benchmark.calculate_query_times (kwargs)
 
def run-benchmark.get_mem_usage (kwargs)
 
def run-benchmark.json_format_handler (x)
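
Taken together, these functions cover one benchmark pass: open a connection, validate and read a query file, execute it while sampling memory usage, then summarize the timings. A minimal sketch of how they might compose is below; the keyword-argument names passed to get_connection, validate_query_file, execute_query, and calculate_query_times are assumptions inferred from the variables listed further down, not a documented interface.

    import json

    # Sketch only: argument names are assumed, and the surrounding names
    # (source_db_user, query_mapdql, iterations, ...) come from the module below.
    con = get_connection(db_user=source_db_user, db_passwd=source_db_passwd,
                         db_server=source_db_server, db_port=source_db_port,
                         db_name=source_db_name)
    if validate_query_file(query_filename=query_filename):
        pre_query_gpu_mem_usage = get_mem_usage(con=con, mem_type="gpu")
        query_result = execute_query(query_name=query_filename,
                                     query_mapdql=query_mapdql,
                                     iterations=iterations)
        post_query_gpu_mem_usage = get_mem_usage(con=con, mem_type="gpu")
        query_times = calculate_query_times(total_times=total_times,
                                            execution_times=execution_times,
                                            connect_times=connect_times,
                                            results_iter_times=results_iter_times)
        print(json.dumps(query_times, default=json_format_handler, indent=2))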
 

Variables

 run-benchmark.parser = ArgumentParser()
 
 run-benchmark.optional = parser._action_groups.pop()
 
 run-benchmark.required = parser.add_argument_group("required arguments")
 
 run-benchmark.action
 
 run-benchmark.help
 
 run-benchmark.dest
 
 run-benchmark.default
 
 run-benchmark.type
 
 run-benchmark.True
 
 run-benchmark.args = parser.parse_args()
 
 run-benchmark.level
 
 run-benchmark.source_db_user = args.user
 
 run-benchmark.source_db_passwd = args.passwd
 
 run-benchmark.source_db_server = args.server
 
 run-benchmark.source_db_port = args.port
 
 run-benchmark.source_db_name = args.name
 
 run-benchmark.source_table = args.table
 
 run-benchmark.label = args.label
 
 run-benchmark.queries_dir = args.queries_dir
 
 run-benchmark.iterations = int(args.iterations)
 
 run-benchmark.gpu_count = args.gpu_count
 
 run-benchmark.gpu_name = args.gpu_name
 
 run-benchmark.no_gather_conn_gpu_info = args.no_gather_conn_gpu_info
 
 run-benchmark.gather_nvml_gpu_info = args.gather_nvml_gpu_info
 
 run-benchmark.no_gather_nvml_gpu_info = args.no_gather_nvml_gpu_info
 
 run-benchmark.machine_name = args.machine_name
 
 run-benchmark.machine_uname = args.machine_uname
 
 run-benchmark.destinations = args.destination.split(",")
 
bool run-benchmark.valid_destination_set = True
 
 run-benchmark.dest_db_user = args.dest_user
 
 run-benchmark.dest_db_passwd = args.dest_passwd
 
 run-benchmark.dest_db_server = args.dest_server
 
 run-benchmark.dest_db_port = args.dest_port
 
 run-benchmark.dest_db_name = args.dest_name
 
 run-benchmark.dest_table = args.dest_table
 
 run-benchmark.dest_table_schema_file = args.dest_table_schema_file
 
 run-benchmark.output_file_json = args.output_file_json
 
 run-benchmark.output_file_jenkins = args.output_file_jenkins
 
 run-benchmark.output_tag_jenkins = args.output_tag_jenkins
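
The parser, optional, and required variables reflect a common argparse pattern: the default "optional arguments" group is popped off and re-appended after a new "required arguments" group, so required flags are listed first in --help. A self-contained sketch of that pattern follows; the specific flags are illustrative, chosen to match source_db_user and iterations above, not the script's full argument set.

    from argparse import ArgumentParser

    parser = ArgumentParser()
    optional = parser._action_groups.pop()      # detach the default optional group
    required = parser.add_argument_group("required arguments")
    required.add_argument("-u", "--user", dest="user", required=True,
                          help="Source database user")
    optional.add_argument("-i", "--iterations", dest="iterations", type=int,
                          default=10, help="Number of iterations per query")
    parser._action_groups.append(optional)      # re-append so optional args print last
    args = parser.parse_args()

    source_db_user = args.user
    iterations = int(args.iterations)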
 
 run-benchmark.con
 
 run-benchmark.run_guid = str(uuid.uuid4())
 
 run-benchmark.run_timestamp = datetime.datetime.now()
 
 run-benchmark.run_connection = str(con)
 
string run-benchmark.run_driver = ""
 
 run-benchmark.run_version = con._client.get_version()
 
 run-benchmark.run_version_short = run_version.split("-")[0]
 
 run-benchmark.conn_machine_name = re.search(r"@(.*?):", run_connection).group(1)
 
 run-benchmark.conn_gpu_count = None
 
 run-benchmark.source_db_gpu_count = None
 
 run-benchmark.source_db_gpu_mem = None
 
string run-benchmark.source_db_gpu_driver_ver = ""
 
string run-benchmark.source_db_gpu_name = ""
 
 run-benchmark.conn_hardware_info = con._client.get_hardware_info(con._session)
 
 run-benchmark.handle = pynvml.nvmlDeviceGetHandleByIndex(i)
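
The handle and gather_nvml_gpu_info entries indicate that GPU details can also be read directly through NVML when the benchmark runs on the same machine as the server. A rough sketch with pynvml, assuming the queried fields feed the source_db_gpu_* variables above (older pynvml versions return bytes rather than str for the name and driver version):

    import pynvml

    pynvml.nvmlInit()
    source_db_gpu_count = pynvml.nvmlDeviceGetCount()
    source_db_gpu_driver_ver = pynvml.nvmlSystemGetDriverVersion()
    for i in range(source_db_gpu_count):
        handle = pynvml.nvmlDeviceGetHandleByIndex(i)
        source_db_gpu_name = pynvml.nvmlDeviceGetName(handle)
        source_db_gpu_mem = pynvml.nvmlDeviceGetMemoryInfo(handle).total  # bytes
    pynvml.nvmlShutdown()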
 
 run-benchmark.local_uname = os.uname()
 
 run-benchmark.run_machine_name = machine_name
 
 run-benchmark.run_machine_uname = machine_uname
 
list run-benchmark.query_list = []
 
 run-benchmark.query_filename
 
 run-benchmark.query_mapdql = query_filepath.read().replace("\n", " ")
 
 run-benchmark.query_id = query["name"].rsplit(".")[0]
 
list run-benchmark.query_results = []
 
 run-benchmark.query_total_start_time = timeit.default_timer()
 
 run-benchmark.pre_query_cpu_mem_usage = get_mem_usage(con=con, mem_type="cpu")
 
 run-benchmark.pre_query_gpu_mem_usage = get_mem_usage(con=con, mem_type="gpu")
 
 run-benchmark.query_result
 
 run-benchmark.post_query_cpu_mem_usage = get_mem_usage(con=con, mem_type="cpu")
 
 run-benchmark.post_query_gpu_mem_usage = get_mem_usage(con=con, mem_type="gpu")
 
 run-benchmark.query_cpu_mem_usage
 
 run-benchmark.query_gpu_mem_usage
 
string run-benchmark.query_error_info = ""
 
 run-benchmark.first_execution_time = round(query_result["execution_time"], 1)
 
 run-benchmark.first_connect_time = round(query_result["connect_time"], 1)
 
 run-benchmark.first_results_iter_time
 
tuple run-benchmark.first_total_time
 
 run-benchmark.first_cpu_mem_usage = query_cpu_mem_usage
 
 run-benchmark.first_gpu_mem_usage = query_gpu_mem_usage
 
 run-benchmark.query_total_elapsed_time
 
 run-benchmark.execution_times
 
 run-benchmark.connect_times
 
 run-benchmark.results_iter_times
 
 run-benchmark.total_times
 
 run-benchmark.result_count = query_result["result_count"]
 
 run-benchmark.query_times
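
The execution_times, connect_times, results_iter_times, and total_times lists collect one sample per iteration and are reduced into query_times. The exact statistics calculate_query_times reports are not shown on this page; the sketch below assumes a typical average/min/max/percentile summary, with illustrative key names and example data.

    import numpy

    def summarize_times(times):
        # Assumed shape of a per-metric summary; key names are illustrative.
        return {
            "avg": round(float(numpy.mean(times)), 1),
            "min": round(float(numpy.min(times)), 1),
            "max": round(float(numpy.max(times)), 1),
            "85th_percentile": round(float(numpy.percentile(times, 85)), 1),
        }

    execution_times = [41.2, 39.8, 40.5, 43.1]   # example samples in ms
    query_times = {"execution": summarize_times(execution_times)}
    print(query_times)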
 
list run-benchmark.succesful_query_list = query_list
 
 run-benchmark.query_list_json = json.dumps(query_list, default=json_format_handler, indent=2)
 
 run-benchmark.results_df = DataFrame(query_results)
 
 run-benchmark.dest_con
 
 run-benchmark.tables = dest_con.get_tables()
 
 run-benchmark.create_table_sql = table_schema.read().replace("\n", " ")
 
 run-benchmark.res = dest_con.execute(create_table_sql)
 
 run-benchmark.preserve_index
 
 run-benchmark.chunk_size_bytes
 
 run-benchmark.col_names_from_schema
 
 run-benchmark.file_json_open = open(output_file_json, "w")
 
list run-benchmark.jenkins_bench_results = []
 
 run-benchmark.jenkins_bench_json
 
 run-benchmark.file_jenkins_open = open(output_file_jenkins, "w")
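
The trailing variables cover result publication: per-query records are gathered into a pandas DataFrame and, depending on the requested destinations, written to a JSON file, emitted as a Jenkins benchmark report, or appended to a destination OmniSci table. A rough sketch of that stage is below; the destination labels ("file_json", "mapd_db") and the use of load_table_columnar are assumptions, while get_tables, execute, json_format_handler, and the keyword names preserve_index, chunk_size_bytes, and col_names_from_schema follow the entries above.

    import json
    from pandas import DataFrame

    results_df = DataFrame(query_results)            # one row per successful query
    query_list_json = json.dumps(query_list, default=json_format_handler, indent=2)

    if "file_json" in destinations:
        with open(output_file_json, "w") as file_json_open:
            file_json_open.write(query_list_json)

    if "mapd_db" in destinations:
        if dest_table not in dest_con.get_tables():  # create the table on first use
            dest_con.execute(create_table_sql)
        dest_con.load_table_columnar(dest_table, results_df,
                                     preserve_index=False,
                                     chunk_size_bytes=0,
                                     col_names_from_schema=True)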