OmniSciDB  72c90bc290
analyze_benchmark.py
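analyze-benchmark.py cross-compares two directories of benchmark JSON results, one produced on a reference branch and one on a sample branch, and reports per-query timing differences. A typical invocation looks like the following (the directory paths are illustrative; the flags and defaults come from the script itself):

    python3 analyze-benchmark.py -r ./ref-results -s ./sample-results -e 0.05 -a query_exec_trimmed_avg -p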
import json
import sys
from os import listdir
import os.path
import getopt


# loads a single benchmark json at a time, and can access its data
class BenchmarkLoader:
    def __init__(self, dir_name, filename_list):
        self.dir_name = dir_name
        self.filename_list = filename_list
        self.data = []

    # reading the benchmark json file
    def load(self, bench_filename):
        assert bench_filename in self.filename_list

        with open(self.dir_name + bench_filename) as json_file:
            # only load those queries that were successful
            filtered_input_data = filter(
                lambda experiment: experiment["succeeded"] is True,
                json.load(json_file),
            )
            # sort queries based on their IDs
            self.data = sorted(
                filtered_input_data,
                key=lambda experiment: experiment["results"]["query_id"],
            )

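    # Each element of self.data (one successful experiment) is assumed to look
    # roughly like the following; the keys are inferred from the accesses in this
    # file and the exact schema may vary:
    #   {"succeeded": True,
    #    "results": {"query_id": "...", "run_label": "...", "run_gpu_name": "...",
    #                "run_table": "...", "run_version": "...",
    #                "query_exec_trimmed_avg": <time in ms>, ...}}
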
    def getFrontAttribute(self, attribute):
        if self.data:
            return self.data[0]["results"][attribute]
        else:
            return "None"

    def getRunLabel(self):
        return self.getFrontAttribute("run_label")

    def getGpuName(self):
        return self.getFrontAttribute("run_gpu_name")

    def getRunTableName(self):
        return self.getFrontAttribute("run_table")

    # return a list of the attribute's values, for the queries in query_names, taken from self.data
    def fetchAttribute(self, attribute, query_names):
        result = []
        for query in query_names:
            for experiment in self.data:
                assert attribute in experiment["results"], (
                    attribute + " is not a valid attribute."
                )
                if query == experiment["results"]["query_id"]:
                    result.append(experiment["results"][attribute])
                    break
        return result

    def fetchQueryNames(self):
        result = []
        for experiment in self.data:
            result.append(experiment["results"]["query_id"])
        return result


class BenchAnalyzer:
    def __init__(self, ref, sample, attribute):
        assert isinstance(ref, BenchmarkLoader)
        assert isinstance(sample, BenchmarkLoader)
        self.__header_info = [ref.getFrontAttribute("query_group"), attribute]
        self.__label_name_ref = ref.fetchQueryNames()
        self.__label_name_sample = sample.fetchQueryNames()
        self.__missing_queries_ref = []
        self.__missing_queries_sample = []
        self.collectMissingQueries()
        assert self.__label_name_ref == self.__label_name_sample
        self.__attribute_ref = ref.fetchAttribute(
            attribute, self.__label_name_ref
        )
        self.__attribute_sample = sample.fetchAttribute(
            attribute, self.__label_name_sample
        )

    # collect all queries that do not exist in both sets of results
    def collectMissingQueries(self):
        for query in self.__label_name_ref:
            if query not in self.__label_name_sample:
                self.__missing_queries_sample.append(query)
                self.__label_name_ref.remove(query)
        for query in self.__label_name_sample:
            if query not in self.__label_name_ref:
                self.__missing_queries_ref.append(query)
                self.__label_name_sample.remove(query)

    def printHeader(self):
        for h in self.__header_info:
            print(" " + h, end="")

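    # Reports queries whose reference/sample time ratio deviates from 1.0 by more
    # than epsilon; e.g. with epsilon = 0.05 a query is flagged when the sample
    # run is more than 5% faster or slower than the reference run.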
    def findAnomaliesRatio(self, epsilon):
        found = False
        speedup = compute_speedup(
            self.__attribute_ref, self.__attribute_sample
        )
        print("Differences outside of %2.0f%%: " % (epsilon * 100), end="")
        self.printHeader()
        for i in range(len(speedup)):
            if abs(speedup[i] - 1.0) > epsilon:
                if not found:
                    found = True
                print(
                    "\n%s: reference = %.2f ms, sample = %.2f ms, speedup = %.2fx"
                    % (
                        self.__label_name_ref[i],
                        self.__attribute_ref[i],
                        self.__attribute_sample[i],
                        speedup[i],
                    ),
                    end="",
                )
        if not found:
            print(": None", end="")
        if self.__missing_queries_ref:
            print("\n*** Missing queries from reference: ", end="")
            for query in self.__missing_queries_ref:
                print(query + " ", end="")
        if self.__missing_queries_sample:
            print("\n*** Missing queries from sample: ", end="")
            for query in self.__missing_queries_sample:
                print(query + " ", end="")
        print(
            "\n======================================================================="
        )


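# Element-wise ratio of two equally long lists of timings. Callers pass the
# (reference, sample) timings, so a value greater than 1.0 means the sample run
# was faster; e.g. compute_speedup([10.0], [8.0]) yields [1.25].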
def compute_speedup(x, y):
    result = []
    zipped = list(zip(x, y))
    for q in zipped:
        result.append(q[0] / q[1])
    return result


class PrettyPrint:
    """
    This class is used to print the benchmark results to the terminal.
    By default, it is used for cross comparison of the results between a reference
    branch (ref) and a sample branch (sample); for a particular attribute, all elements
    within each branch are shown, as well as the speedup of the sample relative to
    the reference.

    If cross_comparison is disabled, then it just shows the results for the ref branch.
    """

    def __init__(
        self,
        ref,
        sample,
        attribute,
        cross_comparison=True,
        num_items_per_line=5,
    ):
        self.__cross_comparison = cross_comparison
        assert isinstance(ref, BenchmarkLoader)
        if cross_comparison:
            assert isinstance(sample, BenchmarkLoader)
        self.__header_info = [
            ref.getRunTableName(),
            attribute,
            ref.getGpuName(),
        ]
        self.__num_items_per_line = num_items_per_line
        self.__label_name_ref = ref.fetchQueryNames()
        if cross_comparison:
            self.__label_name_sample = sample.fetchQueryNames()
            assert self.__label_name_ref == self.__label_name_sample
        self.__missing_queries_ref = []
        self.__missing_queries_sample = []
        if cross_comparison:
            self.collectMissingQueries()
        self.__attribute_ref = ref.fetchAttribute(
            attribute, self.__label_name_ref
        )
        if cross_comparison:
            self.__attribute_sample = sample.fetchAttribute(
                attribute, self.__label_name_sample
            )
        # number of result blocks printed so far; used to walk through the
        # results num_items_per_line queries at a time
        self.__ref_line_count = 0

    # collect all queries that do not exist in both sets of results
    def collectMissingQueries(self):
        for query in self.__label_name_ref:
            if query not in self.__label_name_sample:
                self.__missing_queries_sample.append(query)
                self.__label_name_ref.remove(query)
        for query in self.__label_name_sample:
            if query not in self.__label_name_ref:
                self.__missing_queries_ref.append(query)
                self.__label_name_sample.remove(query)

    def printSolidLine(self, pattern):
        for i in range(self.__num_items_per_line + 1):
            for j in range(11):
                print(pattern, end="")
        print("")

    def printHeader(self):
        for h in self.__header_info:
            print("\t" + h)
        self.printSolidLine("=")

    def getRefElementsPerLine(self):
        return self.__ref_line_count * self.__num_items_per_line

    def printLine(self, array):
        begin = self.getRefElementsPerLine()
        end = self.getRefElementsPerLine() + self.__num_items_per_line
        for i in range(begin, min(end, len(self.__attribute_ref))):
            if isinstance(array[i], float):
                print("%10.2f" % (array[i]), end="")
            elif isinstance(array[i], str):
                print("%10s" % (array[i]), end="")
            else:
                assert False
        print("")

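    # Prints the results in blocks of num_items_per_line queries: each block shows
    # the query names, the reference timings, and, when cross-comparing, the sample
    # timings and the reference/sample speedup.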
    def printAttribute(self):
        self.printHeader()
        ref_count = len(self.__attribute_ref)
        while self.getRefElementsPerLine() < ref_count:
            print("%10s" % "Queries", end="")
            self.printLine(self.__label_name_ref)
            self.printSolidLine("-")
            print("%10s" % "Reference", end="")
            self.printLine(self.__attribute_ref)
            if self.__cross_comparison:
                print("%10s" % "Sample", end="")
                self.printLine(self.__attribute_sample)
                print("%10s" % "Speedup", end="")
                self.printLine(
                    compute_speedup(
                        self.__attribute_ref, self.__attribute_sample
                    )
                )
            self.printSolidLine("=")
            self.__ref_line_count += 1
        print("\n\n\n")


def main(argv):
    try:
        opts, args = getopt.getopt(
            argv,
            "hs:r:e:a:p",
            [
                "help",
                "sample=",
                "reference=",
                "epsilon=",
                "attribute=",
                "print",
            ],
        )
    except getopt.GetoptError:
        print(
            "python3 analyze-benchmark.py -s <sample dir> -r <reference dir> -e <epsilon> -a <attribute> -p"
        )
        sys.exit(2)

    dir_artifact_sample = ""
    dir_artifact_ref = ""
    epsilon = 0.05
    query_attribute = (
        "query_exec_trimmed_avg"
    )  # default attribute to use for benchmark comparison

    to_print = False  # printing all the results, disabled by default

    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print(
                """
    -s/--sample:\t\t\t directory of the results for the benchmarked sample branch
    -r/--reference:\t\t\t directory of the results for the benchmarked reference branch
    -e/--epsilon:\t\t\t ratio tolerance for reporting results outside this range
    -a/--attribute:\t\t\t attribute to be used for benchmark comparison (default: query_exec_trimmed_avg)
    -p/--print:\t\t\t\t print all the results
    """
            )
            sys.exit()
        else:
            if opt in ("-s", "--sample"):
                dir_artifact_sample = arg
                assert os.path.isdir(dir_artifact_sample)
            elif opt in ("-r", "--reference"):
                dir_artifact_ref = arg
                assert os.path.isdir(dir_artifact_ref)
            elif opt in ("-e", "--epsilon"):
                epsilon = float(arg)
            elif opt in ("-a", "--attribute"):
                query_attribute = arg
            elif opt in ("-p", "--print"):
                to_print = True

    assert dir_artifact_ref != ""
    assert dir_artifact_sample != ""
    assert epsilon <= 1

    GPU_list_ref = listdir(dir_artifact_ref)
    GPU_list_sample = listdir(dir_artifact_sample)

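    # Each results directory is expected to contain one subdirectory per GPU, each
    # holding a "Benchmarks" folder with the per-table benchmark JSON files, i.e.
    # roughly <results dir>/<GPU name>/Benchmarks/<benchmark>.json.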
    for gpu in GPU_list_ref:
        dir_name_ref = dir_artifact_ref + "/" + gpu + "/Benchmarks"
        filename_list_ref = listdir(dir_name_ref)
        dir_name_ref += "/"

        refBench = BenchmarkLoader(dir_name_ref, filename_list_ref)

        if gpu in GPU_list_sample:
            dir_name_sample = dir_artifact_sample + "/" + gpu + "/Benchmarks"
            filename_list_sample = listdir(dir_name_sample)
            dir_name_sample += "/"

            sampleBench = BenchmarkLoader(
                dir_name_sample, filename_list_sample
            )
            first_header = True
            for index in range(len(filename_list_ref)):
                refBench.load(filename_list_ref[index])
                if filename_list_ref[index] in filename_list_sample:
                    sampleBench.load(filename_list_ref[index])
                    if first_header:
                        print(
                            "\n+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
                        )
                        print("++++ " + sampleBench.getGpuName())
                        print(
                            "++++ reference("
                            + refBench.getFrontAttribute("run_label")
                            + "): "
                            + refBench.getFrontAttribute("run_version")
                        )
                        print(
                            "++++ sample("
                            + sampleBench.getFrontAttribute("run_label")
                            + "): "
                            + sampleBench.getFrontAttribute("run_version")
                        )
                        print(
                            "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n"
                        )
                        first_header = False

                    analyzer = BenchAnalyzer(
                        refBench, sampleBench, query_attribute
                    )
                    analyzer.findAnomaliesRatio(epsilon)
                    if to_print:
                        printer = PrettyPrint(
                            refBench, sampleBench, query_attribute
                        )
                        printer.printAttribute()
                else:
                    print(
                        "No sample results for table "
                        + refBench.getRunTableName()
                        + " were found."
                    )
                    print(
                        "======================================================================="
                    )

        else:
            print(
                "\n+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
            )
            print("++++ No sample results for GPU " + gpu + " were found.")
            print(
                "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n"
            )


if __name__ == "__main__":
    main(sys.argv[1:])