Index: Tools/pybench/pybench.py
===================================================================
--- Tools/pybench/pybench.py	(revision 75102)
+++ Tools/pybench/pybench.py	(working copy)
@@ -230,7 +230,7 @@
             raise ValueError('at least one calibration run is required')
         self.calibration_runs = calibration_runs
         if timer is not None:
-            timer = timer
+            self.timer = timer
 
         # Init variables
         self.times = []
@@ -537,6 +537,7 @@
             if self.verbose:
                 print ' Round %-25i  effective   absolute  overhead' % (i+1)
             total_eff_time = 0.0
+            t0 = timer()
             for j in range(len(tests)):
                 name, test = tests[j]
                 if self.verbose:
@@ -549,39 +550,48 @@
                            (eff_time * MILLI_SECONDS,
                             abs_time * MILLI_SECONDS,
                             min_overhead * MILLI_SECONDS)
-            self.roundtimes.append(total_eff_time)
+            gross_time = timer() - t0
+            self.roundtimes.append((total_eff_time, gross_time))
             if self.verbose:
                 print ('                   '
                        '               ------------------------------')
                 print ('                   '
-                       '     Totals:                    %6.0fms' %
+                       '      Totals:                   %6.0fms' %
                        (total_eff_time * MILLI_SECONDS))
+                print ('                   '
+                       'Gross Totals:                   %6.0fms' %
+                       (gross_time * MILLI_SECONDS))
                 print
             else:
                 print '* Round %i done in %.3f seconds.' % (i+1,
-                                                            total_eff_time)
+                                                            gross_time)
         print
 
     def stat(self):
 
-        """ Return benchmark run statistics as tuple:
+        """ Return benchmark run statistics as a list of two tuples:
 
-            (minimum round time,
-             average round time,
-             maximum round time)
+            [(minimum net round time,
+              average net round time,
+              maximum net round time),
+             (minimum total round time,
+              average total round time,
+              maximum total round time)]
 
-            XXX Currently not used, since the benchmark does test
-                statistics across all rounds.
-
         """
-        runs = len(self.roundtimes)
-        if runs == 0:
-            return 0.0, 0.0
-        min_time = min(self.roundtimes)
-        total_time = reduce(operator.add, self.roundtimes, 0.0)
-        avg_time = total_time / float(runs)
-        max_time = max(self.roundtimes)
-        return (min_time, avg_time, max_time)
+        result = []
+        for i in range(2):
+            roundtimes = [t[i] for t in self.roundtimes]
+            runs = len(roundtimes)
+            if runs == 0:
+                result.append((0.0, 0.0, 0.0))
+                continue
+            min_time = min(roundtimes)
+            total_time = sum(roundtimes)
+            avg_time = total_time / float(runs)
+            max_time = max(roundtimes)
+            result.append((min_time, avg_time, max_time))
+        return result
 
 
     def print_header(self, title='Benchmark'):
@@ -629,8 +639,25 @@
                (total_min_time * MILLI_SECONDS,
                 total_avg_time * MILLI_SECONDS,
                 ))
+        print
+        print ('Whole rounds                        '
+               '        minimum    average')
+        print '-' * LINE
+        stats = self.stat()
+        print ('Sum of net times:                   '
+               '       %6.0fms   %6.0fms' %
+               (stats[0][0] * MILLI_SECONDS,
+                stats[0][1] * MILLI_SECONDS,
+                ))
+        print ('Gross time:                         '
+               '       %6.0fms   %6.0fms' %
+               (stats[1][0] * MILLI_SECONDS,
+                stats[1][1] * MILLI_SECONDS,
+                ))
+        print
+
 
     def print_comparison(self, compare_to, hidenoise=0, limitnames=None):
 
         # Check benchmark versions
@@ -717,33 +744,70 @@
             print '-' * LINE
 
         # Summarise test results
-        if not benchmarks_compatible or not tests_compatible:
-            min_diff, avg_diff = 'n/a', 'n/a'
-        else:
-            if other_total_min_time != 0.0:
-                min_diff = '%+5.1f%%' % (
-                    ((total_min_time * self.warp) /
-                     (other_total_min_time * compare_to.warp) - 1.0) * PERCENT)
+        def summarize(title, min_time, other_min_time,
+                      avg_time, other_avg_time):
+            if not benchmarks_compatible or not tests_compatible:
+                min_diff, avg_diff = 'n/a', 'n/a'
             else:
-                min_diff = 'n/a'
-            if other_total_avg_time != 0.0:
-                avg_diff = '%+5.1f%%' % (
-                    ((total_avg_time * self.warp) /
-                     (other_total_avg_time * compare_to.warp) - 1.0) * PERCENT)
-            else:
-                avg_diff = 'n/a'
-        print ('Totals:           '
-               ' %5.0fms %5.0fms %7s  %5.0fms %5.0fms %7s' %
-               (total_min_time * MILLI_SECONDS,
-                (other_total_min_time * compare_to.warp/self.warp
-                 * MILLI_SECONDS),
-                min_diff,
-                total_avg_time * MILLI_SECONDS,
-                (other_total_avg_time * compare_to.warp/self.warp
-                 * MILLI_SECONDS),
-                avg_diff
-               ))
+                if other_min_time != 0.0:
+                    min_diff = '%+5.1f%%' % (
+                        ((min_time * self.warp) /
+                         (other_min_time * compare_to.warp) - 1.0) * PERCENT)
+                else:
+                    min_diff = 'n/a'
+                if other_avg_time != 0.0:
+                    avg_diff = '%+5.1f%%' % (
+                        ((avg_time * self.warp) /
+                         (other_avg_time * compare_to.warp) - 1.0) * PERCENT)
+                else:
+                    avg_diff = 'n/a'
+            print (title +
+                   '                  '[len(title):] +
+                   ' %5.0fms %5.0fms %7s  %5.0fms %5.0fms %7s' %
+                   (min_time * MILLI_SECONDS,
+                    (other_min_time * compare_to.warp/self.warp
+                     * MILLI_SECONDS),
+                    min_diff,
+                    avg_time * MILLI_SECONDS,
+                    (other_avg_time * compare_to.warp/self.warp
+                     * MILLI_SECONDS),
+                    avg_diff
+                   ))
+        summarize("Totals:", total_min_time, other_total_min_time,
+                  total_avg_time, other_total_avg_time)
         print
+
+        # Per-round totals
+        print ('Whole rounds      '
+               ' minimum run-time        average run-time')
+        print ('                  '
+               '  this   other    diff    this   other    diff')
+        print '-' * LINE
+
+        sa = self.stat()
+        sb = compare_to.stat()
+        if type(sb) is not list:
+            sb = [sb]  # backwards compatibility with old pickles
+        min_time = sa[0][0]
+        other_min_time = sb[0][0]
+        avg_time = sa[0][1]
+        other_avg_time = sb[0][1]
+        summarize('Sum of net times:', min_time, other_min_time,
+                  avg_time, other_avg_time)
+
+        if len(sb) == 1:
+            # old format
+            print
+            return
+
+        min_time = sa[1][0]
+        other_min_time = sb[1][0]
+        avg_time = sa[1][1]
+        other_avg_time = sb[1][1]
+
+        summarize('Gross time:', min_time, other_min_time,
+                  avg_time, other_avg_time)
+        print
         print '(this=%s, other=%s)' % (self.name, compare_to.name)
         print
 
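For reviewers, a minimal self-contained sketch of the bookkeeping the patch introduces, outside the pybench classes: each round now records a (net, gross) pair, and stat() reduces each column to a (min, avg, max) tuple. The names below (run_rounds, the dummy tests) are hypothetical illustrations, not part of pybench.

import time

def run_rounds(tests, rounds=10):
    # Mirrors the new self.roundtimes format: one (net, gross) pair per
    # round.  net sums the per-test effective times; gross is the
    # wall-clock duration of the whole round, overhead included.
    roundtimes = []
    for i in range(rounds):
        net = 0.0
        t0 = time.time()
        for test in tests:
            net = net + test()   # each test reports its effective time
        gross = time.time() - t0
        roundtimes.append((net, gross))
    return roundtimes

def stat(roundtimes):
    # Same shape as the patched Benchmark.stat(): a list of two
    # (min, avg, max) tuples -- index 0 for net times, index 1 for
    # gross times.
    result = []
    for i in range(2):
        column = [t[i] for t in roundtimes]
        if not column:
            result.append((0.0, 0.0, 0.0))
            continue
        result.append((min(column),
                       sum(column) / float(len(column)),
                       max(column)))
    return result

# Example: two dummy "tests" that simply report a fixed effective time.
# stats[0] describes the net round times, stats[1] the gross round times.
stats = stat(run_rounds([lambda: 0.001, lambda: 0.002], rounds=5))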