Index: Tools/pybench/pybench.py =================================================================== --- Tools/pybench/pybench.py (revision 75102) +++ Tools/pybench/pybench.py (working copy) @@ -71,30 +71,46 @@ ALLOW_SKIPPING_CALIBRATION = 1 # Timer types -TIMER_TIME_TIME = 'time.time' -TIMER_TIME_CLOCK = 'time.clock' -TIMER_SYSTIMES_PROCESSTIME = 'systimes.processtime' +TIMER_TIME_TIME = ['time.time', 'time'] +TIMER_TIME_CLOCK = ['time.clock', 'clock'] +TIMER_SYSTIMES_PROCESSTIME = ['systimes.processtime', 'processtime'] # Choose platform default timer if sys.platform[:3] == 'win': # On WinXP this has 2.5ms resolution - TIMER_PLATFORM_DEFAULT = TIMER_TIME_CLOCK + TIMER_PLATFORM_DEFAULT = 'time.clock' else: # On Linux this has 1ms resolution - TIMER_PLATFORM_DEFAULT = TIMER_TIME_TIME + TIMER_PLATFORM_DEFAULT = 'time.time' # Print debug information ? _debug = 0 ### Helpers +class processtime_win(): + def __init__(self): + import ctypes + self.GPT = ctypes.windll.kernel32.GetProcessTimes + self.dummy = ctypes.c_uint64() + self.utime = ctypes.c_uint64() + self.ktime = ctypes.c_uint64() + self.args = (-1, ctypes.byref(self.dummy), ctypes.byref(self.dummy), + ctypes.byref(self.ktime), ctypes.byref(self.utime)) + + def __call__(self): + self.GPT(*self.args) + return (float(self.utime.value)+float(self.ktime.value))*1e-7 + def get_timer(timertype): - if timertype == TIMER_TIME_TIME: + if timertype in TIMER_TIME_TIME: return time.time - elif timertype == TIMER_TIME_CLOCK: + elif timertype in TIMER_TIME_CLOCK: return time.clock - elif timertype == TIMER_SYSTIMES_PROCESSTIME: + elif timertype in TIMER_SYSTIMES_PROCESSTIME: + if sys.platform[:3] == 'win': + return processtime_win() import systimes return systimes.processtime else: @@ -230,7 +246,7 @@ raise ValueError('at least one calibration run is required') self.calibration_runs = calibration_runs if timer is not None: - timer = timer + self.timer = timer # Init variables self.times = [] @@ -537,6 +553,7 @@ if self.verbose: 
print ' Round %-25i effective absolute overhead' % (i+1) total_eff_time = 0.0 + t0 = timer() for j in range(len(tests)): name, test = tests[j] if self.verbose: @@ -549,17 +566,21 @@ (eff_time * MILLI_SECONDS, abs_time * MILLI_SECONDS, min_overhead * MILLI_SECONDS) - self.roundtimes.append(total_eff_time) + gross_time = timer()-t0 + self.roundtimes.append((total_eff_time, gross_time)) if self.verbose: print (' ' ' ------------------------------') print (' ' - ' Totals: %6.0fms' % + ' Totals: %6.0fms' % (total_eff_time * MILLI_SECONDS)) + print (' ' + 'Gross Totals: %6.0fms' % + (gross_time * MILLI_SECONDS)) print else: print '* Round %i done in %.3f seconds.' % (i+1, - total_eff_time) + gross_time) print def stat(self): @@ -574,14 +595,18 @@ statistics across all rounds. """ - runs = len(self.roundtimes) - if runs == 0: - return 0.0, 0.0 - min_time = min(self.roundtimes) - total_time = reduce(operator.add, self.roundtimes, 0.0) - avg_time = total_time / float(runs) - max_time = max(self.roundtimes) - return (min_time, avg_time, max_time) + result = [] + for i in range(2): + roundtimes = [t[i] for t in self.roundtimes] + runs = len(roundtimes) + if runs == 0: + return [(0.0, 0.0, 0.0)] * 2 + min_time = min(roundtimes) + total_time = sum(roundtimes) + avg_time = total_time / float(runs) + max_time = max(roundtimes) + result.append((min_time, avg_time, max_time)) + return result def print_header(self, title='Benchmark'): @@ -629,8 +654,25 @@ (total_min_time * MILLI_SECONDS, total_avg_time * MILLI_SECONDS, )) + print + print ('Whole rounds ' + ' minimum average') + print '-' * LINE + stats = self.stat() + print ('Sum of net times: ' + ' %6.0fms %6.0fms' % + (stats[0][0] * MILLI_SECONDS, + stats[0][1] * MILLI_SECONDS, + )) + print ('Gross time: ' + ' %6.0fms %6.0fms' % + (stats[1][0] * MILLI_SECONDS, + stats[1][1] * MILLI_SECONDS, + )) + print + def print_comparison(self, compare_to, hidenoise=0, limitnames=None): # Check benchmark versions @@ -717,33 +759,70 @@ print '-' * LINE #
Summarise test results - if not benchmarks_compatible or not tests_compatible: - min_diff, avg_diff = 'n/a', 'n/a' - else: - if other_total_min_time != 0.0: - min_diff = '%+5.1f%%' % ( - ((total_min_time * self.warp) / - (other_total_min_time * compare_to.warp) - 1.0) * PERCENT) + def summarize(title, min_time, other_min_time, + avg_time, other_avg_time): + if not benchmarks_compatible or not tests_compatible: + min_diff, avg_diff = 'n/a', 'n/a' else: - min_diff = 'n/a' - if other_total_avg_time != 0.0: - avg_diff = '%+5.1f%%' % ( - ((total_avg_time * self.warp) / - (other_total_avg_time * compare_to.warp) - 1.0) * PERCENT) - else: - avg_diff = 'n/a' - print ('Totals: ' - ' %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' % - (total_min_time * MILLI_SECONDS, - (other_total_min_time * compare_to.warp/self.warp - * MILLI_SECONDS), - min_diff, - total_avg_time * MILLI_SECONDS, - (other_total_avg_time * compare_to.warp/self.warp - * MILLI_SECONDS), - avg_diff - )) + if other_min_time != 0.0: + min_diff = '%+5.1f%%' % ( + ((min_time * self.warp) / + (other_min_time * compare_to.warp) - 1.0) * PERCENT) + else: + min_diff = 'n/a' + if other_avg_time != 0.0: + avg_diff = '%+5.1f%%' % ( + ((avg_time * self.warp) / + (other_avg_time * compare_to.warp) - 1.0) * PERCENT) + else: + avg_diff = 'n/a' + print (title + + ' '[len(title):] + + ' %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' % + (min_time * MILLI_SECONDS, + (other_min_time * compare_to.warp/self.warp + * MILLI_SECONDS), + min_diff, + avg_time * MILLI_SECONDS, + (other_avg_time * compare_to.warp/self.warp + * MILLI_SECONDS), + avg_diff + )) + summarize("Totals:", total_min_time, other_total_min_time, + total_avg_time, other_total_avg_time) print + + #per-round totals + print ('Whole rounds ' + ' minimum run-time average run-time') + print (' ' + ' this other diff this other diff') + print '-' * LINE + + sa = self.stat() + sb = compare_to.stat() + if type(sb) is not list: + sb = [sb] #backwards compatibility + min_time = sa[0][0] + 
other_min_time = sb[0][0] + avg_time = sa[0][1] + other_avg_time = sb[0][1] + summarize('Sum of net times:', min_time, other_min_time, + avg_time, other_avg_time) + + if len(sb) == 1: + #old format + print + return + + min_time = sa[1][0] + other_min_time = sb[1][0] + avg_time = sa[1][1] + other_avg_time = sb[1][1] + + summarize('Gross time:', min_time, other_min_time, + avg_time, other_avg_time) + print print '(this=%s, other=%s)' % (self.name, compare_to.name) print