diff --git a/perf.py b/perf.py
--- a/perf.py
+++ b/perf.py
@@ -120,6 +120,29 @@ def ported_lib(python):
     return 'lib'


+def supported_timers(python):
+    """Return a list of the timers supported by the given Python
+    interpreter, in decreasing order of priority.
+    """
+    version = interpreter_version(python)
+    if tuple(map(int, version.split('.')[:2])) >= (3, 3):  # avoid lexicographic '3.10' < '3.3'
+        return ['perf_counter', 'time']
+    else:
+        return ['time']
+
+
+def choose_timer(base_python, changed_python):
+    """Choose the best timer supported by both *base_python* and
+    *changed_python*.
+    """
+    u = supported_timers(base_python)
+    v = set(supported_timers(changed_python))
+    for timer in u:
+        if timer in v:
+            return timer
+    assert 0  # At least time.time() should always be available
+
+
 def avg(seq):
     return sum(seq) / float(len(seq))

@@ -969,6 +992,9 @@ def BuildEnv(env=None, inherit_env=[]):
     for k in ("COMSPEC", "SystemRoot"):
         if k in os.environ and k not in fixed_env:
             fixed_env[k] = os.environ[k]
+    # Make hashing deterministic (this may make some benchmarks more
+    # reproducible).
+    fixed_env["PYTHONHASHSEED"] = "1"
     return fixed_env


@@ -1130,7 +1156,8 @@ def MeasureGeneric(python, options, bm_p
     trials = max(1, int(trials * iteration_scaling))

     RemovePycs()
-    command = python + [bm_path, "-n", trials] + extra_args
+    bench_args = [bm_path, "-n", trials, "--timer", options.timer]
+    command = python + bench_args + extra_args
     output = CallAndCaptureOutput(command, bm_env,
                                   track_memory=options.track_memory,
                                   inherit_env=options.inherit_env)
@@ -2401,6 +2428,7 @@ def main(argv, bench_funcs=BENCH_FUNCS,
         usage="%prog [options] baseline_python changed_python",
         description=("Compares the performance of baseline_python with" +
                      " changed_python and prints a report."))
+
     parser.add_option("-r", "--rigorous", action="store_true",
                       help=("Spend longer running tests to get more" +
                             " accurate results"))
@@ -2410,6 +2438,9 @@ def main(argv, bench_funcs=BENCH_FUNCS,
                       help="Print more output")
     parser.add_option("-m", "--track_memory", action="store_true",
                       help="Track memory usage. This only works on Linux.")
+    parser.add_option("--timer", action="store",
+                      help="Override the timer function (default: auto-select).")
+
     parser.add_option("-a", "--args", default="",
                       help=("Pass extra arguments to the python binaries."
                             " If there is a comma in this option's value, the"
@@ -2488,6 +2519,10 @@ def main(argv, bench_funcs=BENCH_FUNCS,
     if options.diff_instrumentation:
         info("Suppressing performance data due to --diff_instrumentation")

+    if not options.timer:
+        options.timer = choose_timer(base_cmd_prefix, changed_cmd_prefix)
+        info("Automatically selected timer: %s", options.timer)
+
     should_run = ParseBenchmarksOption(options.benchmarks, bench_groups,
                                        options.fast)

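[Editor's note] The timer selection above is worth a standalone illustration. The sketch below mirrors supported_timers()/choose_timer() with interpreter_version() stubbed out (in perf.py it queries the interpreter binary itself); the assertions show the preference order and the fallback. The same perf.py changes also pin PYTHONHASHSEED=1 in the child environment, so hash randomization cannot add run-to-run noise.

def interpreter_version(python):
    # Stub for illustration only; perf.py asks the actual binary.
    return python

def supported_timers(python):
    # time.perf_counter() (monotonic, high-resolution) exists since 3.3;
    # time.time() is always available.
    if tuple(map(int, interpreter_version(python).split('.')[:2])) >= (3, 3):
        return ['perf_counter', 'time']
    return ['time']

def choose_timer(base_python, changed_python):
    # Pick the highest-priority timer available on *both* interpreters.
    common = set(supported_timers(changed_python))
    for timer in supported_timers(base_python):
        if timer in common:
            return timer
    raise AssertionError("time.time() should always be available")

assert choose_timer('3.3', '3.4') == 'perf_counter'
assert choose_timer('2.7', '3.4') == 'time'  # fall back to the shared timer
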
diff --git a/performance/bm_call_method.py b/performance/bm_call_method.py
--- a/performance/bm_call_method.py
+++ b/performance/bm_call_method.py
@@ -113,11 +113,11 @@ class Foo(object):
         pass


-def test_calls(iterations):
+def test_calls(iterations, timer):
     times = []
     f = Foo()
     for _ in xrange(iterations):
-        t0 = time.time()
+        t0 = timer()
         # 20 calls
         f.foo(1, 2, 3, 4)
         f.foo(1, 2, 3, 4)
@@ -139,7 +139,7 @@
         f.foo(1, 2, 3, 4)
         f.foo(1, 2, 3, 4)
         f.foo(1, 2, 3, 4)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times

@@ -153,6 +153,6 @@ if __name__ == "__main__":
     options, _ = parser.parse_args()

     # Priming run.
-    test_calls(1)
+    test_calls(1, time.time)

     util.run_benchmark(options, options.num_runs, test_calls)
diff --git a/performance/bm_call_method_slots.py b/performance/bm_call_method_slots.py
--- a/performance/bm_call_method_slots.py
+++ b/performance/bm_call_method_slots.py
@@ -117,13 +117,13 @@ class Foo(object):
         pass


-def test_calls(iterations):
+def test_calls(iterations, timer):
     times = []
     f = Foo()
     if hasattr(f, '__dict__'):
         raise Exception("f has a __dict__ attribute!")
     for _ in xrange(iterations):
-        t0 = time.time()
+        t0 = timer()
         # 20 calls
         f.foo(1, 2, 3, 4)
         f.foo(1, 2, 3, 4)
@@ -145,7 +145,7 @@
         f.foo(1, 2, 3, 4)
         f.foo(1, 2, 3, 4)
         f.foo(1, 2, 3, 4)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times

@@ -159,6 +159,6 @@ if __name__ == "__main__":
     options, _ = parser.parse_args()

     # Priming run.
-    test_calls(1)
+    test_calls(1, time.time)

     util.run_benchmark(options, options.num_runs, test_calls)
diff --git a/performance/bm_call_method_unknown.py b/performance/bm_call_method_unknown.py
--- a/performance/bm_call_method_unknown.py
+++ b/performance/bm_call_method_unknown.py
@@ -313,13 +313,13 @@ class Baz(object):
         pass


-def test_calls(iterations):
+def test_calls(iterations, timer):
     times = []
     a = Foo()
     b = Bar()
     c = Baz()
     for _ in xrange(iterations):
-        t0 = time.time()
+        t0 = timer()
         # 18 calls
         a.foo(b, c)
         b.foo(c, a)
@@ -339,7 +339,7 @@
         a.foo(b, c)
         b.foo(c, a)
         c.foo(a, b)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times

@@ -353,6 +353,6 @@ if __name__ == "__main__":
     options, _ = parser.parse_args()

     # Priming run.
-    test_calls(1)
+    test_calls(1, time.time)

     util.run_benchmark(options, options.num_runs, test_calls)
diff --git a/performance/bm_call_simple.py b/performance/bm_call_simple.py
--- a/performance/bm_call_simple.py
+++ b/performance/bm_call_simple.py
@@ -115,10 +115,10 @@ def qux():
     pass


-def test_calls(iterations):
+def test_calls(iterations, timer):
     times = []
     for _ in xrange(iterations):
-        t0 = time.time()
+        t0 = timer()
         # 20 calls
         foo(1, 2, 3, 4)
         foo(1, 2, 3, 4)
@@ -140,7 +140,7 @@
         foo(1, 2, 3, 4)
         foo(1, 2, 3, 4)
         foo(1, 2, 3, 4)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times

@@ -154,6 +154,6 @@ if __name__ == "__main__":
     options, _ = parser.parse_args()

     # Priming run.
-    test_calls(1)
+    test_calls(1, time.time)

     util.run_benchmark(options, options.num_runs, test_calls)
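[Editor's note] Every benchmark in this patch now receives the timer as an argument instead of calling time.time() directly; only the priming runs pin time.time explicitly. The matching change to util.run_benchmark() is not part of this section; presumably it resolves the --timer name to the corresponding function in the time module. A minimal sketch of that assumed contract (run_benchmark here is illustrative, not the repository's actual code):

import optparse
import time

def run_benchmark(options, num_runs, bench_func, *extra_args):
    # Resolve the timer *name* ('time' or 'perf_counter') to the callable
    # and hand it to the benchmark as its second argument.
    timer = getattr(time, options.timer or 'time')
    for elapsed in bench_func(num_runs, timer, *extra_args):
        print(elapsed)

def test_calls(iterations, timer):
    times = []
    for _ in range(iterations):
        t0 = timer()
        sum(range(10000))  # stand-in workload
        times.append(timer() - t0)
    return times

parser = optparse.OptionParser()
parser.add_option("--timer", action="store", default=None)
options, _ = parser.parse_args(["--timer", "perf_counter"])
run_benchmark(options, 5, test_calls)
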
diff --git a/performance/bm_chameleon.py b/performance/bm_chameleon.py
--- a/performance/bm_chameleon.py
+++ b/performance/bm_chameleon.py
@@ -15,16 +15,15 @@
         tal:content="python: d" />
 """ % compat.unicode.__name__

-def main(n):
+def main(n, timer):
     tmpl = PageTemplate(BIGTABLE_ZPT)
     options = {'table': [dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9, j=10)
                          for x in range(1000)]}
-    import time
     l = []
     for k in range(n):
-        t0 = time.time()
+        t0 = timer()
         tmpl(options=options)
-        l.append(time.time() - t0)
+        l.append(timer() - t0)
     return l

 if __name__ == '__main__':
diff --git a/performance/bm_chaos.py b/performance/bm_chaos.py
--- a/performance/bm_chaos.py
+++ b/performance/bm_chaos.py
@@ -199,14 +199,14 @@ class Chaosgame(object):
         if point.y < self.miny:
             point.y = self.miny

-    def create_image_chaos(self, w, h, n):
+    def create_image_chaos(self, timer, w, h, n):
         im = [[1] * h for i in range(w)]
         point = GVector((self.maxx + self.minx) / 2,
                         (self.maxy + self.miny) / 2, 0)
         colored = 0
         times = []
         for _ in range(n):
-            t1 = time.time()
+            t1 = timer()
             for i in xrange(5000):
                 point = self.transform_point(point)
                 x = (point.x - self.minx) / self.width * w
@@ -218,12 +218,12 @@ class Chaosgame(object):
                 if y == h:
                     y -= 1
                 im[x][h - y - 1] = 0
-            t2 = time.time()
+            t2 = timer()
             times.append(t2 - t1)
         return times


-def main(n):
+def main(n, timer):
     splines = [
         Spline([
             GVector(1.597350, 3.304460, 0.000000),
@@ -248,7 +248,7 @@ def main(n):
             3, [0, 0, 0, 1, 1, 1])
         ]
     c = Chaosgame(splines, 0.25)
-    return c.create_image_chaos(1000, 1200, n)
+    return c.create_image_chaos(timer, 1000, 1200, n)



diff --git a/performance/bm_django.py b/performance/bm_django.py
--- a/performance/bm_django.py
+++ b/performance/bm_django.py
@@ -32,7 +32,7 @@ DJANGO_TMPL = Template("""
 """)


-def test_django(count):
+def test_django(count, timer):
     table = [xrange(150) for _ in xrange(150)]
     context = Context({"table": table})

@@ -42,9 +42,9 @@ def test_django(count):

     times = []
     for _ in xrange(count):
-        t0 = time.time()
+        t0 = timer()
         data = DJANGO_TMPL.render(context)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times
diff --git a/performance/bm_django_v2.py b/performance/bm_django_v2.py
--- a/performance/bm_django_v2.py
+++ b/performance/bm_django_v2.py
@@ -28,7 +28,7 @@ DJANGO_TMPL = Template("""
 """)


-def test_django(count):
+def test_django(count, timer):
     table = [xrange(150) for _ in xrange(150)]
     context = Context({"table": table})

@@ -38,9 +38,9 @@ def test_django(count):

     times = []
     for _ in xrange(count):
-        t0 = time.time()
+        t0 = timer()
         data = DJANGO_TMPL.render(context)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times
diff --git a/performance/bm_elementtree.py b/performance/bm_elementtree.py
--- a/performance/bm_elementtree.py
+++ b/performance/bm_elementtree.py
@@ -179,7 +179,7 @@ def bench_generate(etree, xml_file, xml_
         raise RuntimeError("unexpected output detected")


-def run_etree_benchmark(iterations, etree, bench_func):
+def run_etree_benchmark(iterations, timer, etree, bench_func):
     times = []

     xml_root = build_xml_tree(etree)
@@ -195,9 +195,9 @@ def run_etree_benchmark(iterations, etre
         bench_func(etree, file_path, xml_data, xml_root)

         for _ in xrange(iterations):
-            t0 = time.time()
+            t0 = timer()
             bench_func(etree, file_path, xml_data, xml_root)
-            t1 = time.time()
+            t1 = timer()
             times.append(t1 - t0)
     finally:
         try:
diff --git a/performance/bm_fannkuch.py b/performance/bm_fannkuch.py
--- a/performance/bm_fannkuch.py
+++ b/performance/bm_fannkuch.py
@@ -54,12 +54,12 @@ def fannkuch(n):

 DEFAULT_ARG = 9

-def main(n):
+def main(n, timer):
     times = []
     for i in xrange(n):
-        t0 = time.time()
+        t0 = timer()
         fannkuch(DEFAULT_ARG)
-        tk = time.time()
+        tk = timer()
         times.append(tk - t0)
     return times
diff --git a/performance/bm_float.py b/performance/bm_float.py
--- a/performance/bm_float.py
+++ b/performance/bm_float.py
@@ -47,14 +47,14 @@ def benchmark(n):

 POINTS = 100000

-def main(arg):
+def main(arg, timer):
     # XXX warmup

     times = []
     for i in xrange(arg):
-        t0 = time.time()
+        t0 = timer()
         o = benchmark(POINTS)
-        tk = time.time()
+        tk = timer()
         times.append(tk - t0)
     return times
diff --git a/performance/bm_go.py b/performance/bm_go.py
--- a/performance/bm_go.py
+++ b/performance/bm_go.py
@@ -424,14 +424,14 @@ def versus_cpu():
     board = Board()
     pos = computer_move(board)

-def main(n):
+def main(n, timer):
     times = []
     for i in range(5):
         versus_cpu()  # warmup
     for i in range(n):
-        t1 = time.time()
+        t1 = timer()
         versus_cpu()
-        t2 = time.time()
+        t2 = timer()
         times.append(t2 - t1)
     return times
diff --git a/performance/bm_hexiom2.py b/performance/bm_hexiom2.py
--- a/performance/bm_hexiom2.py
+++ b/performance/bm_hexiom2.py
@@ -518,14 +518,14 @@ 2 . 1 . 3 . 2
     if output.getvalue() != expected:
         raise AssertionError("got a wrong answer:\n%s" % output.getvalue())

-def main(n):
+def main(n, timer):
     # only run 1/25th of the requested number of iterations.
     # with the default n=50 from runner.py, this means twice.
     l = []
     for i in xrange(n):
-        t0 = time.time()
+        t0 = timer()
         run_level36()
-        time_elapsed = time.time() - t0
+        time_elapsed = timer() - t0
         l.append(time_elapsed)
     return l
diff --git a/performance/bm_html5lib.py b/performance/bm_html5lib.py
--- a/performance/bm_html5lib.py
+++ b/performance/bm_html5lib.py
@@ -23,16 +23,16 @@ import util
 import html5lib


-def test_html5lib(count, spec_data):
+def test_html5lib(count, timer, spec_data):
     # No warm-up runs for this benchmark; in real life, the parser doesn't get
     # to warm up (this isn't a daemon process).

     times = []
     for _ in xrange(count):
         spec_data.seek(0)
-        t0 = time.time()
+        t0 = timer()
         html5lib.parse(spec_data)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times
diff --git a/performance/bm_json.py b/performance/bm_json.py
--- a/performance/bm_json.py
+++ b/performance/bm_json.py
@@ -76,7 +76,7 @@ random_source = random.Random(5) # Fixe
 DICT_GROUP = [mutate_dict(DICT, random_source) for _ in range(3)]


-def test_json_dump(num_obj_copies, json, options):
+def test_json_dump(num_obj_copies, timer, json, options):
     # Warm-up runs.
     json.dumps(DICT)
     json.dumps(TUPLE)
@@ -85,7 +85,7 @@ def test_json_dump(num_obj_copies, json,
     loops = num_obj_copies // 20  # We do 20 runs per loop.
     times = []
     for _ in xrange(options.num_runs):
-        t0 = time.time()
+        t0 = timer()
         for _ in xrange(loops):
             json.dumps(DICT)
             json.dumps(DICT)
@@ -147,12 +147,12 @@ def test_json_dump(num_obj_copies, json,
             json.dumps(DICT_GROUP)
             json.dumps(DICT_GROUP)
             json.dumps(DICT_GROUP)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times


-def test_json_load(num_obj_copies, json, options):
+def test_json_load(num_obj_copies, timer, json, options):
     json_dict = json.dumps(DICT)
     json_tuple = json.dumps(TUPLE)
     json_dict_group = json.dumps(DICT_GROUP)
@@ -165,7 +165,7 @@ def test_json_load(num_obj_copies, json,
     loops = num_obj_copies // 20  # We do 20 runs per loop.
     times = []
     for _ in xrange(options.num_runs):
-        t0 = time.time()
+        t0 = timer()
         for _ in xrange(loops):
             json.loads(json_dict)
             json.loads(json_dict)
@@ -227,7 +227,7 @@ def test_json_load(num_obj_copies, json,
             json.loads(json_dict_group)
             json.loads(json_dict_group)
             json.loads(json_dict_group)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times
diff --git a/performance/bm_json_v2.py b/performance/bm_json_v2.py
--- a/performance/bm_json_v2.py
+++ b/performance/bm_json_v2.py
@@ -17,15 +17,15 @@ HUGE = ([NESTED[0]] * 1000, 100)

 cases = ['EMPTY', 'SIMPLE', 'NESTED', 'HUGE']

-def main(n):
+def main(n, timer):
     l = []
     for i in xrange(n):
-        t0 = time.time()
+        t0 = timer()
         for case in cases:
             data, count = globals()[case]
             for i in xrange(count):
                 json.dumps(data)
-        l.append(time.time() - t0)
+        l.append(timer() - t0)
     return l

 if __name__ == '__main__':
diff --git a/performance/bm_logging.py b/performance/bm_logging.py
--- a/performance/bm_logging.py
+++ b/performance/bm_logging.py
@@ -19,11 +19,11 @@ FORMAT = 'important: %s'
 MESSAGE = 'some important information to be logged'


-def test_no_output(iterations, logger):
+def test_no_output(iterations, timer, logger):
     times = []
     m = MESSAGE
     for _ in xrange(iterations):
-        t0 = time.time()
+        t0 = timer()
         for _ in xrange(10000):
             logger.debug(m)
             logger.debug(m)
@@ -35,16 +35,16 @@ def test_no_output(iterations, logger):
             logger.debug(m)
             logger.debug(m)
             logger.debug(m)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times


-def test_simple_output(iterations, logger):
+def test_simple_output(iterations, timer, logger):
     times = []
     m = MESSAGE
     for _ in xrange(iterations):
-        t0 = time.time()
+        t0 = timer()
         for _ in xrange(1000):
             logger.warn(m)
             logger.warn(m)
@@ -56,17 +56,17 @@ def test_simple_output(iterations, logge
             logger.warn(m)
             logger.warn(m)
             logger.warn(m)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times


-def test_formatted_output(iterations, logger):
+def test_formatted_output(iterations, timer, logger):
     times = []
     f = FORMAT
     m = MESSAGE
     for _ in xrange(iterations):
-        t0 = time.time()
+        t0 = timer()
         for _ in xrange(1000):
             logger.warn(f, m)
             logger.warn(f, m)
@@ -78,7 +78,7 @@ def test_formatted_output(iterations, lo
             logger.warn(f, m)
             logger.warn(f, m)
             logger.warn(f, m)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times
diff --git a/performance/bm_mako.py b/performance/bm_mako.py
--- a/performance/bm_mako.py
+++ b/performance/bm_mako.py
@@ -25,7 +25,7 @@ MAKO_TMPL = Template("""
 """)


-def test_mako(count):
+def test_mako(count, timer):
     table = [xrange(150) for _ in xrange(150)]

     # Warm up Mako.
@@ -34,9 +34,9 @@ def test_mako(count):

     times = []
     for _ in xrange(count):
-        t0 = time.time()
+        t0 = timer()
         MAKO_TMPL.render(table = table)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times
diff --git a/performance/bm_mako_v2.py b/performance/bm_mako_v2.py
--- a/performance/bm_mako_v2.py
+++ b/performance/bm_mako_v2.py
@@ -118,7 +118,7 @@ velit. Fusce dapibus ligula quis lectus
 """


-def test_mako(count):
+def test_mako(count, timer):
     lookup = TemplateLookup()
     lookup.put_string('base.mako', BASE_TEMPLATE)

@@ -132,11 +132,11 @@ def test_mako(count):

     times = []
     for i in xrange(count):
-        t0 = time.time()
+        t0 = timer()
         data = template.render(table=table, paragraphs=paragraphs,
                                lorem=LOREM_IPSUM, title=title,
                                img_count=50, xrange=xrange)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1-t0)
     return times
diff --git a/performance/bm_meteor_contest.py b/performance/bm_meteor_contest.py
--- a/performance/bm_meteor_contest.py
+++ b/performance/bm_meteor_contest.py
@@ -131,10 +131,10 @@ def solve(n, i_min, free, curr_board, pi

 SOLVE_ARG = 60

-def main(n):
+def main(n, timer):
     times = []
     for i in xrange(n):
-        t0 = time.time()
+        t0 = timer()
         free = frozenset(xrange(len(board)))
         curr_board = [-1] * len(board)
         pieces_left = list(range(len(pieces)))
@@ -142,7 +142,7 @@ def main(n):
         solve(SOLVE_ARG, 0, free, curr_board, pieces_left, solutions)
         #print len(solutions), 'solutions found\n'
         #for i in (0, -1): print_board(solutions[i])
-        tk = time.time()
+        tk = timer()
         times.append(tk - t0)
     return times
diff --git a/performance/bm_nbody.py b/performance/bm_nbody.py
--- a/performance/bm_nbody.py
+++ b/performance/bm_nbody.py
@@ -124,7 +124,7 @@ def offset_momentum(ref, bodies=SYSTEM,
     v[2] = pz / m


-def test_nbody(iterations):
+def test_nbody(iterations, timer):
     # Warm-up runs.
     report_energy()
     advance(0.01, 20000)
@@ -132,11 +132,11 @@ def test_nbody(iterations):

     times = []
     for _ in xrange(iterations):
-        t0 = time.time()
+        t0 = timer()
         report_energy()
         advance(0.01, 20000)
         report_energy()
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times
diff --git a/performance/bm_nqueens.py b/performance/bm_nqueens.py
--- a/performance/bm_nqueens.py
+++ b/performance/bm_nqueens.py
@@ -60,16 +60,16 @@ def n_queens(queen_count):
             yield vec


-def test_n_queens(iterations):
+def test_n_queens(iterations, timer):
     # Warm-up runs.
     list(n_queens(8))
     list(n_queens(8))

     times = []
     for _ in xrange(iterations):
-        t0 = time.time()
+        t0 = timer()
         list(n_queens(8))
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times
diff --git a/performance/bm_pathlib.py b/performance/bm_pathlib.py
--- a/performance/bm_pathlib.py
+++ b/performance/bm_pathlib.py
@@ -43,7 +43,7 @@ def teardown():
     shutil.rmtree(TMP_PATH)


-def test_pathlib(count):
+def test_pathlib(count, timer):
     base_path = Path(TMP_PATH)

     # Warm up the filesystem cache and keep some objects in memory.
@@ -53,8 +53,8 @@ def test_pathlib(count):
     assert len(path_objects) == NUM_FILES

     times = []
-    for _ in xrange(count // 2):
-        t0 = time.time()
+    for _ in xrange(max(1, count // 2)):
+        t0 = timer()
         # Do something simple with each path.
         for p in base_path:
             p.st_mtime
@@ -64,7 +64,7 @@ def test_pathlib(count):
             p.st_mtime
         for p in base_path.glob("*.py"):
             p.st_mtime
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times
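[Editor's note] The bm_pathlib hunk above also guards its halved loop count with max(1, ...), so a small -n can no longer produce an empty timing list. The unguarded scaling in bm_pickle below (loops = loops // 5) still yields zero inner loops for n < 5, in which case each "measurement" times an empty loop. A tiny sketch of the difference:

import time

def timed_run(loops, timer=time.time):
    # Stand-in for one outer iteration of a scaled benchmark loop.
    t0 = timer()
    for _ in range(loops):
        sum(range(1000))  # stand-in workload
    return timer() - t0

print(timed_run(4 // 5))          # times an empty loop: ~0.0
print(timed_run(max(1, 4 // 5)))  # guarded: always measures real work
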
diff --git a/performance/bm_pickle.py b/performance/bm_pickle.py
--- a/performance/bm_pickle.py
+++ b/performance/bm_pickle.py
@@ -86,7 +86,7 @@ random_source = random.Random(5) # Fixe
 DICT_GROUP = [mutate_dict(DICT, random_source) for _ in range(3)]


-def test_pickle(num_obj_copies, pickle, options):
+def test_pickle(num_obj_copies, timer, pickle, options):
     # Warm-up runs.
     pickle.dumps(DICT, options.protocol)
     pickle.dumps(TUPLE, options.protocol)
@@ -95,7 +95,7 @@ def test_pickle(num_obj_copies, pickle,
     loops = num_obj_copies // 20  # We do 20 runs per loop.
     times = []
     for _ in xrange(options.num_runs):
-        t0 = time.time()
+        t0 = timer()
         for _ in xrange(loops):
             pickle.dumps(DICT, options.protocol)
             pickle.dumps(DICT, options.protocol)
@@ -157,12 +157,12 @@ def test_pickle(num_obj_copies, pickle,
             pickle.dumps(DICT_GROUP, options.protocol)
             pickle.dumps(DICT_GROUP, options.protocol)
             pickle.dumps(DICT_GROUP, options.protocol)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times


-def test_unpickle(num_obj_copies, pickle, options):
+def test_unpickle(num_obj_copies, timer, pickle, options):
     pickled_dict = pickle.dumps(DICT, options.protocol)
     pickled_tuple = pickle.dumps(TUPLE, options.protocol)
     pickled_dict_group = pickle.dumps(DICT_GROUP, options.protocol)
@@ -175,7 +175,7 @@ def test_unpickle(num_obj_copies, pickle
     loops = num_obj_copies // 20  # We do 20 runs per loop.
     times = []
     for _ in xrange(options.num_runs):
-        t0 = time.time()
+        t0 = timer()
         for _ in xrange(loops):
             pickle.loads(pickled_dict)
             pickle.loads(pickled_dict)
@@ -237,7 +237,7 @@ def test_unpickle(num_obj_copies, pickle
             pickle.loads(pickled_dict_group)
             pickle.loads(pickled_dict_group)
             pickle.loads(pickled_dict_group)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times

@@ -245,7 +245,7 @@ def test_unpickle(num_obj_copies, pickle
 LIST = [[list(range(10)), list(range(10))] for _ in xrange(10)]


-def test_pickle_list(loops, pickle, options):
+def test_pickle_list(loops, timer, pickle, options):
     # Warm-up runs.
     pickle.dumps(LIST, options.protocol)
     pickle.dumps(LIST, options.protocol)
@@ -253,7 +253,7 @@ def test_pickle_list(loops, pickle, opti
     loops = loops // 5  # Scale to compensate for the workload.
     times = []
     for _ in xrange(options.num_runs):
-        t0 = time.time()
+        t0 = timer()
         for _ in xrange(loops):
             pickle.dumps(LIST, options.protocol)
             pickle.dumps(LIST, options.protocol)
@@ -265,12 +265,12 @@ def test_pickle_list(loops, pickle, opti
             pickle.dumps(LIST, options.protocol)
             pickle.dumps(LIST, options.protocol)
             pickle.dumps(LIST, options.protocol)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times


-def test_unpickle_list(loops, pickle, options):
+def test_unpickle_list(loops, timer, pickle, options):
     pickled_list = pickle.dumps(LIST, options.protocol)

     # Warm-up runs.
@@ -280,7 +280,7 @@ def test_unpickle_list(loops, pickle, op
     loops = loops // 5  # Scale to compensate for the workload.
     times = []
     for _ in xrange(options.num_runs):
-        t0 = time.time()
+        t0 = timer()
         for _ in xrange(loops):
             pickle.loads(pickled_list)
             pickle.loads(pickled_list)
@@ -292,14 +292,14 @@ def test_unpickle_list(loops, pickle, op
             pickle.loads(pickled_list)
             pickle.loads(pickled_list)
             pickle.loads(pickled_list)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times

 MICRO_DICT = dict((key, dict.fromkeys(range(10))) for key in xrange(100))


-def test_pickle_dict(loops, pickle, options):
+def test_pickle_dict(loops, timer, pickle, options):
     # Warm-up runs.
     pickle.dumps(MICRO_DICT, options.protocol)
     pickle.dumps(MICRO_DICT, options.protocol)
@@ -307,14 +307,14 @@ def test_pickle_dict(loops, pickle, opti
     loops = max(1, loops // 10)
     times = []
     for _ in xrange(options.num_runs):
-        t0 = time.time()
+        t0 = timer()
         for _ in xrange(loops):
             pickle.dumps(MICRO_DICT, options.protocol)
             pickle.dumps(MICRO_DICT, options.protocol)
             pickle.dumps(MICRO_DICT, options.protocol)
             pickle.dumps(MICRO_DICT, options.protocol)
             pickle.dumps(MICRO_DICT, options.protocol)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times
diff --git a/performance/bm_pidigits.py b/performance/bm_pidigits.py
--- a/performance/bm_pidigits.py
+++ b/performance/bm_pidigits.py
@@ -16,7 +16,7 @@ from compat import xrange, imap, next

 NDIGITS = 2000

-def test_pidgits(iterations):
+def test_pidgits(iterations, timer):
     _map = imap
     _count = itertools.count
     _islice = itertools.islice
@@ -57,9 +57,9 @@ def test_pidgits(iterations):

     times = []
     for _ in xrange(iterations):
-        t0 = time.time()
+        t0 = timer()
         calc_ndigits(NDIGITS)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times
diff --git a/performance/bm_raytrace.py b/performance/bm_raytrace.py
--- a/performance/bm_raytrace.py
+++ b/performance/bm_raytrace.py
@@ -354,15 +354,14 @@ def _main():
     s.addObject(Halfspace(Point(0,0,0), Vector.UP), CheckerboardSurface())
     s.render(c)

-def main(n):
-    import time
+def main(n, timer):
     times = []
     for i in range(5):
         _main()  # warmup
     for i in range(n):
-        t1 = time.time()
+        t1 = timer()
         _main()
-        t2 = time.time()
+        t2 = timer()
         times.append(t2 - t1)
     return times
diff --git a/performance/bm_regex_compile.py b/performance/bm_regex_compile.py
--- a/performance/bm_regex_compile.py
+++ b/performance/bm_regex_compile.py
@@ -41,10 +41,10 @@ def capture_regexes():
     re.sub = capture_sub
     try:
         import bm_regex_effbot
-        bm_regex_effbot.test_regex_effbot(1)
+        bm_regex_effbot.test_regex_effbot(1, time.time)

         import bm_regex_v8
-        bm_regex_v8.test_regex_v8(1)
+        bm_regex_v8.test_regex_v8(1, time.time)
     finally:
         re.compile = real_compile
         re.search = real_search
@@ -52,7 +52,7 @@ def capture_regexes():
     return regexes


-def test_regex_compile(count):
+def test_regex_compile(count, timer):
     try:
         clear_cache = re._cache.clear
     except AttributeError:
@@ -67,11 +67,11 @@ def test_regex_compile(count):

     times = []
     for _ in xrange(count):
-        t0 = time.time()
+        t0 = timer()
         for regex, flags in regexes:
             clear_cache()
             re.compile(regex, flags)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times

@@ -83,4 +83,4 @@ if __name__ == "__main__":
     util.add_standard_options_to(parser)
     options, args = parser.parse_args()

-    util.run_benchmark(options, options.num_runs, test_regex_compile)
\ No newline at end of file
+    util.run_benchmark(options, options.num_runs, test_regex_compile)
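[Editor's note] The bm_regex_compile hunk above passes an explicit time.time when it drives the other two regex benchmarks: it runs them once, not to time them, but to harvest the (pattern, flags) pairs they feed to the monkey-patched re functions. That capture idiom, reduced to a self-contained sketch (names are illustrative, not the file's actual helpers):

import re

def capture_patterns(workload):
    captured = []
    real_compile = re.compile
    def recording_compile(pattern, flags=0):
        captured.append((pattern, flags))
        return real_compile(pattern, flags)
    re.compile = recording_compile  # patch ...
    try:
        workload()                  # ... run the code being observed ...
    finally:
        re.compile = real_compile   # ... and always restore the original
    return captured

print(capture_patterns(lambda: re.compile(r'\d+')))  # [('\\d+', 0)]
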
diff --git a/performance/bm_regex_effbot.py b/performance/bm_regex_effbot.py
--- a/performance/bm_regex_effbot.py
+++ b/performance/bm_regex_effbot.py
@@ -136,7 +136,7 @@ def run_benchmarks(n):
         re.search(regexs[id], string_tables[n][id])


-def test_regex_effbot(iterations):
+def test_regex_effbot(iterations, timer):
     sizes = init_benchmarks()

     # Warm up.
@@ -145,10 +145,10 @@ def test_regex_effbot(iterations):

     times = []
     for i in xrange(iterations):
-        t0 = time.time()
+        t0 = timer()
         for size in sizes:
             run_benchmarks(size)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times
diff --git a/performance/bm_regex_v8.py b/performance/bm_regex_v8.py
--- a/performance/bm_regex_v8.py
+++ b/performance/bm_regex_v8.py
@@ -1658,10 +1658,10 @@ def block11():
     re.search(r'jvaqbjf', strings[63])


-def test_regex_v8(count):
+def test_regex_v8(count, timer):
     times = []
     for i in xrange(count):
-        t0 = time.time()
+        t0 = timer()
         block0()
         block1()
         block2()
@@ -1674,7 +1674,7 @@ def test_regex_v8(count):
         block9()
         block10()
         block11()
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times
diff --git a/performance/bm_richards.py b/performance/bm_richards.py
--- a/performance/bm_richards.py
+++ b/performance/bm_richards.py
@@ -19,16 +19,16 @@ import util
 from compat import xrange


-def test_richards(iterations):
+def test_richards(iterations, timer):
     # Warm-up
     r = richards.Richards()
     r.run(iterations=2)

     times = []
     for _ in xrange(iterations):
-        t0 = time.time()
+        t0 = timer()
         r.run(iterations=1)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times
diff --git a/performance/bm_rietveld.py b/performance/bm_rietveld.py
--- a/performance/bm_rietveld.py
+++ b/performance/bm_rietveld.py
@@ -87,14 +87,14 @@ def get_benchmark_data():
     return tmpl, context


-def test_rietveld(count, tmpl, context):
+def test_rietveld(count, timer, tmpl, context):
     # Warm up Django.
     tmpl.render(context)
     tmpl.render(context)

     times = []
     for _ in xrange(count):
-        t0 = time.time()
+        t0 = timer()
         # 30 calls to render, so that we don't measure loop overhead.
         tmpl.render(context)
         tmpl.render(context)
@@ -126,7 +126,7 @@ def test_rietveld(count, tmpl, context):
         tmpl.render(context)
         tmpl.render(context)
         tmpl.render(context)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times
diff --git a/performance/bm_spambayes.py b/performance/bm_spambayes.py
--- a/performance/bm_spambayes.py
+++ b/performance/bm_spambayes.py
@@ -21,7 +21,7 @@ from spambayes import hammie, mboxutils
 import util


-def test_spambayes(iterations, messages, ham_classifier):
+def test_spambayes(iterations, timer, messages, ham_classifier):
     # Prime the pump. This still leaves some hot functions uncompiled; these
     # will be noticed as hot during the timed loops below.
     for msg in messages:
@@ -29,10 +29,10 @@ def test_spambayes(iterations, messages,

     times = []
     for _ in xrange(iterations):
-        t0 = time.time()
+        t0 = timer()
         for msg in messages:
             ham_classifier.score(msg)
-        t1 = time.time()
+        t1 = timer()
         times.append(t1 - t0)
     return times
diff --git a/performance/bm_spectral_norm.py b/performance/bm_spectral_norm.py
--- a/performance/bm_spectral_norm.py
+++ b/performance/bm_spectral_norm.py
@@ -40,10 +40,10 @@ def part_At_times_u(i_u):

 DEFAULT_N = 130

-def main(n):
+def main(n, timer):
     times = []
     for i in xrange(n):
-        t0 = time.time()
+        t0 = timer()
         u = [1] * DEFAULT_N

         for dummy in xrange(10):
@@ -55,7 +55,7 @@ def main(n):
         for ue, ve in izip(u, v):
             vBv += ue * ve
             vv += ve * ve
-        tk = time.time()
+        tk = timer()
         times.append(tk - t0)
     return times
diff --git a/performance/bm_spitfire.py b/performance/bm_spitfire.py
--- a/performance/bm_spitfire.py
+++ b/performance/bm_spitfire.py
@@ -36,7 +36,7 @@ SPITFIRE_SRC = """