diff -r a97acad3bbf7 -r ca1184dec8b5 perf.py
--- a/perf.py Wed Feb 03 15:20:02 2016 +0100
+++ b/perf.py Thu Feb 04 14:42:26 2016 +0100
@@ -167,6 +167,38 @@ def choose_timer(base_python, changed_py
     assert 0  # At least time.time() should always be available
 
 
+def timer_precision(timer_name):
+    """Compute the effective precision of a timer at the Python level."""
+
+    precision = None
+    points = 0
+    if sys.version_info >= (3,):
+        timeout_timer = time.monotonic
+    else:
+        timeout_timer = time.time
+    timer = getattr(time, timer_name)
+    timeout = timeout_timer() + 1.0
+    previous = timer()
+    while timeout_timer() < timeout or points < 5:
+        for loop in range(10):
+            t1 = timer()
+            t2 = timer()
+            dt = t2 - t1
+            if 0 < dt:
+                break
+        else:
+            dt = t2 - previous
+            if dt <= 0.0:
+                continue
+        if precision is not None:
+            precision = min(precision, dt)
+        else:
+            precision = dt
+        points += 1
+        previous = timer()
+    return precision
+
+
 def avg(seq):
     return sum(seq) / float(len(seq))
 
@@ -815,6 +847,40 @@ def SimpleBenchmark(benchmark_function,
     A BenchmarkResult object if the benchmark runs succeeded.
     A BenchmarkError object if either benchmark run failed.
     """
+
+    options.num_runs = 1
+    options.num_loops = 1
+
+    print("Calibrating")
+    if options.rigorous:
+        min_time = 1.0
+        max_time = 100.0  # 100 runs
+    elif options.fast:
+        min_time = 0.5
+        max_time = 25.0  # 50 runs
+    else:
+        min_time = 0.5
+        max_time = 50.0  # 100 runs
+
+    # On Windows, timer_precision can be up to 15.6 ms
+    min_time = max(min_time, options.timer_precision * 100)
+
+    while True:
+        rawdata = benchmark_function(changed_python, options,
+                                     *args, **kwargs)
+        dt = min(rawdata.runtimes)
+        if dt > min_time:
+            break
+        options.num_loops *= 2
+
+    # Use a strict minimum of 5 runs, less cannot be reliable
+    options.num_runs = max(int(math.ceil(max_time / min_time)), 5)
+
+    print("Calibration: num_runs=%s, num_loops=%s "
+          "(%.2f sec per run > min_time %.2f sec, estimated total: %.1f sec)"
+          % (options.num_runs, options.num_loops,
+             dt, min_time, dt * options.num_runs))
+
     try:
         changed_data = benchmark_function(changed_python, options,
                                           *args, **kwargs)
@@ -1183,7 +1249,7 @@ def CallAndCaptureOutput(command, env=No
 
 
 def MeasureGeneric(python, options, bm_path, bm_env=None,
-                   extra_args=[], iteration_scaling=1):
+                   extra_args=[]):
     """Abstract measurement function for Unladen's bm_* scripts.
 
     Based on the values of options.fast/rigorous, will pass -n {5,50,100} to
@@ -1198,24 +1264,17 @@ def MeasureGeneric(python, options, bm_p
             use an empty enviroment.
         extra_args: optional list of command line args to be given to the
             benchmark script.
-        iteration_scaling: optional multiple by which to scale the -n argument
-            to the benchmark.
 
     Returns:
         RawData instance.
     """
+
     if bm_env is None:
         bm_env = {}
 
-    trials = 50
-    if options.rigorous:
-        trials = 100
-    elif options.fast:
-        trials = 5
-    trials = max(1, int(trials * iteration_scaling))
-
     RemovePycs()
-    bench_args = [bm_path, "-n", trials, "--timer", options.timer]
+    bench_args = [bm_path, "-n", options.num_runs, "-l", options.num_loops,
+                  "--timer", options.timer]
     command = python + bench_args + extra_args
     output = CallAndCaptureOutput(command, bm_env,
                                   track_memory=options.track_memory,
@@ -2630,6 +2689,7 @@ def main(argv, bench_funcs=BENCH_FUNCS,
 
     if not options.timer:
         options.timer = choose_timer(base_cmd_prefix, changed_cmd_prefix)
+        options.timer_precision = timer_precision(options.timer)
         info("Automatically selected timer: %s", options.timer)
 
     should_run = ParseBenchmarksOption(options.benchmarks, bench_groups,
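The calibration block added to SimpleBenchmark() above doubles num_loops until the fastest run of the changed interpreter exceeds min_time, then derives num_runs from the max_time budget. A minimal standalone sketch of the same idea, assuming a hypothetical bench(num_loops) callable that returns a list of per-run timings (it stands in for benchmark_function and rawdata.runtimes):

    import math

    def calibrate(bench, min_time=0.5, max_time=50.0, timer_precision=1e-3):
        # Reject runs shorter than 100x the timer precision (cf. the Windows note above).
        min_time = max(min_time, timer_precision * 100)
        num_loops = 1
        while True:
            dt = min(bench(num_loops))  # fastest run at the current loop count
            if dt > min_time:
                break
            num_loops *= 2              # double the workload and try again
        # Strict minimum of 5 runs; fewer cannot be reliable.
        num_runs = max(int(math.ceil(max_time / min_time)), 5)
        return num_runs, num_loops
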
""" + if bm_env is None: bm_env = {} - trials = 50 - if options.rigorous: - trials = 100 - elif options.fast: - trials = 5 - trials = max(1, int(trials * iteration_scaling)) - RemovePycs() - bench_args = [bm_path, "-n", trials, "--timer", options.timer] + bench_args = [bm_path, "-n", options.num_runs, "-l", options.num_loops, + "--timer", options.timer] command = python + bench_args + extra_args output = CallAndCaptureOutput(command, bm_env, track_memory=options.track_memory, @@ -2630,6 +2689,7 @@ def main(argv, bench_funcs=BENCH_FUNCS, if not options.timer: options.timer = choose_timer(base_cmd_prefix, changed_cmd_prefix) + options.timer_precision = timer_precision(options.timer) info("Automatically selected timer: %s", options.timer) should_run = ParseBenchmarksOption(options.benchmarks, bench_groups, diff -r a97acad3bbf7 -r ca1184dec8b5 performance/bm_json_v2.py --- a/performance/bm_json_v2.py Wed Feb 03 15:20:02 2016 +0100 +++ b/performance/bm_json_v2.py Thu Feb 04 14:42:26 2016 +0100 @@ -6,25 +6,28 @@ from compat import u_lit, xrange # execution runtime per test case TARGET_RUNTIME = 10 -EMPTY = ({}, 200000) +EMPTY = ({}, 2000) SIMPLE_DATA = {'key1': 0, 'key2': True, 'key3': 'value', 'key4': 'foo', 'key5': 'string'} -SIMPLE = (SIMPLE_DATA, 100000) +SIMPLE = (SIMPLE_DATA, 1000) NESTED_DATA = {'key1': 0, 'key2': SIMPLE[0], 'key3': 'value', 'key4': SIMPLE[0], 'key5': SIMPLE[0], u_lit('key'): u_lit('\u0105\u0107\u017c')} -NESTED = (NESTED_DATA, 100000) -HUGE = ([NESTED[0]] * 1000, 100) +NESTED = (NESTED_DATA, 1000) +HUGE = ([NESTED[0]] * 1000, 1) -cases = ['EMPTY', 'SIMPLE', 'NESTED', 'HUGE'] +cases = [EMPTY, SIMPLE, NESTED, HUGE] -def main(n, timer): +def run_cases(): + for data, case_loops in cases: + for case_loop in xrange(case_loops): + json.dumps(data) + +def main(num_runs, num_loops, timer): l = [] - for i in xrange(n): + for run in xrange(num_runs): t0 = timer() - for case in cases: - data, count = globals()[case] - for i in xrange(count): - json.dumps(data) + for loop in xrange(num_loops): + run_cases() l.append(timer() - t0) return l @@ -36,4 +39,4 @@ if __name__ == '__main__': util.add_standard_options_to(parser) options, args = parser.parse_args() - util.run_benchmark(options, options.num_runs, main) + util.run_benchmark(options, options.num_runs, options.num_loops, main) diff -r a97acad3bbf7 -r ca1184dec8b5 performance/bm_pickle.py --- a/performance/bm_pickle.py Wed Feb 03 15:20:02 2016 +0100 +++ b/performance/bm_pickle.py Thu Feb 04 14:42:26 2016 +0100 @@ -86,17 +86,17 @@ random_source = random.Random(5) # Fixe DICT_GROUP = [mutate_dict(DICT, random_source) for _ in range(3)] -def test_pickle(num_obj_copies, timer, pickle, options): +def test_pickle(num_runs, num_loops, timer, pickle, options): # Warm-up runs. pickle.dumps(DICT, options.protocol) pickle.dumps(TUPLE, options.protocol) pickle.dumps(DICT_GROUP, options.protocol) - loops = num_obj_copies // 20 # We do 20 runs per loop. 
diff -r a97acad3bbf7 -r ca1184dec8b5 performance/bm_pickle.py
--- a/performance/bm_pickle.py Wed Feb 03 15:20:02 2016 +0100
+++ b/performance/bm_pickle.py Thu Feb 04 14:42:26 2016 +0100
@@ -86,17 +86,17 @@ random_source = random.Random(5) # Fixe
 DICT_GROUP = [mutate_dict(DICT, random_source) for _ in range(3)]
 
 
-def test_pickle(num_obj_copies, timer, pickle, options):
+def test_pickle(num_runs, num_loops, timer, pickle, options):
     # Warm-up runs.
     pickle.dumps(DICT, options.protocol)
     pickle.dumps(TUPLE, options.protocol)
     pickle.dumps(DICT_GROUP, options.protocol)
 
-    loops = num_obj_copies // 20  # We do 20 runs per loop.
     times = []
-    for _ in xrange(options.num_runs):
+    for run in xrange(num_runs):
         t0 = timer()
-        for _ in xrange(loops):
+        for loop in xrange(num_loops):
+            # 20 dumps() per loop
             pickle.dumps(DICT, options.protocol)
             pickle.dumps(DICT, options.protocol)
             pickle.dumps(DICT, options.protocol)
@@ -162,7 +162,7 @@ def test_pickle(num_obj_copies, timer, p
     return times
 
 
-def test_unpickle(num_obj_copies, timer, pickle, options):
+def test_unpickle(num_runs, num_loops, timer, pickle, options):
     pickled_dict = pickle.dumps(DICT, options.protocol)
     pickled_tuple = pickle.dumps(TUPLE, options.protocol)
     pickled_dict_group = pickle.dumps(DICT_GROUP, options.protocol)
@@ -172,11 +172,11 @@ def test_unpickle(num_obj_copies, timer,
     pickle.loads(pickled_tuple)
     pickle.loads(pickled_dict_group)
 
-    loops = num_obj_copies // 20  # We do 20 runs per loop.
     times = []
-    for _ in xrange(options.num_runs):
+    for _ in xrange(num_runs):
         t0 = timer()
-        for _ in xrange(loops):
+        for _ in xrange(num_loops):
+            # 20 loads() per loop
             pickle.loads(pickled_dict)
             pickle.loads(pickled_dict)
             pickle.loads(pickled_dict)
@@ -245,16 +245,15 @@ def test_unpickle(num_obj_copies, timer,
 LIST = [[list(range(10)), list(range(10))] for _ in xrange(10)]
 
 
-def test_pickle_list(loops, timer, pickle, options):
+def test_pickle_list(num_runs, num_loops, timer, pickle, options):
     # Warm-up runs.
     pickle.dumps(LIST, options.protocol)
     pickle.dumps(LIST, options.protocol)
 
-    loops = loops // 5  # Scale to compensate for the workload.
     times = []
-    for _ in xrange(options.num_runs):
+    for _ in xrange(num_runs):
         t0 = timer()
-        for _ in xrange(loops):
+        for _ in xrange(num_loops):
             pickle.dumps(LIST, options.protocol)
             pickle.dumps(LIST, options.protocol)
             pickle.dumps(LIST, options.protocol)
@@ -270,18 +269,17 @@ def test_pickle_list(loops, timer, pickl
     return times
 
 
-def test_unpickle_list(loops, timer, pickle, options):
+def test_unpickle_list(num_runs, num_loops, timer, pickle, options):
     pickled_list = pickle.dumps(LIST, options.protocol)
 
     # Warm-up runs.
     pickle.loads(pickled_list)
     pickle.loads(pickled_list)
 
-    loops = loops // 5  # Scale to compensate for the workload.
     times = []
-    for _ in xrange(options.num_runs):
+    for _ in xrange(num_runs):
         t0 = timer()
-        for _ in xrange(loops):
+        for _ in xrange(num_loops):
             pickle.loads(pickled_list)
             pickle.loads(pickled_list)
             pickle.loads(pickled_list)
@@ -299,16 +297,15 @@ def test_unpickle_list(loops, timer, pic
 MICRO_DICT = dict((key, dict.fromkeys(range(10))) for key in xrange(100))
 
 
-def test_pickle_dict(loops, timer, pickle, options):
+def test_pickle_dict(num_runs, num_loops, timer, pickle, options):
     # Warm-up runs.
     pickle.dumps(MICRO_DICT, options.protocol)
     pickle.dumps(MICRO_DICT, options.protocol)
 
-    loops = max(1, loops // 10)
     times = []
-    for _ in xrange(options.num_runs):
+    for _ in xrange(num_runs):
         t0 = timer()
-        for _ in xrange(loops):
+        for _ in xrange(num_loops):
             pickle.dumps(MICRO_DICT, options.protocol)
             pickle.dumps(MICRO_DICT, options.protocol)
             pickle.dumps(MICRO_DICT, options.protocol)
@@ -344,7 +341,6 @@ if __name__ == "__main__":
         raise RuntimeError("Need to specify one of %s" % benchmarks)
 
     if options.use_cpickle:
-        num_obj_copies = 8000
         # C accelerators are enabled by default on 3.x
        if sys.version_info < (3,):
            import cPickle as pickle
@@ -355,12 +351,9 @@ if __name__ == "__main__":
     else:
         if sys.version_info >= (3,):
             sys.modules['_pickle'] = None
-        num_obj_copies = 200
         import pickle
         if not is_module_accelerated(pickle):
             raise RuntimeError("Unexpected C accelerators for pickle")
 
-    if options.protocol > 0:
-        num_obj_copies *= 2  # Compensate for faster protocols.
-
-    util.run_benchmark(options, num_obj_copies, benchmark, pickle, options)
+    util.run_benchmark(options, options.num_runs, options.num_loops,
+                       benchmark, pickle, options)
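Note that in the pickle tests above each timed sample now covers num_loops iterations of a hard-coded batch of dumps() or loads() calls (20 in test_pickle() and test_unpickle()), so a per-call figure has to be recovered from the samples afterwards. A small sketch of that arithmetic, assuming samples is the list returned by one of the test_* functions:

    def per_call_seconds(samples, num_loops, calls_per_loop=20):
        # Each sample timed num_loops * calls_per_loop pickle operations.
        return min(samples) / (num_loops * calls_per_loop)

    # e.g. a best sample of 0.8 s with num_loops=4: 0.8 / (4 * 20) = 0.01 s per call
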
diff -r a97acad3bbf7 -r ca1184dec8b5 performance/bm_regex_v8.py
--- a/performance/bm_regex_v8.py Wed Feb 03 15:20:02 2016 +0100
+++ b/performance/bm_regex_v8.py Thu Feb 04 14:42:26 2016 +0100
@@ -357,11 +357,11 @@ def block1():
     re.sub(r'(\s)+e', '', '9.0 e115', 1)
     re.sub(r'.', '', 'k', 1)
 
-    # This prints a unicode escape where the V8 version prints the 
+    # This prints a unicode escape where the V8 version prints the
     # unicode character.
     regexs[17].sub(r'', strings[2], subcount[17])
 
-    # This prints a unicode escape where the V8 version prints the 
+    # This prints a unicode escape where the V8 version prints the
     # unicode character.
     regexs[17].sub(r'', strings[3], subcount[17])
 
@@ -605,7 +605,7 @@ def block4():
     regexs[34].search(strings[21])
     regexs[34].search(strings[22])
 
-    #FIXME 
+    #FIXME
     # The \{0,65534} should be a *
     # There's a current python bug that will stop the regex compilation
     # when a * appears there http://bugs.python.org/issue6156.
@@ -714,7 +714,7 @@ def block6():
 
     re.sub(r'(?i)##\/n##', '', strings[32], 0)
 
-    # This prints a unicode escape where the V8 version 
+    # This prints a unicode escape where the V8 version
     # prints the unicode character.
     re.sub(r'#~#argjbexybtb#~#', '', strings[33], 0)
 
@@ -1422,7 +1422,7 @@ def block11():
     regexs[39].sub(r'', 'anzr', subcount[39])
 
     # This prints something different to the V8 version
-    # The V8 version is escaping different things in the string that 
+    # The V8 version is escaping different things in the string that
     # has the substitutions performed on it.
     #
     # V8 treats /\S/ like / + escaped S + /
@@ -1658,22 +1658,23 @@ def block11():
     re.search(r'jvaqbjf', strings[63])
 
 
-def test_regex_v8(count, timer):
+def test_regex_v8(count, num_loops, timer):
     times = []
     for i in xrange(count):
         t0 = timer()
-        block0()
-        block1()
-        block2()
-        block3()
-        block4()
-        block5()
-        block6()
-        block7()
-        block8()
-        block9()
-        block10()
-        block11()
+        for loop in xrange(num_loops):
+            block0()
+            block1()
+            block2()
+            block3()
+            block4()
+            block5()
+            block6()
+            block7()
+            block8()
+            block9()
+            block10()
+            block11()
         t1 = timer()
         times.append(t1 - t0)
     return times
@@ -1686,4 +1687,5 @@ if __name__ == '__main__':
     util.add_standard_options_to(parser)
     options, args = parser.parse_args()
 
-    util.run_benchmark(options, options.num_runs, test_regex_v8)
+    util.run_benchmark(options, options.num_runs, options.num_loops,
+                       test_regex_v8)
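With iteration_scaling gone, MeasureGeneric() in perf.py feeds the calibrated values to every bm_* child process through the new flags. Roughly, the command it assembles looks like the sketch below; the script path, numbers, and timer name are illustrative only, and the timer value is assumed to be a time module attribute name, as the getattr(time, timer_name) call in timer_precision() suggests:

    # Mirrors bench_args in MeasureGeneric(); values here are examples only.
    num_runs, num_loops = 10, 4
    bench_args = ["performance/bm_regex_v8.py", "-n", num_runs, "-l", num_loops,
                  "--timer", "perf_counter"]
    command = ["python"] + [str(arg) for arg in bench_args]
    # -> python performance/bm_regex_v8.py -n 10 -l 4 --timer perf_counter
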
diff -r a97acad3bbf7 -r ca1184dec8b5 performance/util.py
--- a/performance/util.py Wed Feb 03 15:20:02 2016 +0100
+++ b/performance/util.py Thu Feb 04 14:42:26 2016 +0100
@@ -12,7 +12,7 @@ import time
 from compat import reduce, print_
 
 
-def run_benchmark(options, num_runs, bench_func, *args):
+def run_benchmark(options, num_runs, num_loops, bench_func, *args):
     """Run the given benchmark, print results to stdout.
 
     Args:
@@ -26,10 +26,10 @@ def run_benchmark(options, num_runs, ben
     if options.profile:
         import cProfile
         prof = cProfile.Profile()
-        prof.runcall(bench_func, num_runs, timer, *args)
+        prof.runcall(bench_func, num_runs, num_loops, timer, *args)
         prof.print_stats(sort=options.profile_sort)
     else:
-        data = bench_func(num_runs, timer, *args)
+        data = bench_func(num_runs, num_loops, timer, *args)
         if options.take_geo_mean:
             product = reduce(operator.mul, data, 1)
             print_(math.pow(product, 1.0 / len(data)))
@@ -48,6 +48,8 @@ def add_standard_options_to(parser):
     """
     parser.add_option("-n", action="store", type="int", default=100,
                       dest="num_runs", help="Number of times to run the test.")
+    parser.add_option("-l", action="store", type="int", default=1,
+                      dest="num_loops", help="Number of loops.")
     parser.add_option("--profile", action="store_true",
                       help="Run the benchmark through cProfile.")
     parser.add_option("--profile_sort", action="store", type="str",
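Taken together with the util.py changes, a benchmark converted to the new interface needs little more than the skeleton below; the option-parser wiring and the dummy workload are illustrative, not part of the patch:

    import optparse
    import time

    import util

    def bench_func(num_runs, num_loops, timer):
        # Same shape as main() in bm_json_v2.py: one sample per run,
        # num_loops repetitions of the workload inside each sample.
        times = []
        for _ in range(num_runs):
            t0 = timer()
            for _ in range(num_loops):
                sum(i * i for i in range(1000))  # placeholder workload
            times.append(timer() - t0)
        return times

    if __name__ == '__main__':
        parser = optparse.OptionParser(usage="%prog [options]")
        util.add_standard_options_to(parser)
        options, args = parser.parse_args()
        util.run_benchmark(options, options.num_runs, options.num_loops, bench_func)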