diff -r 8a9b86071c15 perf.py
--- a/perf.py	Fri Dec 04 14:15:34 2015 -0800
+++ b/perf.py	Wed Feb 03 12:16:17 2016 +0100
@@ -143,6 +143,38 @@ def choose_timer(base_python, changed_py
     assert 0 # At least time.time() should always be available
 
 
+def timer_precision(timer_name):
+    """Compute the effective precision of a timer at the Python level."""
+
+    precision = None
+    points = 0
+    if sys.version_info >= (3,):
+        timeout_timer = time.monotonic
+    else:
+        timeout_timer = time.time
+    timer = getattr(time, timer_name)
+    timeout = timeout_timer() + 1.0
+    previous = timer()
+    while timeout_timer() < timeout or points < 5:
+        for loop in range(10):
+            t1 = timer()
+            t2 = timer()
+            dt = t2 - t1
+            if 0 < dt:
+                break
+        else:
+            dt = t2 - previous
+            if dt <= 0.0:
+                continue
+        if precision is not None:
+            precision = min(precision, dt)
+        else:
+            precision = dt
+        points += 1
+        previous = timer()
+    return precision
+
+
 def avg(seq):
     return sum(seq) / float(len(seq))
 
@@ -791,6 +823,32 @@ def SimpleBenchmark(benchmark_function,
       A BenchmarkResult object if the benchmark runs succeeded.
       A BenchmarkError object if either benchmark run failed.
     """
+
+    options.num_runs = 1
+    options.num_loops = 1
+
+    print("Calibrating")
+    if options.rigorous:
+        min_time = 1.0
+    elif options.fast:
+        min_time = 0.1
+    else:
+        min_time = 0.5
+
+    # On Windows, timer_precision * 100 can be up to 1.5 seconds
+    min_time = max(min_time, options.timer_precision * 100)
+
+    while True:
+        rawdata = benchmark_function(changed_python, options,
+                                     *args, **kwargs)
+        dt = min(rawdata.runtimes)
+        if dt > min_time:
+            break
+        options.num_loops *= 2
+    options.num_runs = 10
+    print("Calibrating => num_runs=%s, num_loops=%s (%.2f sec < %.2f sec)"
+          % (options.num_runs, options.num_loops, min_time, dt))
+
     try:
         changed_data = benchmark_function(changed_python, options,
                                           *args, **kwargs)
@@ -1159,7 +1217,7 @@ def CallAndCaptureOutput(command, env=No
 
 
 def MeasureGeneric(python, options, bm_path, bm_env=None,
-                   extra_args=[], iteration_scaling=1):
+                   extra_args=[]):
     """Abstract measurement function for Unladen's bm_* scripts.
 
     Based on the values of options.fast/rigorous, will pass -n {5,50,100} to
@@ -1174,24 +1232,17 @@ def MeasureGeneric(python, options, bm_p
             use an empty enviroment.
         extra_args: optional list of command line args to be given to the
             benchmark script.
-        iteration_scaling: optional multiple by which to scale the -n argument
-            to the benchmark.
 
     Returns:
        RawData instance.
    """
+
     if bm_env is None:
         bm_env = {}
 
-    trials = 50
-    if options.rigorous:
-        trials = 100
-    elif options.fast:
-        trials = 5
-    trials = max(1, int(trials * iteration_scaling))
-
     RemovePycs()
-    bench_args = [bm_path, "-n", trials, "--timer", options.timer]
+    bench_args = [bm_path, "-n", options.num_runs, "-l", options.num_loops,
+                  "--timer", options.timer]
     command = python + bench_args + extra_args
     output = CallAndCaptureOutput(command, bm_env,
                                   track_memory=options.track_memory,
@@ -2590,6 +2641,7 @@ def main(argv, bench_funcs=BENCH_FUNCS,
 
     if not options.timer:
         options.timer = choose_timer(base_cmd_prefix, changed_cmd_prefix)
+        options.timer_precision = timer_precision(options.timer)
         info("Automatically selected timer: %s", options.timer)
 
     should_run = ParseBenchmarksOption(options.benchmarks, bench_groups,
diff -r 8a9b86071c15 performance/bm_json_v2.py
--- a/performance/bm_json_v2.py	Fri Dec 04 14:15:34 2015 -0800
+++ b/performance/bm_json_v2.py	Wed Feb 03 12:16:17 2016 +0100
@@ -6,25 +6,28 @@ from compat import u_lit, xrange
 # execution runtime per test case
 TARGET_RUNTIME = 10
 
-EMPTY = ({}, 200000)
+EMPTY = ({}, 2000)
 SIMPLE_DATA = {'key1': 0, 'key2': True, 'key3': 'value',
                'key4': 'foo', 'key5': 'string'}
-SIMPLE = (SIMPLE_DATA, 100000)
+SIMPLE = (SIMPLE_DATA, 1000)
 NESTED_DATA = {'key1': 0, 'key2': SIMPLE[0], 'key3': 'value',
                'key4': SIMPLE[0], 'key5': SIMPLE[0],
                u_lit('key'): u_lit('\u0105\u0107\u017c')}
-NESTED = (NESTED_DATA, 100000)
-HUGE = ([NESTED[0]] * 1000, 100)
+NESTED = (NESTED_DATA, 1000)
+HUGE = ([NESTED[0]] * 1000, 1)
 
-cases = ['EMPTY', 'SIMPLE', 'NESTED', 'HUGE']
+cases = [EMPTY, SIMPLE, NESTED, HUGE]
 
-def main(n, timer):
+def run_cases():
+    for data, case_loops in cases:
+        for case_loop in xrange(case_loops):
+            json.dumps(data)
+
+def main(num_runs, num_loops, timer):
     l = []
-    for i in xrange(n):
+    for run in xrange(num_runs):
         t0 = timer()
-        for case in cases:
-            data, count = globals()[case]
-            for i in xrange(count):
-                json.dumps(data)
+        for loop in xrange(num_loops):
+            run_cases()
         l.append(timer() - t0)
     return l
 
@@ -36,4 +39,4 @@ if __name__ == '__main__':
     util.add_standard_options_to(parser)
     options, args = parser.parse_args()
 
-    util.run_benchmark(options, options.num_runs, main)
+    util.run_benchmark(options, options.num_runs, options.num_loops, main)
diff -r 8a9b86071c15 performance/bm_regex_v8.py
--- a/performance/bm_regex_v8.py	Fri Dec 04 14:15:34 2015 -0800
+++ b/performance/bm_regex_v8.py	Wed Feb 03 12:16:17 2016 +0100
@@ -357,11 +357,11 @@ def block1():
     re.sub(r'(\s)+e', '', '9.0 e115', 1)
     re.sub(r'.', '', 'k', 1)
 
-    # This prints a unicode escape where the V8 version prints the 
+    # This prints a unicode escape where the V8 version prints the
     # unicode character.
     regexs[17].sub(r'', strings[2], subcount[17])
 
-    # This prints a unicode escape where the V8 version prints the 
+    # This prints a unicode escape where the V8 version prints the
     # unicode character.
     regexs[17].sub(r'', strings[3], subcount[17])
 
@@ -605,7 +605,7 @@ def block4():
     regexs[34].search(strings[21])
     regexs[34].search(strings[22])
 
-    #FIXME 
+    #FIXME
     # The \{0,65534} should be a *
     # There's a current python bug that will stop the regex compilation
     # when a * appears there http://bugs.python.org/issue6156.
@@ -714,7 +714,7 @@ def block6():
 
     re.sub(r'(?i)##\/n##', '', strings[32], 0)
 
-    # This prints a unicode escape where the V8 version 
+    # This prints a unicode escape where the V8 version
     # prints the unicode character.
     re.sub(r'#~#argjbexybtb#~#', '', strings[33], 0)
 
@@ -1422,7 +1422,7 @@ def block11():
     regexs[39].sub(r'', 'anzr', subcount[39])
 
     # This prints something different to the V8 version
-    # The V8 version is escaping different things in the string that 
+    # The V8 version is escaping different things in the string that
    # has the substitutions performed on it.
    #
    # V8 treats /\S/ like / + escaped S + /
@@ -1658,22 +1658,23 @@ def block11():
     re.search(r'jvaqbjf', strings[63])
 
 
-def test_regex_v8(count, timer):
+def test_regex_v8(count, num_loops, timer):
     times = []
     for i in xrange(count):
         t0 = timer()
-        block0()
-        block1()
-        block2()
-        block3()
-        block4()
-        block5()
-        block6()
-        block7()
-        block8()
-        block9()
-        block10()
-        block11()
+        for loop in xrange(num_loops):
+            block0()
+            block1()
+            block2()
+            block3()
+            block4()
+            block5()
+            block6()
+            block7()
+            block8()
+            block9()
+            block10()
+            block11()
         t1 = timer()
         times.append(t1 - t0)
     return times
@@ -1686,4 +1687,5 @@ if __name__ == '__main__':
     util.add_standard_options_to(parser)
     options, args = parser.parse_args()
 
-    util.run_benchmark(options, options.num_runs, test_regex_v8)
+    util.run_benchmark(options, options.num_runs, options.num_loops,
+                       test_regex_v8)
diff -r 8a9b86071c15 performance/util.py
--- a/performance/util.py	Fri Dec 04 14:15:34 2015 -0800
+++ b/performance/util.py	Wed Feb 03 12:16:17 2016 +0100
@@ -12,7 +12,7 @@ import time
 from compat import reduce, print_
 
 
-def run_benchmark(options, num_runs, bench_func, *args):
+def run_benchmark(options, num_runs, num_loops, bench_func, *args):
     """Run the given benchmark, print results to stdout.
 
     Args:
@@ -26,10 +26,10 @@ def run_benchmark(options, num_runs, ben
     if options.profile:
         import cProfile
         prof = cProfile.Profile()
-        prof.runcall(bench_func, num_runs, timer, *args)
+        prof.runcall(bench_func, num_runs, num_loops, timer, *args)
         prof.print_stats(sort=options.profile_sort)
     else:
-        data = bench_func(num_runs, timer, *args)
+        data = bench_func(num_runs, num_loops, timer, *args)
         if options.take_geo_mean:
             product = reduce(operator.mul, data, 1)
             print_(math.pow(product, 1.0 / len(data)))
@@ -48,6 +48,8 @@ def add_standard_options_to(parser):
     """
     parser.add_option("-n", action="store", type="int", default=100,
                       dest="num_runs", help="Number of times to run the test.")
+    parser.add_option("-l", action="store", type="int", default=1,
+                      dest="num_loops", help="Number of loops.")
     parser.add_option("--profile", action="store_true",
                       help="Run the benchmark through cProfile.")
     parser.add_option("--profile_sort", action="store", type="str",
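
For illustration only, and not part of the patch above: the calibration strategy that the SimpleBenchmark hunk introduces (double the inner-loop count until a single run takes longer than a minimum time, so the measured interval sits well above the timer's precision) can be sketched as a small standalone Python 3 helper. The names calibrate_num_loops, bench_func and min_time below are hypothetical and do not come from perf.py.

    import time

    def calibrate_num_loops(bench_func, min_time, timer=time.perf_counter):
        # Keep doubling the inner-loop count until one timed run lasts at
        # least min_time seconds, so the measured interval is comfortably
        # larger than the timer's effective precision.
        num_loops = 1
        while True:
            t0 = timer()
            for _ in range(num_loops):
                bench_func()
            dt = timer() - t0
            if dt > min_time:
                return num_loops
            num_loops *= 2

    if __name__ == '__main__':
        # Example: calibrate a tiny workload against a 0.5 second floor.
        print(calibrate_num_loops(lambda: sum(range(1000)), 0.5))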