diff -r 9923b81a1d34 perf.py
--- a/perf.py	Wed Feb 03 14:19:18 2016 -0600
+++ b/perf.py	Mon Sep 12 11:04:21 2016 -0700
@@ -818,6 +818,8 @@
     try:
         changed_data = benchmark_function(changed_python, options,
                                           *args, **kwargs)
+        if options.testonly:
+            return
         if options.raw:
             return FormatRawData(changed_data, options)
         base_data = benchmark_function(base_python, options,
@@ -1215,9 +1217,9 @@
                    extra_args=[], iteration_scaling=1):
     """Abstract measurement function for Unladen's bm_* scripts.

-    Based on the values of options.fast/rigorous, will pass -n {5,50,100} to
-    the benchmark script. MeasureGeneric takes care of parsing out the running
-    times from the memory usage data.
+    Based on the values of options.testonly/fast/rigorous, will pass -n
+    {1,5,50,100} to the benchmark script. MeasureGeneric takes care of
+    parsing out the running times from the memory usage data.

     Args:
         python: start of the argv list for running Python.
@@ -1237,7 +1239,10 @@
         bm_env = {}

     trials = 50
-    if options.rigorous:
+    if options.testonly:
+        trials = 1
+        iteration_scaling = min(1, iteration_scaling)
+    elif options.rigorous:
         trials = 100
     elif options.fast:
         trials = 5
@@ -1323,6 +1328,16 @@
     PYBENCH_PATH = Relative("performance/pybench/pybench.py")
     PYBENCH_ENV = BuildEnv(inherit_env=options.inherit_env)

+    if options.testonly:
+        with open(os.devnull, "wb") as dev_null:
+            subprocess.check_call(LogCall(changed_python + [
+                PYBENCH_PATH,
+                "-n", "1",
+                "-C", "0",
+                ]), stdout=dev_null,
+                env=PYBENCH_ENV)
+        return
+
     try:
         with contextlib.nested(open(os.devnull, "wb"),
                                TemporaryFilename(prefix="baseline."),
@@ -1435,7 +1450,7 @@
                            inherit_env=options.inherit_env)

     # This can be compressed, but it's harder to understand.
-    if options.fast:
+    if options.testonly or options.fast:
         trials = 1
         target = fast_target
     elif options.rigorous:
@@ -1461,7 +1476,9 @@
     hg_env = BuildEnv({"PYTHONPATH": hg_path}, options.inherit_env)

     trials = 500
-    if options.rigorous:
+    if options.testonly:
+        trials = 1
+    elif options.rigorous:
         trials = 1000
     elif options.fast:
         trials = 100
@@ -1480,7 +1497,9 @@
     bzr_env = BuildEnv({"PYTHONPATH": bzr_path}, options.inherit_env)

     trials = 100
-    if options.rigorous:
+    if options.testonly:
+        trials = 1
+    elif options.rigorous:
         trials = 200
     elif options.fast:
         trials = 10
@@ -1694,6 +1713,8 @@
     try:
         changed_data = MeasureSpitfire(changed_python, options,
                                        spitfire_env, extra_args)
+        if options.testonly:
+            return
         if options.raw:
             return FormatRawData(changed_data, options)
         base_data = MeasureSpitfire(base_python, options,
@@ -2090,7 +2111,9 @@

 @VersionRange()
 def BM_normal_startup(base_python, changed_python, options):
-    if options.rigorous:
+    if options.testonly:
+        num_loops = 1
+    elif options.rigorous:
         num_loops = 100
     elif options.fast:
         num_loops = 5
@@ -2100,6 +2123,8 @@
     opts = []
     changed_data = MeasureStartup(changed_python, opts, num_loops,
                                   options.track_memory, options.inherit_env)
+    if options.testonly:
+        return
     if options.raw:
         return FormatRawData(changed_data, options)
     base_data = MeasureStartup(base_python, opts, num_loops,
@@ -2109,7 +2134,9 @@

 @VersionRange()
 def BM_startup_nosite(base_python, changed_python, options):
-    if options.rigorous:
+    if options.testonly:
+        num_loops = 1
+    elif options.rigorous:
         num_loops = 200
     elif options.fast:
         num_loops = 10
@@ -2119,6 +2146,8 @@
     opts = ["-S"]
     changed_data = MeasureStartup(changed_python, opts, num_loops,
                                   options.track_memory, options.inherit_env)
+    if options.testonly:
+        return
     if options.raw:
         return FormatRawData(changed_data, options)
     base_data = MeasureStartup(base_python, opts, num_loops,
@@ -2310,7 +2339,7 @@
                            options.inherit_env)

     trials = 5
-    if options.fast:
+    if options.testonly or options.fast:
         trials = 1
     elif options.rigorous:
         trials = 10
@@ -2570,6 +2599,9 @@
                             " accurate results"))
     parser.add_option("-f", "--fast", action="store_true",
                       help="Get rough answers quickly")
+    parser.add_option("--testonly", action="store_true",
+                      help="Run each benchmark only once. Provide only "
+                           "baseline_python, not changed_python")
     parser.add_option("-v", "--verbose", action="store_true",
                       help="Print more output")
     parser.add_option("-m", "--track_memory", action="store_true",
@@ -2636,7 +2668,7 @@

     options, args = parser.parse_args(argv)

-    expected = 1 if options.raw else 2
+    expected = 1 if options.raw or options.testonly else 2
     if len(args) != expected:
         parser.error("incorrect number of arguments")
     if expected == 1:
@@ -2652,16 +2684,19 @@
         options.experiment_label = options.changed_binary

     base_args, changed_args = ParsePythonArgsOption(options.args)
-    if options.raw:
+    if options.raw or options.testonly:
         if base_args != changed_args:
-            parser.error('provide args for only one interpreter in raw mode')
+            parser.error('provide args for only one interpreter '
+                         'in raw or testonly mode')
         if options.track_memory:
             # XXX this might be worth fixing someday?
-            parser.error('raw mode is not compatible with memory tracking')
+            parser.error('raw and testonly modes are not compatible with '
+                         'memory tracking')
         if options.diff_instrumentation:
-            parser.error('raw mode is not compatible with instrumentation')
+            parser.error('raw and testonly modes are not compatible with '
+                         'instrumentation')
         if options.csv:
-            parser.error('raw mode does not support csv output')
+            parser.error('raw and testonly modes do not support csv output')

     base_cmd_prefix = [base] + base_args
     changed_cmd_prefix = [changed] + changed_args
@@ -2714,6 +2749,9 @@
     print("Report on %s" % " ".join(platform.uname()))
     if multiprocessing:
         print("Total CPU cores:", multiprocessing.cpu_count())
+    if options.testonly:
+        print("Successfully ran benchmarks: %s" % " ".join(to_run))
+        return
     if options.raw:
         for name, result in results:
             print()
diff -r 9923b81a1d34 performance/pybench/pybench.py
--- a/performance/pybench/pybench.py	Wed Feb 03 14:19:18 2016 -0600
+++ b/performance/pybench/pybench.py	Mon Sep 12 11:04:21 2016 -0700
@@ -220,10 +220,11 @@

         # Set parameters
         if warp is not None:
-            self.rounds = int(self.rounds / warp)
-            if self.rounds == 0:
-                raise ValueError('warp factor set too high')
+            rounds = int(self.rounds / warp)
+            if rounds == 0:
+                raise ValueError('warp factor set too high (%s vs %s)' % (warp, self.rounds))
             self.warp = warp
+            self.rounds = rounds
         if calibration_runs is not None:
             if (not ALLOW_SKIPPING_CALIBRATION and
                 calibration_runs < 1):
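
Usage sketch (not part of the patch): with --testonly, perf.py expects a
single interpreter instead of the usual baseline/changed pair. Assuming the
suite's existing -b/--benchmarks selector and two of its standard benchmark
names, a smoke-test run might look like:

    $ python perf.py --testonly -b 2to3,django /usr/bin/python2.7
    ...
    Successfully ran benchmarks: 2to3 django

Each benchmark executes with trials = 1 and the run returns before any
base-vs-changed comparison, so this mode is a quick check that every
benchmark still starts and completes rather than a measurement.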