diff -r 43f8a0f5edd3 perf.py
--- a/perf.py	Fri Nov 23 23:40:58 2012 +0100
+++ b/perf.py	Sun Mar 10 10:54:20 2013 -0400
@@ -2109,7 +2109,7 @@
              "regex", "richards", "spectral_norm", "startup_nosite",
              "telco", "threading", "unpack_sequence"],
     # After 2to3-conversion
-    "py3k": ["2to3", "2n3", "chameleon", "mako_v2"]
+    "py3k": ["2to3", "2n3", "chameleon", "mako_v2"],
 }
 
 SLOW_BENCHMARKS = ["hexiom2"]
@@ -2138,6 +2138,7 @@
 
     Args:
         benchmarks_opt: the string passed to the -b option on the command line.
+        bench_groups: the collection of benchmark groups to pull from
 
     Returns:
         A set() of the names of the benchmarks to run.
@@ -2215,19 +2216,30 @@
     parser.values.output_style = value
 
 
-def main(argv, bench_funcs=BENCH_FUNCS, bench_groups=BENCH_GROUPS):
+def CreateBenchGroups(bench_funcs=BENCH_FUNCS, bench_groups=BENCH_GROUPS):
     bench_groups = bench_groups.copy()
     all_benchmarks = bench_funcs.keys()
     bench_groups["all"] = all_benchmarks
+    return bench_groups
+
+
+def main(argv, bench_funcs=BENCH_FUNCS, bench_groups=BENCH_GROUPS):
+    bench_groups = CreateBenchGroups(bench_funcs, bench_groups)
+
+    # Calculate the lengths of expanded benchmark names for all groups
+    bench_counts = {}
+    for name in bench_groups:
+        bench_counts[name] = sum(1 for _ in
+                                 _ExpandBenchmarkName(name, bench_groups))
 
     # Prettify the displayed benchmark list: first the benchmark groups by
     # decreasing number of benches, then individual benchmarks by
     # lexicographic order.
     pretty_benchmarks = ["%s(%d)" % (name, nbenchs)
                          for nbenchs, name in sorted(
-                             ((len(v), k) for (k, v) in bench_groups.items()),
+                             ((v, k) for (k, v) in bench_counts.items()),
                              reverse=True)]
-    pretty_benchmarks.extend(sorted(all_benchmarks))
+    pretty_benchmarks.extend(sorted(bench_groups["all"]))
 
     parser = optparse.OptionParser(
         usage="%prog [options] baseline_python changed_python",
diff -r 43f8a0f5edd3 test_perf.py
--- a/test_perf.py	Fri Nov 23 23:40:58 2012 +0100
+++ b/test_perf.py	Sun Mar 10 10:54:20 2013 -0400
@@ -112,36 +112,49 @@
 
     def testParseBenchmarksOption(self):
         # perf.py, no -b option.
-        should_run = perf.ParseBenchmarksOption("")
-        self.assertEqual(should_run, set(["2to3", "django", "slowpickle",
-                                          "slowspitfire", "slowunpickle"]))
+        bench_groups = perf.CreateBenchGroups()
+        should_run = perf.ParseBenchmarksOption("", bench_groups)
+        self.assertEqual(should_run, set(["2to3", "django", "nbody",
+                                          "slowpickle", "slowspitfire",
+                                          "slowunpickle", "spambayes"]))
 
         # perf.py -b 2to3
-        should_run = perf.ParseBenchmarksOption("2to3")
+        should_run = perf.ParseBenchmarksOption("2to3", bench_groups)
         self.assertEqual(should_run, set(["2to3"]))
 
         # perf.py -b 2to3,pybench
-        should_run = perf.ParseBenchmarksOption("2to3,pybench")
+        should_run = perf.ParseBenchmarksOption("2to3,pybench", bench_groups)
         self.assertEqual(should_run, set(["2to3", "pybench"]))
 
         # perf.py -b -2to3
-        should_run = perf.ParseBenchmarksOption("-2to3")
-        self.assertEqual(should_run, set(["django", "slowspitfire",
-                                          "slowpickle", "slowunpickle"]))
+        should_run = perf.ParseBenchmarksOption("-2to3", bench_groups)
+        self.assertEqual(should_run, set(["django", "nbody", "slowspitfire",
+                                          "slowpickle", "slowunpickle",
+                                          "spambayes"]))
 
         # perf.py -b all
-        should_run = perf.ParseBenchmarksOption("all")
+        should_run = perf.ParseBenchmarksOption("all", bench_groups)
         self.assertTrue("django" in should_run, should_run)
         self.assertTrue("pybench" in should_run, should_run)
 
         # perf.py -b -2to3,all
-        should_run = perf.ParseBenchmarksOption("-2to3,all")
+        should_run = perf.ParseBenchmarksOption("-2to3,all", bench_groups)
         self.assertTrue("django" in should_run, should_run)
         self.assertTrue("pybench" in should_run, should_run)
         self.assertFalse("2to3" in should_run, should_run)
 
         # Error conditions
-        self.assertRaises(ValueError, perf.ParseBenchmarksOption, "-all")
+        self.assertRaises(ValueError, perf.ParseBenchmarksOption, "-all",
+                          bench_groups)
+
+
+    def testBenchmarkCounts(self):
+        bench_groups = {"top": ["middle1", "middle2"],
+                        "middle1": ["bottom1", "bottom2"],
+                        "middle2": ["bottom3"]}
+        found = list(perf._ExpandBenchmarkName("top", bench_groups))
+        self.assertEqual(["bottom1", "bottom2", "bottom3"], found)
+
 
 if __name__ == "__main__":
     unittest.main()
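Note: the new code in main() and the testBenchmarkCounts test both rely on perf._ExpandBenchmarkName(), which is not shown in this patch. A minimal sketch consistent with the test's expected output, assuming the function simply recurses through nested group names (the actual implementation in perf.py may differ), could look like:

# Sketch only, not part of the patch: expand a benchmark or group name into
# the individual benchmark names it ultimately contains. Any name that is a
# key in bench_groups is treated as a group and expanded recursively; any
# other name is yielded as a leaf benchmark.
def _ExpandBenchmarkName(bm_name, bench_groups):
    expansion = bench_groups.get(bm_name)
    if expansion:
        for name in expansion:
            for leaf in _ExpandBenchmarkName(name, bench_groups):
                yield leaf
    else:
        yield bm_name

# With the groups from testBenchmarkCounts:
#   list(_ExpandBenchmarkName("top", {"top": ["middle1", "middle2"],
#                                     "middle1": ["bottom1", "bottom2"],
#                                     "middle2": ["bottom3"]}))
#   -> ["bottom1", "bottom2", "bottom3"]

This is also why main() now counts benchmarks with sum(1 for _ in _ExpandBenchmarkName(...)) instead of len(v): a group's displayed count should reflect the fully expanded set of benchmarks, not just its immediate members.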