diff -r 75751d838789 perf.py
--- a/perf.py	Fri Nov 27 11:38:22 2015 -0600
+++ b/perf.py	Thu Dec 03 12:14:05 2015 +0200
@@ -1501,6 +1501,17 @@
     return SimpleBenchmark(MeasureDjangoV2, *args, **kwargs)
 
 
+def MeasureDjangoV3(python, options):
+    bm_path = Relative("performance/bm_django_v3.py", python, options)
+    django_path = Relative("lib/Django-1.9", python, options)
+    bm_env = {"PYTHONPATH": django_path}
+    return MeasureGeneric(python, options, bm_path, bm_env)
+
+@VersionRange()
+def BM_Django_v3(*args, **kwargs):
+    return SimpleBenchmark(MeasureDjangoV3, *args, **kwargs)
+
+
 def MeasureFloat(python, options):
     bm_path = Relative("performance/bm_float.py")
     return MeasureGeneric(python, options, bm_path)
@@ -2295,7 +2306,7 @@
 # If you update the default group, be sure to update the module docstring, too.
 # An "all" group which includes every benchmark perf.py knows about is generated
 # automatically.
-BENCH_GROUPS = {"default": ["2to3", "chameleon_v2", "django_v2", "nbody",
+BENCH_GROUPS = {"default": ["2to3", "chameleon_v2", "django_v3", "nbody",
                             "tornado_http", "fastpickle", "fastunpickle",
                             "regex_v8", "json_dump_v2", "json_load"],
                 "startup": ["normal_startup", "startup_nosite",
@@ -2313,7 +2324,7 @@
                 "calls": ["call_simple", "call_method", "call_method_slots",
                           "call_method_unknown"],
                 "math": ["float", "nbody", "pidigits"],
-                "template" : ["slowspitfire", "django_v2", "mako_v2"],
+                "template" : ["slowspitfire", "django_v3", "mako_v2"],
                 "logging": ["silent_logging", "simple_logging",
                             "formatted_logging"],
                 # These are removed from the "all" group
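
For reference, a minimal sketch of the kind of script MeasureDjangoV3 would invoke as performance/bm_django_v3.py, modeled on the existing bm_django driver pattern. The util.add_standard_options_to / util.run_benchmark helpers, the settings.configure() call for Django 1.9, the template text, and the 150x150 table size are assumptions carried over from the older Django benchmark, not part of this patch.

    # Hypothetical sketch of performance/bm_django_v3.py (not included in this diff).
    import optparse
    import time

    import django.conf
    import util

    # Django >= 1.8 needs a configured template backend before Template() can be used.
    django.conf.settings.configure(TEMPLATES=[{
        "BACKEND": "django.template.backends.django.DjangoTemplates"}])
    django.setup()

    from django.template import Context, Template

    DJANGO_TMPL = Template("""<table>
    {% for row in table %}
    <tr>{% for col in row %}<td>{{ col|escape }}</td>{% endfor %}</tr>
    {% endfor %}
    </table>
    """)


    def test_django(count):
        # Render a moderately sized table repeatedly; the 150x150 size is
        # illustrative, matching the older Django template benchmarks.
        table = [range(150) for _ in range(150)]
        context = Context({"table": table})
        times = []
        for _ in range(count):
            t0 = time.time()
            DJANGO_TMPL.render(context)
            t1 = time.time()
            times.append(t1 - t0)
        return times


    if __name__ == "__main__":
        parser = optparse.OptionParser(
            usage="%prog [options]",
            description="Test the performance of Django 1.9 template rendering.")
        util.add_standard_options_to(parser)
        options, _ = parser.parse_args()
        util.run_benchmark(options, options.num_runs, test_django)

With the patch applied, the benchmark can be selected through perf.py's existing -b option, e.g. "python perf.py -b django_v3 control/python experiment/python", or picked up implicitly via the updated "default" and "template" groups.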