diff -r 75751d838789 perf.py
--- a/perf.py	Fri Nov 27 11:38:22 2015 -0600
+++ b/perf.py	Thu Dec 03 12:27:07 2015 +0200
@@ -1501,6 +1501,17 @@
     return SimpleBenchmark(MeasureDjangoV2, *args, **kwargs)
 
 
+def MeasureDjangoV3(python, options):
+    bm_path = Relative("performance/bm_django_v3.py", python, options)
+    django_path = Relative("lib/Django-1.9", python, options)
+    bm_env = {"PYTHONPATH": django_path}
+    return MeasureGeneric(python, options, bm_path, bm_env)
+
+@VersionRange()
+def BM_Django_v3(*args, **kwargs):
+    return SimpleBenchmark(MeasureDjangoV3, *args, **kwargs)
+
+
 def MeasureFloat(python, options):
     bm_path = Relative("performance/bm_float.py")
     return MeasureGeneric(python, options, bm_path)
@@ -2295,7 +2306,7 @@
 # If you update the default group, be sure to update the module docstring, too.
 # An "all" group which includes every benchmark perf.py knows about is generated
 # automatically.
-BENCH_GROUPS = {"default": ["2to3", "chameleon_v2", "django_v2", "nbody",
+BENCH_GROUPS = {"default": ["2to3", "chameleon_v2", "django_v3", "nbody",
                             "tornado_http", "fastpickle", "fastunpickle",
                             "regex_v8", "json_dump_v2", "json_load"],
                 "startup": ["normal_startup", "startup_nosite",
@@ -2313,7 +2324,7 @@
                 "calls": ["call_simple", "call_method", "call_method_slots",
                           "call_method_unknown"],
                 "math": ["float", "nbody", "pidigits"],
-                "template" : ["slowspitfire", "django_v2", "mako_v2"],
+                "template" : ["slowspitfire", "django_v3", "mako_v2"],
                 "logging": ["silent_logging", "simple_logging",
                             "formatted_logging"],
                 # These are removed from the "all" group
diff -r 75751d838789 performance/bm_django_v3.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/performance/bm_django_v3.py	Thu Dec 03 12:27:07 2015 +0200
@@ -0,0 +1,57 @@
+#!/usr/bin/python
+
+"""Wrapper script for testing the performance of the Django template system.
+
+This is intended to support Unladen Swallow's perf.py.
+
+This will have Django generate a 150x150 table as many times as you
+specify (via the -n flag). The raw times to generate the template will be
+dumped to stdout. This is more convenient for Unladen Swallow's uses: it
+allows us to keep all our stats in perf.py.
+"""
+
+import optparse
+import time
+
+from compat import xrange
+import util
+
+from django.conf import settings
+import django
+settings.configure()
+django.setup()
+from django.template import Context, Template
+
+
+DJANGO_TMPL = Template("""<table>
+{% for row in table %}
+<tr>{% for col in row %}<td>{{ col|escape }}</td>{% endfor %}</tr>
+{% endfor %}
+</table>
+""")
+
+def test_django(count, timer):
+    table = [xrange(150) for _ in xrange(150)]
+    context = Context({"table": table})
+
+    # Warm up Django.
+    DJANGO_TMPL.render(context)
+    DJANGO_TMPL.render(context)
+
+    times = []
+    for _ in xrange(count):
+        t0 = timer()
+        data = DJANGO_TMPL.render(context)
+        t1 = timer()
+        times.append(t1 - t0)
+    return times
+
+
+if __name__ == "__main__":
+    parser = optparse.OptionParser(
+        usage="%prog [options]",
+        description=("Test the performance of Django templates."))
+    util.add_standard_options_to(parser)
+    options, args = parser.parse_args()
+
+    util.run_benchmark(options, options.num_runs, test_django)
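For anyone who wants to poke at the workload outside the perf.py harness, below is a minimal standalone sketch of the same benchmark. It is an illustration, not part of the patch: it drops the repository's util/compat helpers in favour of plain range and time.time(), and it configures a TEMPLATES backend explicitly (which should also work on Django releases newer than 1.9), whereas the patch's bare settings.configure() targets the bundled Django 1.9. The bench() helper and its default run count are made up for the example.

# Standalone sketch of the django_v3 workload (illustrative, not the patch).
# Assumes a Django 1.8+ installation is importable.
import time

import django
from django.conf import settings

# Configure a template backend explicitly before any template is compiled.
settings.configure(TEMPLATES=[
    {"BACKEND": "django.template.backends.django.DjangoTemplates"},
])
django.setup()

from django.template import Context, Template

DJANGO_TMPL = Template("""<table>
{% for row in table %}
<tr>{% for col in row %}<td>{{ col|escape }}</td>{% endfor %}</tr>
{% endfor %}
</table>
""")


def bench(count=50):
    """Render the 150x150 table `count` times and return the raw times."""
    table = [range(150) for _ in range(150)]
    context = Context({"table": table})
    DJANGO_TMPL.render(context)  # warm-up render, not timed
    times = []
    for _ in range(count):
        t0 = time.time()
        DJANGO_TMPL.render(context)
        t1 = time.time()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    results = bench()
    print("%d renders: min %.4fs, mean %.4fs"
          % (len(results), min(results), sum(results) / len(results)))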