diff -r 2979f5ce6a0c perf.py
--- a/perf.py	Fri Jul 17 14:29:12 2015 -0700
+++ b/perf.py	Wed Aug 26 10:12:19 2015 +0300
@@ -1439,18 +1439,27 @@
 def BM_Django(*args, **kwargs):
     return SimpleBenchmark(MeasureDjango, *args, **kwargs)
 
-
 def MeasureDjangoV2(python, options):
     bm_path = Relative("performance/bm_django_v2.py", python, options)
     django_path = Relative("lib/Django-1.5", python, options)
     bm_env = {"PYTHONPATH": django_path}
     return MeasureGeneric(python, options, bm_path, bm_env)
 
-@VersionRange()
+def MeasureDjangoV3(python, options):
+    bm_path = Relative("performance/bm_django_v3.py", python, options)
+    django_path = Relative("lib/Django-1.9", python, options)
+    bm_env = {"PYTHONPATH": django_path}
+    return MeasureGeneric(python, options, bm_path, bm_env)
+
+@VersionRange(None, '3.5')
 def BM_Django_v2(*args, **kwargs):
     return SimpleBenchmark(MeasureDjangoV2, *args, **kwargs)
 
+def BM_Django_v3(*args, **kwargs):
+    return SimpleBenchmark(MeasureDjangoV3, *args, **kwargs)
+
+
 
 def MeasureFloat(python, options):
     bm_path = Relative("performance/bm_float.py")
     return MeasureGeneric(python, options, bm_path)
@@ -2245,7 +2254,7 @@
 # If you update the default group, be sure to update the module docstring, too.
 # An "all" group which includes every benchmark perf.py knows about is generated
 # automatically.
-BENCH_GROUPS = {"default": ["2to3", "django_v2", "nbody", "spambayes",
+BENCH_GROUPS = {"default": ["2to3", "django_v3", "nbody", "spambayes",
                             "tornado_http", "fastpickle", "fastunpickle",
                             "html5lib", "regex_v8", "etree",
                             "json_dump_v2", "json_load"],
@@ -2264,7 +2273,7 @@
                 "calls": ["call_simple", "call_method", "call_method_slots",
                           "call_method_unknown"],
                 "math": ["float", "nbody", "pidigits"],
-                "template" : ["slowspitfire", "django_v2", "mako_v2"],
+                "template" : ["slowspitfire", "django_v3", "mako_v2"],
                 "logging": ["silent_logging", "simple_logging",
                             "formatted_logging"],
                 # These are removed from the "all" group
diff -r 2979f5ce6a0c performance/bm_django_v3.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/performance/bm_django_v3.py	Wed Aug 26 10:12:19 2015 +0300
@@ -0,0 +1,57 @@
+#!/usr/bin/python
+
+"""Wrapper script for testing the performance of the Django template system.
+
+This is intended to support Unladen Swallow's perf.py.
+
+This will have Django generate a 150x150 table as many times as you
+specify (via the -n flag). The raw times to generate the template will be
+dumped to stdout. This is more convenient for Unladen Swallow's uses: it
+allows us to keep all our stats in perf.py.
+"""
+
+import optparse
+import time
+
+from compat import xrange
+import util
+
+from django.conf import settings
+import django
+settings.configure()
+django.setup()
+from django.template import Context, Template
+
+
+DJANGO_TMPL = Template("""<table>
+{% for row in table %}
+<tr>{% for col in row %}<td>{{ col|escape }}</td>{% endfor %}</tr>
+{% endfor %}
+</table>
+""")
+
+def test_django(count, timer):
+    table = [xrange(150) for _ in xrange(150)]
+    context = Context({"table": table})
+
+    # Warm up Django.
+    DJANGO_TMPL.render(context)
+    DJANGO_TMPL.render(context)
+
+    times = []
+    for _ in xrange(count):
+        t0 = timer()
+        data = DJANGO_TMPL.render(context)
+        t1 = timer()
+        times.append(t1 - t0)
+    return times
+
+
+if __name__ == "__main__":
+    parser = optparse.OptionParser(
+        usage="%prog [options]",
+        description=("Test the performance of Django templates."))
+    util.add_standard_options_to(parser)
+    options, args = parser.parse_args()
+
+    util.run_benchmark(options, options.num_runs, test_django)