diff -r 0be296605165 -r faa88c50a3d2 .gitignore --- a/.gitignore Wed May 23 22:26:55 2012 +0200 +++ b/.gitignore Wed May 23 21:09:05 2012 +0200 @@ -5,18 +5,14 @@ *.pyd *.pyo *.rej -*.swp *~ -.gdb_history Doc/build/ Doc/tools/docutils/ -Doc/tools/jinja/ Doc/tools/jinja2/ Doc/tools/pygments/ Doc/tools/sphinx/ Lib/lib2to3/*.pickle Lib/_sysconfigdata.py -Lib/plat-mac/errors.rsrc.df.rsrc Makefile Makefile.pre Misc/python.pc @@ -35,34 +31,19 @@ PCbuild/*.o PCbuild/*.pdb PCbuild/Win32-temp-* -PCbuild/amd64/ -.purify Parser/pgen __pycache__ autom4te.cache build/ -buildno -config.cache -config.log -config.status -config.status.lineno -core -db_home config.log config.status libpython*.a libpython*.so* -platform pybuilddir.txt pyconfig.h python -python.exe python-gdb.py -python.exe-gdb.py -reflog.txt -.svn/ tags -TAGS .coverage coverage/ htmlcov/ diff -r 0be296605165 -r faa88c50a3d2 .hgignore --- a/.hgignore Wed May 23 22:26:55 2012 +0200 +++ b/.hgignore Wed May 23 21:09:05 2012 +0200 @@ -32,6 +32,7 @@ Modules/config.c Modules/ld_so_aix$ Parser/pgen$ +PCbuild/amd64/ ^core ^python-gdb.py ^python.exe-gdb.py @@ -55,12 +56,6 @@ PC/pythonnt_rc*.h PC/*.obj PC/*.exe -PC/*/*.user -PC/*/*.ncb -PC/*/*.suo -PC/*/Win32-temp-* -PC/*/x64-temp-* -PC/*/amd64 PCbuild/*.exe PCbuild/*.dll PCbuild/*.pdb @@ -74,8 +69,6 @@ PCbuild/*.*sdf PCbuild/Win32-temp-* PCbuild/x64-temp-* -PCbuild/amd64 -BuildLog.htm __pycache__ Modules/_testembed .coverage diff -r 0be296605165 -r faa88c50a3d2 Doc/c-api/exceptions.rst --- a/Doc/c-api/exceptions.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/c-api/exceptions.rst Wed May 23 21:09:05 2012 +0200 @@ -471,6 +471,10 @@ set by ``raise ... from ...``) associated with the exception as a new reference, as accessible from Python through :attr:`__cause__`. + If there is no cause associated, this returns *NULL* (from Python + ``__cause__ is Ellipsis``). If the cause is :const:`None`, the default + exception display routines stop showing the context chain. + .. c:function:: void PyException_SetCause(PyObject *ex, PyObject *ctx) @@ -478,7 +482,9 @@ it. There is no type check to make sure that *ctx* is either an exception instance or :const:`None`. This steals a reference to *ctx*. - :attr:`__suppress_context__` is implicitly set to ``True`` by this function. + If the cause is set to :const:`None` the default exception display + routines will not display this exception's context, and will not follow the + chain any further. .. _unicodeexceptions: diff -r 0be296605165 -r faa88c50a3d2 Doc/c-api/import.rst --- a/Doc/c-api/import.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/c-api/import.rst Wed May 23 21:09:05 2012 +0200 @@ -30,13 +30,13 @@ .. c:function:: PyObject* PyImport_ImportModuleNoBlock(const char *name) - This function is a deprecated alias of :c:func:`PyImport_ImportModule`. - - .. versionchanged:: 3.3 - This function used to fail immediately when the import lock was held - by another thread. In Python 3.3 though, the locking scheme switched - to per-module locks for most purposes, so this function's special - behaviour isn't needed anymore. + This version of :c:func:`PyImport_ImportModule` does not block. It's intended + to be used in C functions that import other modules to execute a function. + The import may block if another thread holds the import lock. The function + :c:func:`PyImport_ImportModuleNoBlock` never blocks. 
It first tries to fetch + the module from sys.modules and falls back to :c:func:`PyImport_ImportModule` + unless the lock is held, in which case the function will raise an + :exc:`ImportError`. .. c:function:: PyObject* PyImport_ImportModuleEx(char *name, PyObject *globals, PyObject *locals, PyObject *fromlist) diff -r 0be296605165 -r faa88c50a3d2 Doc/library/__future__.rst --- a/Doc/library/__future__.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/__future__.rst Wed May 23 21:09:05 2012 +0200 @@ -75,7 +75,7 @@ | division | 2.2.0a2 | 3.0 | :pep:`238`: | | | | | *Changing the Division Operator* | +------------------+-------------+--------------+---------------------------------------------+ -| absolute_import | 2.5.0a1 | 3.0 | :pep:`328`: | +| absolute_import | 2.5.0a1 | 2.7 | :pep:`328`: | | | | | *Imports: Multi-Line and Absolute/Relative* | +------------------+-------------+--------------+---------------------------------------------+ | with_statement | 2.5.0a1 | 2.6 | :pep:`343`: | diff -r 0be296605165 -r faa88c50a3d2 Doc/library/asynchat.rst --- a/Doc/library/asynchat.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/asynchat.rst Wed May 23 21:09:05 2012 +0200 @@ -197,9 +197,6 @@ marshalled, after setting the channel terminator to ``None`` to ensure that any extraneous data sent by the web client are ignored. :: - - import asynchat - class http_request_handler(asynchat.async_chat): def __init__(self, sock, addr, sessions, log): diff -r 0be296605165 -r faa88c50a3d2 Doc/library/asyncore.rst --- a/Doc/library/asyncore.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/asyncore.rst Wed May 23 21:09:05 2012 +0200 @@ -277,7 +277,7 @@ Here is a very basic HTTP client that uses the :class:`dispatcher` class to implement its socket handling:: - import asyncore + import asyncore, socket class HTTPClient(asyncore.dispatcher): @@ -317,6 +317,7 @@ connections and dispatches the incoming connections to a handler:: import asyncore + import socket class EchoHandler(asyncore.dispatcher_with_send): @@ -340,3 +341,4 @@ server = EchoServer('localhost', 8080) asyncore.loop() + diff -r 0be296605165 -r faa88c50a3d2 Doc/library/contextlib.rst --- a/Doc/library/contextlib.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/contextlib.rst Wed May 23 21:09:05 2012 +0200 @@ -12,11 +12,8 @@ statement. For more information see also :ref:`typecontextmanager` and :ref:`context-managers`. +Functions provided: -Utilities ---------- - -Functions and classes provided: .. decorator:: contextmanager @@ -171,280 +168,6 @@ .. versionadded:: 3.2 -.. class:: ExitStack() - - A context manager that is designed to make it easy to programmatically - combine other context managers and cleanup functions, especially those - that are optional or otherwise driven by input data. - - For example, a set of files may easily be handled in a single with - statement as follows:: - - with ExitStack() as stack: - files = [stack.enter_context(open(fname)) for fname in filenames] - # All opened files will automatically be closed at the end of - # the with statement, even if attempts to open files later - # in the list throw an exception - - Each instance maintains a stack of registered callbacks that are called in - reverse order when the instance is closed (either explicitly or implicitly - at the end of a ``with`` statement). Note that callbacks are *not* invoked - implicitly when the context stack instance is garbage collected. 
- - This stack model is used so that context managers that acquire their - resources in their ``__init__`` method (such as file objects) can be - handled correctly. - - Since registered callbacks are invoked in the reverse order of - registration, this ends up behaving as if multiple nested ``with`` - statements had been used with the registered set of callbacks. This even - extends to exception handling - if an inner callback suppresses or replaces - an exception, then outer callbacks will be passed arguments based on that - updated state. - - This is a relatively low level API that takes care of the details of - correctly unwinding the stack of exit callbacks. It provides a suitable - foundation for higher level context managers that manipulate the exit - stack in application specific ways. - - .. versionadded:: 3.3 - - .. method:: enter_context(cm) - - Enters a new context manager and adds its :meth:`__exit__` method to - the callback stack. The return value is the result of the context - manager's own :meth:`__enter__` method. - - These context managers may suppress exceptions just as they normally - would if used directly as part of a ``with`` statement. - - .. method:: push(exit) - - Adds a context manager's :meth:`__exit__` method to the callback stack. - - As ``__enter__`` is *not* invoked, this method can be used to cover - part of an :meth:`__enter__` implementation with a context manager's own - :meth:`__exit__` method. - - If passed an object that is not a context manager, this method assumes - it is a callback with the same signature as a context manager's - :meth:`__exit__` method and adds it directly to the callback stack. - - By returning true values, these callbacks can suppress exceptions the - same way context manager :meth:`__exit__` methods can. - - The passed in object is returned from the function, allowing this - method to be used is a function decorator. - - .. method:: callback(callback, *args, **kwds) - - Accepts an arbitrary callback function and arguments and adds it to - the callback stack. - - Unlike the other methods, callbacks added this way cannot suppress - exceptions (as they are never passed the exception details). - - The passed in callback is returned from the function, allowing this - method to be used is a function decorator. - - .. method:: pop_all() - - Transfers the callback stack to a fresh :class:`ExitStack` instance - and returns it. No callbacks are invoked by this operation - instead, - they will now be invoked when the new stack is closed (either - explicitly or implicitly). - - For example, a group of files can be opened as an "all or nothing" - operation as follows:: - - with ExitStack() as stack: - files = [stack.enter_context(open(fname)) for fname in filenames] - close_files = stack.pop_all().close - # If opening any file fails, all previously opened files will be - # closed automatically. If all files are opened successfully, - # they will remain open even after the with statement ends. - # close_files() can then be invoked explicitly to close them all - - .. method:: close() - - Immediately unwinds the callback stack, invoking callbacks in the - reverse order of registration. For any context managers and exit - callbacks registered, the arguments passed in will indicate that no - exception occurred. - - -Examples and Recipes --------------------- - -This section describes some examples and recipes for making effective use of -the tools provided by :mod:`contextlib`. 
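An illustrative sketch (not part of the changeset itself) of the ``ExitStack.callback`` behaviour described in the removed section above; ``release`` and the resource names are made up for the sketch, the API is :class:`contextlib.ExitStack` as documented there::

   from contextlib import ExitStack

   def release(name):
       # Made-up cleanup helper, used only for this sketch.
       print('releasing', name)

   with ExitStack() as stack:
       # callback() accepts extra arguments and returns the callable,
       # so several resources can be registered in a loop.
       for name in ('db', 'cache', 'lockfile'):
           stack.callback(release, name)
       # On exit the callbacks run in reverse order of registration:
       # lockfile, then cache, then db.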
- - -Cleaning up in an ``__enter__`` implementation -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -As noted in the documentation of :meth:`ExitStack.push`, this -method can be useful in cleaning up an already allocated resource if later -steps in the :meth:`__enter__` implementation fail. - -Here's an example of doing this for a context manager that accepts resource -acquisition and release functions, along with an optional validation function, -and maps them to the context management protocol:: - - from contextlib import contextmanager, ExitStack - - class ResourceManager(object): - - def __init__(self, acquire_resource, release_resource, check_resource_ok=None): - self.acquire_resource = acquire_resource - self.release_resource = release_resource - if check_resource_ok is None: - def check_resource_ok(resource): - return True - self.check_resource_ok = check_resource_ok - - @contextmanager - def _cleanup_on_error(self): - with ExitStack() as stack: - stack.push(self) - yield - # The validation check passed and didn't raise an exception - # Accordingly, we want to keep the resource, and pass it - # back to our caller - stack.pop_all() - - def __enter__(self): - resource = self.acquire_resource() - with self._cleanup_on_error(): - if not self.check_resource_ok(resource): - msg = "Failed validation for {!r}" - raise RuntimeError(msg.format(resource)) - return resource - - def __exit__(self, *exc_details): - # We don't need to duplicate any of our resource release logic - self.release_resource() - - -Replacing any use of ``try-finally`` and flag variables -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -A pattern you will sometimes see is a ``try-finally`` statement with a flag -variable to indicate whether or not the body of the ``finally`` clause should -be executed. In its simplest form (that can't already be handled just by -using an ``except`` clause instead), it looks something like this:: - - cleanup_needed = True - try: - result = perform_operation() - if result: - cleanup_needed = False - finally: - if cleanup_needed: - cleanup_resources() - -As with any ``try`` statement based code, this can cause problems for -development and review, because the setup code and the cleanup code can end -up being separated by arbitrarily long sections of code. - -:class:`ExitStack` makes it possible to instead register a callback for -execution at the end of a ``with`` statement, and then later decide to skip -executing that callback:: - - from contextlib import ExitStack - - with ExitStack() as stack: - stack.callback(cleanup_resources) - result = perform_operation() - if result: - stack.pop_all() - -This allows the intended cleanup up behaviour to be made explicit up front, -rather than requiring a separate flag variable. 
- -If a particular application uses this pattern a lot, it can be simplified -even further by means of a small helper class:: - - from contextlib import ExitStack - - class Callback(ExitStack): - def __init__(self, callback, *args, **kwds): - super(Callback, self).__init__() - self.callback(callback, *args, **kwds) - - def cancel(self): - self.pop_all() - - with Callback(cleanup_resources) as cb: - result = perform_operation() - if result: - cb.cancel() - -If the resource cleanup isn't already neatly bundled into a standalone -function, then it is still possible to use the decorator form of -:meth:`ExitStack.callback` to declare the resource cleanup in -advance:: - - from contextlib import ExitStack - - with ExitStack() as stack: - @stack.callback - def cleanup_resources(): - ... - result = perform_operation() - if result: - stack.pop_all() - -Due to the way the decorator protocol works, a callback function -declared this way cannot take any parameters. Instead, any resources to -be released must be accessed as closure variables - - -Using a context manager as a function decorator -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -:class:`ContextDecorator` makes it possible to use a context manager in -both an ordinary ``with`` statement and also as a function decorator. - -For example, it is sometimes useful to wrap functions or groups of statements -with a logger that can track the time of entry and time of exit. Rather than -writing both a function decorator and a context manager for the task, -inheriting from :class:`ContextDecorator` provides both capabilities in a -single definition:: - - from contextlib import ContextDecorator - import logging - - logging.basicConfig(level=logging.INFO) - - class track_entry_and_exit(ContextDecorator): - def __init__(self, name): - self.name = name - - def __enter__(self): - logging.info('Entering: {}'.format(name)) - - def __exit__(self, exc_type, exc, exc_tb): - logging.info('Exiting: {}'.format(name)) - -Instances of this class can be used as both a context manager:: - - with track_entry_and_exit('widget loader'): - print('Some time consuming activity goes here') - load_widget() - -And also as a function decorator:: - - @track_entry_and_exit('widget loader') - def activity(): - print('Some time consuming activity goes here') - load_widget() - -Note that there is one additional limitation when using context managers -as function decorators: there's no way to access the return value of -:meth:`__enter__`. If that value is needed, then it is still necessary to use -an explicit ``with`` statement. - .. seealso:: :pep:`0343` - The "with" statement diff -r 0be296605165 -r faa88c50a3d2 Doc/library/datetime.rst --- a/Doc/library/datetime.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/datetime.rst Wed May 23 21:09:05 2012 +0200 @@ -12,34 +12,28 @@ The :mod:`datetime` module supplies classes for manipulating dates and times in both simple and complex ways. While date and time arithmetic is supported, the focus of the implementation is on efficient attribute extraction for output -formatting and manipulation. For related functionality, see also the -:mod:`time` and :mod:`calendar` modules. +formatting and manipulation. For related +functionality, see also the :mod:`time` and :mod:`calendar` modules. -There are two kinds of date and time objects: "naive" and "aware". +There are two kinds of date and time objects: "naive" and "aware". 
This +distinction refers to whether the object has any notion of time zone, daylight +saving time, or other kind of algorithmic or political time adjustment. Whether +a naive :class:`.datetime` object represents Coordinated Universal Time (UTC), +local time, or time in some other timezone is purely up to the program, just +like it's up to the program whether a particular number represents metres, +miles, or mass. Naive :class:`.datetime` objects are easy to understand and to +work with, at the cost of ignoring some aspects of reality. -An aware object has sufficient knowledge of applicable algorithmic and -political time adjustments, such as time zone and daylight saving time -information, to locate itself relative to other aware objects. An aware object -is used to represent a specific moment in time that is not open to -interpretation [#]_. - -A naive object does not contain enough information to unambiguously locate -itself relative to other date/time objects. Whether a naive object represents -Coordinated Universal Time (UTC), local time, or time in some other timezone is -purely up to the program, just like it is up to the program whether a -particular number represents metres, miles, or mass. Naive objects are easy to -understand and to work with, at the cost of ignoring some aspects of reality. - -For applications requiring aware objects, :class:`.datetime` and :class:`.time` -objects have an optional time zone information attribute, :attr:`tzinfo`, that -can be set to an instance of a subclass of the abstract :class:`tzinfo` class. -These :class:`tzinfo` objects capture information about the offset from UTC -time, the time zone name, and whether Daylight Saving Time is in effect. Note -that only one concrete :class:`tzinfo` class, the :class:`timezone` class, is -supplied by the :mod:`datetime` module. The :class:`timezone` class can -represent simple timezones with fixed offset from UTC, such as UTC itself or -North American EST and EDT timezones. Supporting timezones at deeper levels of -detail is up to the application. The rules for time adjustment across the +For applications requiring more, :class:`.datetime` and :class:`.time` objects +have an optional time zone information attribute, :attr:`tzinfo`, that can be +set to an instance of a subclass of the abstract :class:`tzinfo` class. These +:class:`tzinfo` objects capture information about the offset from UTC time, the +time zone name, and whether Daylight Saving Time is in effect. Note that only +one concrete :class:`tzinfo` class, the :class:`timezone` class, is supplied by the +:mod:`datetime` module. The :class:`timezone` class can represent simple +timezones with fixed offset from UTC such as UTC itself or North American EST and +EDT timezones. Supporting timezones at whatever level of detail is +required is up to the application. The rules for time adjustment across the world are more political than rational, change frequently, and there is no standard suitable for every application aside from UTC. @@ -120,13 +114,10 @@ Objects of the :class:`date` type are always naive. -An object of type :class:`.time` or :class:`.datetime` may be naive or aware. -A :class:`.datetime` object *d* is aware if ``d.tzinfo`` is not ``None`` and -``d.tzinfo.utcoffset(d)`` does not return ``None``. If ``d.tzinfo`` is -``None``, or if ``d.tzinfo`` is not ``None`` but ``d.tzinfo.utcoffset(d)`` -returns ``None``, *d* is naive. 
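As a sketch of the naive/aware rule spelled out in this hunk (illustrative only; ``is_aware`` is a made-up helper name)::

   from datetime import datetime, timezone

   def is_aware(d):
       # Aware: tzinfo is set and utcoffset() returns a value.
       return d.tzinfo is not None and d.tzinfo.utcoffset(d) is not None

   print(is_aware(datetime(2012, 5, 23)))                        # False: naive
   print(is_aware(datetime(2012, 5, 23, tzinfo=timezone.utc)))   # True: aware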
A :class:`.time` object *t* is aware -if ``t.tzinfo`` is not ``None`` and ``t.tzinfo.utcoffset(None)`` does not return -``None``. Otherwise, *t* is naive. +An object *d* of type :class:`.time` or :class:`.datetime` may be naive or aware. +*d* is aware if ``d.tzinfo`` is not ``None`` and ``d.tzinfo.utcoffset(d)`` does +not return ``None``. If ``d.tzinfo`` is ``None``, or if ``d.tzinfo`` is not +``None`` but ``d.tzinfo.utcoffset(d)`` returns ``None``, *d* is naive. The distinction between naive and aware doesn't apply to :class:`timedelta` objects. @@ -1855,7 +1846,3 @@ When the ``%z`` directive is provided to the :meth:`strptime` method, an aware :class:`.datetime` object will be produced. The ``tzinfo`` of the result will be set to a :class:`timezone` instance. - -.. rubric:: Footnotes - -.. [#] If, that is, we ignore the effects of Relativity diff -r 0be296605165 -r faa88c50a3d2 Doc/library/email.generator.rst --- a/Doc/library/email.generator.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/email.generator.rst Wed May 23 21:09:05 2012 +0200 @@ -17,10 +17,10 @@ standards-compliant way, should handle MIME and non-MIME email messages just fine, and is designed so that the transformation from flat text, to a message structure via the :class:`~email.parser.Parser` class, and back to flat text, -is idempotent (the input is identical to the output) [#]_. On the other hand, -using the Generator on a :class:`~email.message.Message` constructed by program -may result in changes to the :class:`~email.message.Message` object as defaults -are filled in. +is idempotent (the input is identical to the output). On the other hand, using +the Generator on a :class:`~email.message.Message` constructed by program may +result in changes to the :class:`~email.message.Message` object as defaults are +filled in. :class:`bytes` output can be generated using the :class:`BytesGenerator` class. If the message object structure contains non-ASCII bytes, this generator's @@ -197,7 +197,7 @@ representing the part. -.. class:: DecodedGenerator(outfp, mangle_from_=True, maxheaderlen=78, fmt=None) +.. class:: DecodedGenerator(outfp[, mangle_from_=True, maxheaderlen=78, fmt=None) This class, derived from :class:`Generator` walks through all the subparts of a message. If the subpart is of main type :mimetype:`text`, then it prints the @@ -223,12 +223,3 @@ The default value for *fmt* is ``None``, meaning :: [Non-text (%(type)s) part of message omitted, filename %(filename)s] - - -.. rubric:: Footnotes - -.. [#] This statement assumes that you use the appropriate setting for the - ``unixfrom`` argument, and that you set maxheaderlen=0 (which will - preserve whatever the input line lengths were). It is also not strictly - true, since in many cases runs of whitespace in headers are collapsed - into single blanks. The latter is a bug that will eventually be fixed. diff -r 0be296605165 -r faa88c50a3d2 Doc/library/exceptions.rst --- a/Doc/library/exceptions.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/exceptions.rst Wed May 23 21:09:05 2012 +0200 @@ -39,17 +39,18 @@ new exception is not handled the traceback that is eventually displayed will include the originating exception(s) and the final exception. -This implicit exception chain can be made explicit by using :keyword:`from` with -:keyword:`raise`. The single argument to :keyword:`from` must be an exception -or ``None``. It will be set as :attr:`__cause__` on the raised exception. 
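A minimal sketch of the explicit chaining discussed in this hunk (illustrative only, not taken from the changeset), showing how ``raise ... from`` sets :attr:`__cause__` while :attr:`__context__` is still filled in implicitly::

   try:
       try:
           {}['missing']
       except KeyError as exc:
           # Explicit chaining: the KeyError becomes __cause__ of the
           # RuntimeError and is reported as its direct cause.
           raise RuntimeError('lookup failed') from exc
   except RuntimeError as err:
       print(type(err.__cause__).__name__)    # KeyError
       print(type(err.__context__).__name__)  # KeyError (implicit chain)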
-Setting :attr:`__cause__` implicitly sets the :attr:`__suppress_context__` to -``True``. If :attr:`__cause__` is an exception, it will be displayed. If -:attr:`__cause__` is present or :attr:`__suppress_context__` has a true value, -:attr:`__context__` will not be displayed. +This implicit exception chain can be made explicit by using :keyword:`from` +with :keyword:`raise`. The single argument to :keyword:`from` must be an +exception or :const:`None`, and it will be set as :attr:`__cause__` on the +raised exception. If :attr:`__cause__` is an exception it will be displayed +instead of :attr:`__context__`; if :attr:`__cause__` is None, +:attr:`__context__` will not be displayed by the default exception handling +code. (Note: the default value for :attr:`__context__` is :const:`None`, +while the default value for :attr:`__cause__` is :const:`Ellipsis`.) -In either case, the default exception handling code will not display any of the -remaining links in the :attr:`__context__` chain if :attr:`__cause__` has been -set. +In either case, the default exception handling code will not display +any of the remaining links in the :attr:`__context__` chain if +:attr:`__cause__` has been set. Base classes diff -r 0be296605165 -r faa88c50a3d2 Doc/library/functions.rst --- a/Doc/library/functions.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/functions.rst Wed May 23 21:09:05 2012 +0200 @@ -797,19 +797,17 @@ *mode* is an optional string that specifies the mode in which the file is opened. It defaults to ``'r'`` which means open for reading in text mode. Other common values are ``'w'`` for writing (truncating the file if it - already exists), ``'x'`` for exclusive creation and ``'a'`` for appending - (which on *some* Unix systems, means that *all* writes append to the end of - the file regardless of the current seek position). In text mode, if - *encoding* is not specified the encoding used is platform dependent. (For - reading and writing raw bytes use binary mode and leave *encoding* - unspecified.) The available modes are: + already exists), and ``'a'`` for appending (which on *some* Unix systems, + means that *all* writes append to the end of the file regardless of the + current seek position). In text mode, if *encoding* is not specified the + encoding used is platform dependent. (For reading and writing raw bytes use + binary mode and leave *encoding* unspecified.) The available modes are: ========= =============================================================== Character Meaning --------- --------------------------------------------------------------- ``'r'`` open for reading (default) ``'w'`` open for writing, truncating the file first - ``'x'`` open for exclusive creation, failing if the file already exists ``'a'`` open for writing, appending to the end of the file if it exists ``'b'`` binary mode ``'t'`` text mode (default) @@ -900,7 +898,6 @@ .. versionchanged:: 3.3 The *opener* parameter was added. - The ``'x'`` mode was added. The type of file object returned by the :func:`open` function depends on the mode. When :func:`open` is used to open a file in a text mode (``'w'``, @@ -929,8 +926,6 @@ .. versionchanged:: 3.3 :exc:`IOError` used to be raised, it is now an alias of :exc:`OSError`. - :exc:`FileExistsError` is now raised if the file opened in exclusive - creation mode (``'x'``) already exists. .. XXX works for bytes too, but should it? @@ -1329,12 +1324,10 @@ Accordingly, :func:`super` is undefined for implicit lookups using statements or operators such as ``super()[name]``. 
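A short sketch of the zero-argument and two-argument forms of :func:`super` covered by this entry (``Base`` and ``Derived`` are invented classes, used only to illustrate)::

   class Base:
       def greet(self):
           return 'Base'

   class Derived(Base):
       def greet(self):
           # Zero-argument form: only valid inside a class definition.
           zero = super().greet()
           # Two-argument form: names the class and instance explicitly.
           two = super(Derived, self).greet()
           return zero, two

   print(Derived().greet())   # ('Base', 'Base')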
- Also note that, aside from the zero argument form, :func:`super` is not - limited to use inside methods. The two argument form specifies the - arguments exactly and makes the appropriate references. The zero - argument form only works inside a class definition, as the compiler fills - in the necessary details to correctly retrieve the class being defined, - as well as accessing the current instance for ordinary methods. + Also note that :func:`super` is not limited to use inside methods. The two + argument form specifies the arguments exactly and makes the appropriate + references. The zero argument form automatically searches the stack frame + for the class (``__class__``) and the first argument. For practical suggestions on how to design cooperative classes using :func:`super`, see `guide to using super() diff -r 0be296605165 -r faa88c50a3d2 Doc/library/functools.rst --- a/Doc/library/functools.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/functools.rst Wed May 23 21:09:05 2012 +0200 @@ -40,7 +40,7 @@ .. versionadded:: 3.2 -.. decorator:: lru_cache(maxsize=128, typed=False) +.. decorator:: lru_cache(maxsize=100, typed=False) Decorator to wrap a function with a memoizing callable that saves up to the *maxsize* most recent calls. It can save time when an expensive or I/O bound diff -r 0be296605165 -r faa88c50a3d2 Doc/library/hmac.rst --- a/Doc/library/hmac.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/hmac.rst Wed May 23 21:09:05 2012 +0200 @@ -83,7 +83,6 @@ contents of the inputs via a timing attack, it does leak the length of the inputs. However, this generally is not a security risk. - .. versionadded:: 3.3 .. seealso:: diff -r 0be296605165 -r faa88c50a3d2 Doc/library/http.client.rst --- a/Doc/library/http.client.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/http.client.rst Wed May 23 21:09:05 2012 +0200 @@ -339,15 +339,6 @@ | :const:`UPGRADE_REQUIRED` | ``426`` | HTTP Upgrade to TLS, | | | | :rfc:`2817`, Section 6 | +------------------------------------------+---------+-----------------------------------------------------------------------+ -| :const:`PRECONDITION_REQUIRED` | ``428`` | Additional HTTP Status Codes, | -| | | :rfc:`6585`, Section 3 | -+------------------------------------------+---------+-----------------------------------------------------------------------+ -| :const:`TOO_MANY_REQUESTS` | ``429`` | Additional HTTP Status Codes, | -| | | :rfc:`6585`, Section 4 | -+------------------------------------------+---------+-----------------------------------------------------------------------+ -| :const:`REQUEST_HEADER_FIELDS_TOO_LARGE` | ``431`` | Additional HTTP Status Codes, | -| | | :rfc:`6585`, Section 5 | -+------------------------------------------+---------+-----------------------------------------------------------------------+ | :const:`INTERNAL_SERVER_ERROR` | ``500`` | HTTP/1.1, `RFC 2616, Section | | | | 10.5.1 | | | | `_ | @@ -378,12 +369,6 @@ | :const:`NOT_EXTENDED` | ``510`` | An HTTP Extension Framework, | | | | :rfc:`2774`, Section 7 | +------------------------------------------+---------+-----------------------------------------------------------------------+ -| :const:`NETWORK_AUTHENTICATION_REQUIRED` | ``511`` | Additional HTTP Status Codes, | -| | | :rfc:`6585`, Section 6 | -+------------------------------------------+---------+-----------------------------------------------------------------------+ - -.. versionchanged:: 3.3 - Added codes ``428``, ``429``, ``431`` and ``511`` from :rfc:`6585`. .. 
data:: responses diff -r 0be296605165 -r faa88c50a3d2 Doc/library/http.cookiejar.rst --- a/Doc/library/http.cookiejar.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/http.cookiejar.rst Wed May 23 21:09:05 2012 +0200 @@ -707,7 +707,7 @@ The :class:`Cookie` class also defines the following method: -.. method:: Cookie.is_expired(now=None) +.. method:: Cookie.is_expired([now=None]) True if cookie has passed the time at which the server requested it should expire. If *now* is given (in seconds since the epoch), return whether the diff -r 0be296605165 -r faa88c50a3d2 Doc/library/imp.rst --- a/Doc/library/imp.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/imp.rst Wed May 23 21:09:05 2012 +0200 @@ -107,6 +107,37 @@ in ``sys.modules``. +.. function:: lock_held() + + Return ``True`` if the import lock is currently held, else ``False``. On + platforms without threads, always return ``False``. + + On platforms with threads, a thread executing an import holds an internal lock + until the import is complete. This lock blocks other threads from doing an + import until the original import completes, which in turn prevents other threads + from seeing incomplete module objects constructed by the original thread while + in the process of completing its import (and the imports, if any, triggered by + that). + + +.. function:: acquire_lock() + + Acquire the interpreter's import lock for the current thread. This lock should + be used by import hooks to ensure thread-safety when importing modules. + + Once a thread has acquired the import lock, the same thread may acquire it + again without blocking; the thread must release it once for each time it has + acquired it. + + On platforms without threads, this function does nothing. + + +.. function:: release_lock() + + Release the interpreter's import lock. On platforms without threads, this + function does nothing. + + .. function:: reload(module) Reload a previously imported *module*. The argument must be a module object, so @@ -204,49 +235,6 @@ magic number, as returned by :func:`get_magic`. -The following functions help interact with the import system's internal -locking mechanism. Locking semantics of imports are an implementation -detail which may vary from release to release. However, Python ensures -that circular imports work without any deadlocks. - -.. versionchanged:: 3.3 - In Python 3.3, the locking scheme has changed to per-module locks for - the most part. A global import lock is kept for some critical tasks, - such as initializing the per-module locks. - - -.. function:: lock_held() - - Return ``True`` if the global import lock is currently held, else - ``False``. On platforms without threads, always return ``False``. - - On platforms with threads, a thread executing an import first holds a - global import lock, then sets up a per-module lock for the rest of the - import. This blocks other threads from importing the same module until - the original import completes, preventing other threads from seeing - incomplete module objects constructed by the original thread. An - exception is made for circular imports, which by construction have to - expose an incomplete module object at some point. - -.. function:: acquire_lock() - - Acquire the interpreter's global import lock for the current thread. - This lock should be used by import hooks to ensure thread-safety when - importing modules. 
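The lock functions described in this :mod:`imp` hunk are normally paired in ``try``/``finally``; a minimal sketch follows (``hook_body`` is a placeholder for whatever an import hook actually does)::

   import imp

   def hook_body():
       # Placeholder; a real import hook would do its work here.
       pass

   imp.acquire_lock()
   try:
       hook_body()
   finally:
       # Release once for each successful acquire.
       imp.release_lock()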
- - Once a thread has acquired the import lock, the same thread may acquire it - again without blocking; the thread must release it once for each time it has - acquired it. - - On platforms without threads, this function does nothing. - - -.. function:: release_lock() - - Release the interpreter's global import lock. On platforms without - threads, this function does nothing. - - The following constants with integer values, defined in this module, are used to indicate the search result of :func:`find_module`. diff -r 0be296605165 -r faa88c50a3d2 Doc/library/json.rst --- a/Doc/library/json.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/json.rst Wed May 23 21:09:05 2012 +0200 @@ -209,13 +209,10 @@ (e.g. :class:`float`). *parse_constant*, if specified, will be called with one of the following - strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. - This can be used to raise an exception if invalid JSON numbers + strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``, ``'null'``, ``'true'``, + ``'false'``. This can be used to raise an exception if invalid JSON numbers are encountered. - .. versionchanged:: 3.1 - *parse_constant* doesn't get called on 'null', 'true', 'false' anymore. - To use a custom :class:`JSONDecoder` subclass, specify it with the ``cls`` kwarg; otherwise :class:`JSONDecoder` is used. Additional keyword arguments will be passed to the constructor of the class. diff -r 0be296605165 -r faa88c50a3d2 Doc/library/logging.rst --- a/Doc/library/logging.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/logging.rst Wed May 23 21:09:05 2012 +0200 @@ -962,8 +962,7 @@ effect is to disable all logging calls of severity *lvl* and below, so that if you call it with a value of INFO, then all INFO and DEBUG events would be discarded, whereas those of severity WARNING and above would be processed - according to the logger's effective level. To undo the effect of a call to - ``logging.disable(lvl)``, call ``logging.disable(logging.NOTSET)``. + according to the logger's effective level. .. function:: addLevelName(lvl, levelName) diff -r 0be296605165 -r faa88c50a3d2 Doc/library/multiprocessing.rst --- a/Doc/library/multiprocessing.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/multiprocessing.rst Wed May 23 21:09:05 2012 +0200 @@ -120,7 +120,9 @@ print(q.get()) # prints "[42, None, 'hello']" p.join() - Queues are thread and process safe. + Queues are thread and process safe, but note that they must never + be instantiated as a side effect of importing a module: this can lead + to a deadlock! (see :ref:`threaded-imports`) **Pipes** diff -r 0be296605165 -r faa88c50a3d2 Doc/library/nis.rst --- a/Doc/library/nis.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/nis.rst Wed May 23 21:09:05 2012 +0200 @@ -17,7 +17,7 @@ The :mod:`nis` module defines the following functions: -.. function:: match(key, mapname, domain=default_domain) +.. function:: match(key, mapname[, domain=default_domain]) Return the match for *key* in map *mapname*, or raise an error (:exc:`nis.error`) if there is none. Both should be strings, *key* is 8-bit @@ -30,7 +30,7 @@ unspecified, lookup is in the default NIS domain. -.. function:: cat(mapname, domain=default_domain) +.. function:: cat(mapname[, domain=default_domain]) Return a dictionary mapping *key* to *value* such that ``match(key, mapname)==value``. Note that both keys and values of the dictionary are @@ -42,7 +42,7 @@ unspecified, lookup is in the default NIS domain. -.. function:: maps(domain=default_domain) +.. 
function:: maps([domain=default_domain]) Return a list of all valid maps. diff -r 0be296605165 -r faa88c50a3d2 Doc/library/os.rst --- a/Doc/library/os.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/os.rst Wed May 23 21:09:05 2012 +0200 @@ -1786,7 +1786,7 @@ Availability: Unix. -.. function:: mknod(filename[, mode=0o600[, device=0]]) +.. function:: mknod(filename[, mode=0o600[, device]]) Create a filesystem node (file, device special file or named pipe) named *filename*. *mode* specifies both the permissions to use and the type of node diff -r 0be296605165 -r faa88c50a3d2 Doc/library/ossaudiodev.rst --- a/Doc/library/ossaudiodev.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/ossaudiodev.rst Wed May 23 21:09:05 2012 +0200 @@ -281,7 +281,7 @@ simple calculations. -.. method:: oss_audio_device.setparameters(format, nchannels, samplerate[, strict=False]) +.. method:: oss_audio_device.setparameters(format, nchannels, samplerate [, strict=False]) Set the key audio sampling parameters---sample format, number of channels, and sampling rate---in one method call. *format*, *nchannels*, and *samplerate* diff -r 0be296605165 -r faa88c50a3d2 Doc/library/packaging.compiler.rst --- a/Doc/library/packaging.compiler.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/packaging.compiler.rst Wed May 23 21:09:05 2012 +0200 @@ -255,7 +255,7 @@ that the runtime linker may search by default. - .. method:: CCompiler.define_macro(name, value=None) + .. method:: CCompiler.define_macro(name[, value=None]) Define a preprocessor macro for all compilations driven by this compiler object. The optional parameter *value* should be a string; if it is not @@ -298,7 +298,7 @@ (a list) to do the job. - .. method:: CCompiler.find_library_file(dirs, lib, debug=0) + .. method:: CCompiler.find_library_file(dirs, lib[, debug=0]) Search the specified list of directories for a static or shared library file *lib* and return the full path to that file. If *debug* is true, look for a @@ -306,7 +306,7 @@ ``None`` if *lib* wasn't found in any of the specified directories. - .. method:: CCompiler.has_function(funcname, includes=None, include_dirs=None, libraries=None, library_dirs=None) + .. method:: CCompiler.has_function(funcname [, includes=None, include_dirs=None, libraries=None, library_dirs=None]) Return a boolean indicating whether *funcname* is supported on the current platform. The optional arguments can be used to augment the compilation @@ -361,7 +361,7 @@ The following methods invoke stages in the build process. - .. method:: CCompiler.compile(sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None) + .. method:: CCompiler.compile(sources[, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None]) Compile one or more source files. Generates object files (e.g. transforms a :file:`.c` file to a :file:`.o` file.) @@ -405,7 +405,7 @@ Raises :exc:`CompileError` on failure. - .. method:: CCompiler.create_static_lib(objects, output_libname, output_dir=None, debug=0, target_lang=None) + .. method:: CCompiler.create_static_lib(objects, output_libname[, output_dir=None, debug=0, target_lang=None]) Link a bunch of stuff together to create a static library file. The "bunch of stuff" consists of the list of object files supplied as *objects*, the extra @@ -427,7 +427,7 @@ Raises :exc:`LibError` on failure. - .. 
method:: CCompiler.link(target_desc, objects, output_filename, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None) + .. method:: CCompiler.link(target_desc, objects, output_filename[, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None]) Link a bunch of stuff together to create an executable or shared library file. @@ -469,28 +469,28 @@ Raises :exc:`LinkError` on failure. - .. method:: CCompiler.link_executable(objects, output_progname, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, target_lang=None) + .. method:: CCompiler.link_executable(objects, output_progname[, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, target_lang=None]) Link an executable. *output_progname* is the name of the file executable, while *objects* are a list of object filenames to link in. Other arguments are as for the :meth:`link` method. - .. method:: CCompiler.link_shared_lib(objects, output_libname, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None) + .. method:: CCompiler.link_shared_lib(objects, output_libname[, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None]) Link a shared library. *output_libname* is the name of the output library, while *objects* is a list of object filenames to link in. Other arguments are as for the :meth:`link` method. - .. method:: CCompiler.link_shared_object(objects, output_filename, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None) + .. method:: CCompiler.link_shared_object(objects, output_filename[, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None]) Link a shared object. *output_filename* is the name of the shared object that will be created, while *objects* is a list of object filenames to link in. Other arguments are as for the :meth:`link` method. - .. method:: CCompiler.preprocess(source, output_file=None, macros=None, include_dirs=None, extra_preargs=None, extra_postargs=None) + .. method:: CCompiler.preprocess(source[, output_file=None, macros=None, include_dirs=None, extra_preargs=None, extra_postargs=None]) Preprocess a single C/C++ source file, named in *source*. Output will be written to file named *output_file*, or *stdout* if *output_file* not supplied. @@ -505,14 +505,14 @@ use by the various concrete subclasses. - .. method:: CCompiler.executable_filename(basename, strip_dir=0, output_dir='') + .. method:: CCompiler.executable_filename(basename[, strip_dir=0, output_dir='']) Returns the filename of the executable for the given *basename*. Typically for non-Windows platforms this is the same as the basename, while Windows will get a :file:`.exe` added. - .. 
method:: CCompiler.library_filename(libname, lib_type='static', strip_dir=0, output_dir='') + .. method:: CCompiler.library_filename(libname[, lib_type='static', strip_dir=0, output_dir='']) Returns the filename for the given library name on the current platform. On Unix a library with *lib_type* of ``'static'`` will typically be of the form @@ -520,18 +520,18 @@ :file:`liblibname.so`. - .. method:: CCompiler.object_filenames(source_filenames, strip_dir=0, output_dir='') + .. method:: CCompiler.object_filenames(source_filenames[, strip_dir=0, output_dir='']) Returns the name of the object files for the given source files. *source_filenames* should be a list of filenames. - .. method:: CCompiler.shared_object_filename(basename, strip_dir=0, output_dir='') + .. method:: CCompiler.shared_object_filename(basename[, strip_dir=0, output_dir='']) Returns the name of a shared object file for the given file name *basename*. - .. method:: CCompiler.execute(func, args, msg=None, level=1) + .. method:: CCompiler.execute(func, args[, msg=None, level=1]) Invokes :func:`packaging.util.execute` This method invokes a Python function *func* with the given arguments *args*, after logging and taking into account @@ -544,7 +544,7 @@ the given command. XXX see also. - .. method:: CCompiler.mkpath(name, mode=511) + .. method:: CCompiler.mkpath(name[, mode=511]) Invokes :func:`packaging.dir_util.mkpath`. This creates a directory and any missing ancestor directories. XXX see also. diff -r 0be296605165 -r faa88c50a3d2 Doc/library/packaging.fancy_getopt.rst --- a/Doc/library/packaging.fancy_getopt.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/packaging.fancy_getopt.rst Wed May 23 21:09:05 2012 +0200 @@ -33,7 +33,7 @@ ``sys.argv[1:]`` if you pass ``None`` as *args*. -.. class:: FancyGetopt(option_table=None) +.. class:: FancyGetopt([option_table=None]) The option_table is a list of 3-tuples: ``(long_option, short_option, help_string)`` @@ -46,7 +46,7 @@ The :class:`FancyGetopt` class provides the following methods: -.. method:: FancyGetopt.getopt(args=None, object=None) +.. method:: FancyGetopt.getopt([args=None, object=None]) Parse command-line options in args. Store as attributes on *object*. @@ -67,7 +67,7 @@ yet. -.. method:: FancyGetopt.generate_help(header=None) +.. method:: FancyGetopt.generate_help([header=None]) Generate help text (a list of strings, one per suggested line of output) from the option table for this :class:`FancyGetopt` object. diff -r 0be296605165 -r faa88c50a3d2 Doc/library/select.rst --- a/Doc/library/select.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/select.rst Wed May 23 21:09:05 2012 +0200 @@ -267,7 +267,7 @@ Remove a registered file descriptor from the epoll object. -.. method:: epoll.poll(timeout=-1, maxevents=-1) +.. method:: epoll.poll([timeout=-1[, maxevents=-1]]) Wait for events. timeout in seconds (float) diff -r 0be296605165 -r faa88c50a3d2 Doc/library/shutil.rst --- a/Doc/library/shutil.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/shutil.rst Wed May 23 21:09:05 2012 +0200 @@ -47,7 +47,7 @@ be copied. -.. function:: copyfile(src, dst, symlinks=False) +.. function:: copyfile(src, dst[, symlinks=False]) Copy the contents (no metadata) of the file named *src* to a file named *dst*. *dst* must be the complete target file name; look at @@ -67,7 +67,7 @@ Added *symlinks* argument. -.. function:: copymode(src, dst, symlinks=False) +.. function:: copymode(src, dst[, symlinks=False]) Copy the permission bits from *src* to *dst*. 
The file contents, owner, and group are unaffected. *src* and *dst* are path names given as strings. If @@ -78,7 +78,7 @@ .. versionchanged:: 3.3 Added *symlinks* argument. -.. function:: copystat(src, dst, symlinks=False) +.. function:: copystat(src, dst[, symlinks=False]) Copy the permission bits, last access time, last modification time, and flags from *src* to *dst*. The file contents, owner, and group are unaffected. *src* @@ -89,7 +89,7 @@ .. versionchanged:: 3.3 Added *symlinks* argument. -.. function:: copy(src, dst, symlinks=False)) +.. function:: copy(src, dst[, symlinks=False])) Copy the file *src* to the file or directory *dst*. If *dst* is a directory, a file with the same basename as *src* is created (or overwritten) in the @@ -100,7 +100,7 @@ .. versionchanged:: 3.3 Added *symlinks* argument. -.. function:: copy2(src, dst, symlinks=False) +.. function:: copy2(src, dst[, symlinks=False]) Similar to :func:`shutil.copy`, but metadata is copied as well. This is similar to the Unix command :program:`cp -p`. If *symlinks* is true, diff -r 0be296605165 -r faa88c50a3d2 Doc/library/sqlite3.rst --- a/Doc/library/sqlite3.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/sqlite3.rst Wed May 23 21:09:05 2012 +0200 @@ -526,7 +526,7 @@ or :const:`None` when no more data is available. -.. method:: Cursor.fetchmany(size=cursor.arraysize) +.. method:: Cursor.fetchmany([size=cursor.arraysize]) Fetches the next set of rows of a query result, returning a list. An empty list is returned when no more rows are available. diff -r 0be296605165 -r faa88c50a3d2 Doc/library/stat.rst --- a/Doc/library/stat.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/stat.rst Wed May 23 21:09:05 2012 +0200 @@ -104,16 +104,6 @@ if __name__ == '__main__': walktree(sys.argv[1], visitfile) -An additional utility function is provided to covert a file's mode in a human -readable string: - -.. function:: filemode(mode) - - Convert a file's mode to a string of the form '-rwxrwxrwx'. - - .. versionadded:: 3.3 - - All the variables below are simply symbolic indexes into the 10-tuple returned by :func:`os.stat`, :func:`os.fstat` or :func:`os.lstat`. @@ -354,3 +344,4 @@ The file is a snapshot file. See the \*BSD or Mac OS systems man page :manpage:`chflags(2)` for more information. + diff -r 0be296605165 -r faa88c50a3d2 Doc/library/stdtypes.rst --- a/Doc/library/stdtypes.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/stdtypes.rst Wed May 23 21:09:05 2012 +0200 @@ -2996,10 +2996,11 @@ The Ellipsis Object ------------------- -This object is commonly used by slicing (see :ref:`slicings`). It supports no -special operations. There is exactly one ellipsis object, named -:const:`Ellipsis` (a built-in name). ``type(Ellipsis)()`` produces the -:const:`Ellipsis` singleton. +This object is commonly used by slicing (see :ref:`slicings`), but may also +be used in other situations where a sentinel value other than :const:`None` +is needed. It supports no special operations. There is exactly one ellipsis +object, named :const:`Ellipsis` (a built-in name). ``type(Ellipsis)()`` +produces the :const:`Ellipsis` singleton. It is written as ``Ellipsis`` or ``...``. diff -r 0be296605165 -r faa88c50a3d2 Doc/library/textwrap.rst --- a/Doc/library/textwrap.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/textwrap.rst Wed May 23 21:09:05 2012 +0200 @@ -107,15 +107,6 @@ expanded to spaces using the :meth:`expandtabs` method of *text*. - .. 
attribute:: tabsize - - (default: ``8``) If :attr:`expand_tabs` is true, then all tab characters - in *text* will be expanded to zero or more spaces, depending on the - current column and the given tab size. - - .. versionadded:: 3.3 - - .. attribute:: replace_whitespace (default: ``True``) If true, each whitespace character (as defined by diff -r 0be296605165 -r faa88c50a3d2 Doc/library/threading.rst --- a/Doc/library/threading.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/threading.rst Wed May 23 21:09:05 2012 +0200 @@ -426,12 +426,15 @@ Acquire a lock, blocking or non-blocking. - When invoked with the *blocking* argument set to ``True`` (the default), - block until the lock is unlocked, then set it to locked and return ``True``. + When invoked without arguments, block until the lock is unlocked, then set it to + locked, and return true. - When invoked with the *blocking* argument set to ``False``, do not block. - If a call with *blocking* set to ``True`` would block, return ``False`` - immediately; otherwise, set the lock to locked and return ``True``. + When invoked with the *blocking* argument set to true, do the same thing as when + called without arguments, and return true. + + When invoked with the *blocking* argument set to false, do not block. If a call + without an argument would block, return false immediately; otherwise, do the + same thing as when called without arguments, and return true. When invoked with the floating-point *timeout* argument set to a positive value, block for at most the number of seconds specified by *timeout* @@ -996,3 +999,27 @@ Currently, :class:`Lock`, :class:`RLock`, :class:`Condition`, :class:`Semaphore`, and :class:`BoundedSemaphore` objects may be used as :keyword:`with` statement context managers. + + +.. _threaded-imports: + +Importing in threaded code +-------------------------- + +While the import machinery is thread-safe, there are two key restrictions on +threaded imports due to inherent limitations in the way that thread-safety is +provided: + +* Firstly, other than in the main module, an import should not have the + side effect of spawning a new thread and then waiting for that thread in + any way. Failing to abide by this restriction can lead to a deadlock if + the spawned thread directly or indirectly attempts to import a module. +* Secondly, all import attempts must be completed before the interpreter + starts shutting itself down. This can be most easily achieved by only + performing imports from non-daemon threads created through the threading + module. Daemon threads and threads created directly with the thread + module will require some other form of synchronization to ensure they do + not attempt imports after system shutdown has commenced. Failure to + abide by this restriction will lead to intermittent exceptions and + crashes during interpreter shutdown (as the late imports attempt to + access machinery which is no longer in a valid state). diff -r 0be296605165 -r faa88c50a3d2 Doc/library/time.rst --- a/Doc/library/time.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/time.rst Wed May 23 21:09:05 2012 +0200 @@ -62,9 +62,9 @@ the units in which their value or argument is expressed. E.g. on most Unix systems, the clock "ticks" only 50 or 100 times a second. 
-* On the other hand, the precision of :func:`.time` and :func:`sleep` is better +* On the other hand, the precision of :func:`time` and :func:`sleep` is better than their Unix equivalents: times are expressed as floating point numbers, - :func:`.time` returns the most accurate time available (using Unix + :func:`time` returns the most accurate time available (using Unix :c:func:`gettimeofday` where available), and :func:`sleep` will accept a time with a nonzero fraction (Unix :c:func:`select` is used to implement this, where available). @@ -256,7 +256,7 @@ Convert a time expressed in seconds since the epoch to a string representing local time. If *secs* is not provided or :const:`None`, the current time as - returned by :func:`.time` is used. ``ctime(secs)`` is equivalent to + returned by :func:`time` is used. ``ctime(secs)`` is equivalent to ``asctime(localtime(secs))``. Locale information is not used by :func:`ctime`. @@ -284,7 +284,7 @@ Convert a time expressed in seconds since the epoch to a :class:`struct_time` in UTC in which the dst flag is always zero. If *secs* is not provided or - :const:`None`, the current time as returned by :func:`.time` is used. Fractions + :const:`None`, the current time as returned by :func:`time` is used. Fractions of a second are ignored. See above for a description of the :class:`struct_time` object. See :func:`calendar.timegm` for the inverse of this function. @@ -293,7 +293,7 @@ .. function:: localtime([secs]) Like :func:`gmtime` but converts to local time. If *secs* is not provided or - :const:`None`, the current time as returned by :func:`.time` is used. The dst + :const:`None`, the current time as returned by :func:`time` is used. The dst flag is set to ``1`` when DST applies to the given time. @@ -302,7 +302,7 @@ This is the inverse function of :func:`localtime`. Its argument is the :class:`struct_time` or full 9-tuple (since the dst flag is needed; use ``-1`` as the dst flag if it is unknown) which expresses the time in *local* time, not - UTC. It returns a floating point number, for compatibility with :func:`.time`. + UTC. It returns a floating point number, for compatibility with :func:`time`. If the input value cannot be represented as a valid time, either :exc:`OverflowError` or :exc:`ValueError` will be raised (which depends on whether the invalid value is caught by Python or the underlying C libraries). diff -r 0be296605165 -r faa88c50a3d2 Doc/library/types.rst --- a/Doc/library/types.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/types.rst Wed May 23 21:09:05 2012 +0200 @@ -1,5 +1,5 @@ -:mod:`types` --- Dynamic type creation and names for built-in types -=================================================================== +:mod:`types` --- Names for built-in types +========================================= .. module:: types :synopsis: Names for built-in types. @@ -8,72 +8,20 @@ -------------- -This module defines utility function to assist in dynamic creation of -new types. +This module defines names for some object types that are used by the standard +Python interpreter, but not exposed as builtins like :class:`int` or +:class:`str` are. Also, it does not include some of the types that arise +transparently during processing such as the ``listiterator`` type. -It also defines names for some object types that are used by the standard -Python interpreter, but not exposed as builtins like :class:`int` or -:class:`str` are. +Typical use is for :func:`isinstance` or :func:`issubclass` checks. 
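A small example of the :func:`isinstance` checks mentioned just above (``double`` is an invented function; the type names come from the :mod:`types` documentation in this hunk)::

   import types

   def double(x):
       return 2 * x

   print(isinstance(double, types.FunctionType))                   # True
   print(isinstance(len, types.BuiltinFunctionType))               # True
   print(isinstance((i for i in range(3)), types.GeneratorType))   # True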
- -Dynamic Type Creation ---------------------- - -.. function:: new_class(name, bases=(), kwds=None, exec_body=None) - - Creates a class object dynamically using the appropriate metaclass. - - The arguments are the components that make up a class definition: the - class name, the base classes (in order), the keyword arguments (such as - ``metaclass``) and the callback function to populate the class namespace. - - The *exec_body* callback should accept the class namespace as its sole - argument and update the namespace directly with the class contents. - - .. versionadded:: 3.3 - -.. function:: prepare_class(name, bases=(), kwds=None) - - Calculates the appropriate metaclass and creates the class namespace. - - The arguments are the components that make up a class definition: the - class name, the base classes (in order) and the keyword arguments (such as - ``metaclass``). - - The return value is a 3-tuple: ``metaclass, namespace, kwds`` - - *metaclass* is the appropriate metaclass - *namespace* is the prepared class namespace - *kwds* is an updated copy of the passed in *kwds* argument with any - ``'metaclass'`` entry removed. If no *kwds* argument is passed in, this - will be an empty dict. - - .. versionadded:: 3.3 - -.. seealso:: - - :pep:`3115` - Metaclasses in Python 3000 - Introduced the ``__prepare__`` namespace hook - - -Standard Interpreter Types --------------------------- - -This module provides names for many of the types that are required to -implement a Python interpreter. It deliberately avoids including some of -the types that arise only incidentally during processing such as the -``listiterator`` type. - -Typical use is of these names is for :func:`isinstance` or -:func:`issubclass` checks. - -Standard names are defined for the following types: +The module defines the following names: .. data:: FunctionType LambdaType - The type of user-defined functions and functions created by - :keyword:`lambda` expressions. + The type of user-defined functions and functions created by :keyword:`lambda` + expressions. .. data:: GeneratorType diff -r 0be296605165 -r faa88c50a3d2 Doc/library/urllib.request.rst --- a/Doc/library/urllib.request.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/library/urllib.request.rst Wed May 23 21:09:05 2012 +0200 @@ -16,7 +16,7 @@ The :mod:`urllib.request` module defines the following functions: -.. function:: urlopen(url, data=None[, timeout], *, cafile=None, capath=None, cadefault=True) +.. function:: urlopen(url, data=None[, timeout], *, cafile=None, capath=None) Open the URL *url*, which can be either a string or a :class:`Request` object. @@ -53,15 +53,9 @@ point to a directory of hashed certificate files. More information can be found in :meth:`ssl.SSLContext.load_verify_locations`. - The *cadefault* parameter specifies whether to fall back to loading a - default certificate store defined by the underlying OpenSSL library if the - *cafile* and *capath* parameters are omitted. This will only work on - some non-Windows platforms. - .. warning:: - If neither *cafile* nor *capath* is specified, and *cadefault* is False, - an HTTPS request will not do any verification of the server's - certificate. + If neither *cafile* nor *capath* is specified, an HTTPS request + will not do any verification of the server's certificate. This function returns a file-like object that works as a :term:`context manager`, with two additional methods from the :mod:`urllib.response` module @@ -98,9 +92,6 @@ .. versionadded:: 3.2 *data* can be an iterable object. - .. 
versionchanged:: 3.3 - *cadefault* was added. - .. function:: install_opener(opener) Install an :class:`OpenerDirector` instance as the default global opener. diff -r 0be296605165 -r faa88c50a3d2 Doc/reference/datamodel.rst --- a/Doc/reference/datamodel.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/reference/datamodel.rst Wed May 23 21:09:05 2012 +0200 @@ -35,19 +35,12 @@ Every object has an identity, a type and a value. An object's *identity* never changes once it has been created; you may think of it as the object's address in memory. The ':keyword:`is`' operator compares the identity of two objects; the -:func:`id` function returns an integer representing its identity. - -.. impl-detail:: - - For CPython, ``id(x)`` is the memory address where ``x`` is stored. - +:func:`id` function returns an integer representing its identity (currently +implemented as its address). An object's :dfn:`type` is also unchangeable. [#]_ An object's type determines the operations that the object supports (e.g., "does it have a length?") and also defines the possible values for objects of that type. The :func:`type` function returns an object's type (which is an object -itself). Like its identity, an object's :dfn:`type` is also unchangeable. -[#]_ - -The *value* of some objects can change. Objects whose value can +itself). The *value* of some objects can change. Objects whose value can change are said to be *mutable*; objects whose value is unchangeable once they are created are called *immutable*. (The value of an immutable container object that contains a reference to a mutable object can change when the latter's value @@ -1262,10 +1255,10 @@ immutable (if the object's hash value changes, it will be in the wrong hash bucket). + User-defined classes have :meth:`__eq__` and :meth:`__hash__` methods by default; with them, all objects compare unequal (except with themselves) - and ``x.__hash__()`` returns an appropriate value such that ``x == y`` - implies both that ``x is y`` and ``hash(x) == hash(y)``. + and ``x.__hash__()`` returns ``id(x)``. Classes which inherit a :meth:`__hash__` method from a parent class but change the meaning of :meth:`__eq__` such that the hash value returned is no @@ -1557,115 +1550,53 @@ Customizing class creation -------------------------- -By default, classes are constructed using :func:`type`. The class body is -executed in a new namespace and the class name is bound locally to the -result of ``type(name, bases, namespace)``. +By default, classes are constructed using :func:`type`. A class definition is +read into a separate namespace and the value of class name is bound to the +result of ``type(name, bases, dict)``. -The class creation process can be customised by passing the ``metaclass`` -keyword argument in the class definition line, or by inheriting from an -existing class that included such an argument. In the following example, -both ``MyClass`` and ``MySubclass`` are instances of ``Meta``:: +When the class definition is read, if a callable ``metaclass`` keyword argument +is passed after the bases in the class definition, the callable given will be +called instead of :func:`type`. If other keyword arguments are passed, they +will also be passed to the metaclass. This allows classes or functions to be +written which monitor or alter the class creation process: - class Meta(type): - pass +* Modifying the class dictionary prior to the class being created. 
- class MyClass(metaclass=Meta): - pass +* Returning an instance of another class -- essentially performing the role of a + factory function. - class MySubclass(MyClass): - pass +These steps will have to be performed in the metaclass's :meth:`__new__` method +-- :meth:`type.__new__` can then be called from this method to create a class +with different properties. This example adds a new element to the class +dictionary before creating the class:: -Any other keyword arguments that are specified in the class definition are -passed through to all metaclass operations described below. + class metacls(type): + def __new__(mcs, name, bases, dict): + dict['foo'] = 'metacls was here' + return type.__new__(mcs, name, bases, dict) -When a class definition is executed, the following steps occur: +You can of course also override other class methods (or add new methods); for +example defining a custom :meth:`__call__` method in the metaclass allows custom +behavior when the class is called, e.g. not always creating a new instance. -* the appropriate metaclass is determined -* the class namespace is prepared -* the class body is executed -* the class object is created +If the metaclass has a :meth:`__prepare__` attribute (usually implemented as a +class or static method), it is called before the class body is evaluated with +the name of the class and a tuple of its bases for arguments. It should return +an object that supports the mapping interface that will be used to store the +namespace of the class. The default is a plain dictionary. This could be used, +for example, to keep track of the order that class attributes are declared in by +returning an ordered dictionary. -Determining the appropriate metaclass -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +The appropriate metaclass is determined by the following precedence rules: -The appropriate metaclass for a class definition is determined as follows: +* If the ``metaclass`` keyword argument is passed with the bases, it is used. -* if no bases and no explicit metaclass are given, then :func:`type` is used -* if an explicit metaclass is given and it is *not* an instance of - :func:`type`, then it is used directly as the metaclass -* if an instance of :func:`type` is given as the explicit metaclass, or - bases are defined, then the most derived metaclass is used +* Otherwise, if there is at least one base class, its metaclass is used. -The most derived metaclass is selected from the explicitly specified -metaclass (if any) and the metaclasses (i.e. ``type(cls)``) of all specified -base classes. The most derived metaclass is one which is a subtype of *all* -of these candidate metaclasses. If none of the candidate metaclasses meets -that criterion, then the class definition will fail with ``TypeError``. - - -Preparing the class namespace -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Once the appropriate metaclass has been identified, then the class namespace -is prepared. If the metaclass has a ``__prepare__`` attribute, it is called -as ``namespace = metaclass.__prepare__(name, bases, **kwds)`` (where the -additional keyword arguments, if any, come from the class definition). - -If the metaclass has no ``__prepare__`` attribute, then the class namespace -is initialised as an empty :func:`dict` instance. - -.. seealso:: - - :pep:`3115` - Metaclasses in Python 3000 - Introduced the ``__prepare__`` namespace hook - - -Executing the class body -^^^^^^^^^^^^^^^^^^^^^^^^ - -The class body is executed (approximately) as -``exec(body, globals(), namespace)``. 
The key difference from a normal -call to :func:`exec` is that lexical scoping allows the class body (including -any methods) to reference names from the current and outer scopes when the -class definition occurs inside a function. - -However, even when the class definition occurs inside the function, methods -defined inside the class still cannot see names defined at the class scope. -Class variables must be accessed through the first parameter of instance or -class methods, and cannot be accessed at all from static methods. - - -Creating the class object -^^^^^^^^^^^^^^^^^^^^^^^^^ - -Once the class namespace has been populated by executing the class body, -the class object is created by calling -``metaclass(name, bases, namespace, **kwds)`` (the additional keywords -passed here are the same as those passed to ``__prepare__``). - -This class object is the one that will be referenced by the zero-argument -form of :func:`super`. ``__class__`` is an implicit closure reference -created by the compiler if any methods in a class body refer to either -``__class__`` or ``super``. This allows the zero argument form of -:func:`super` to correctly identify the class being defined based on -lexical scoping, while the class or instance that was used to make the -current call is identified based on the first argument passed to the method. - -After the class object is created, it is passed to the class decorators -included in the class definition (if any) and the resulting object is bound -in the local namespace as the defined class. - -.. seealso:: - - :pep:`3135` - New super - Describes the implicit ``__class__`` closure reference - - -Metaclass example -^^^^^^^^^^^^^^^^^ +* Otherwise, the default metaclass (:class:`type`) is used. The potential uses for metaclasses are boundless. Some ideas that have been -explored include logging, interface checking, automatic delegation, automatic +explored including logging, interface checking, automatic delegation, automatic property creation, proxies, frameworks, and automatic resource locking/synchronization. @@ -1678,9 +1609,9 @@ def __prepare__(metacls, name, bases, **kwds): return collections.OrderedDict() - def __new__(cls, name, bases, namespace, **kwds): - result = type.__new__(cls, name, bases, dict(namespace)) - result.members = tuple(namespace) + def __new__(cls, name, bases, classdict): + result = type.__new__(cls, name, bases, dict(classdict)) + result.members = tuple(classdict) return result class A(metaclass=OrderedClass): diff -r 0be296605165 -r faa88c50a3d2 Doc/whatsnew/3.3.rst --- a/Doc/whatsnew/3.3.rst Wed May 23 22:26:55 2012 +0200 +++ b/Doc/whatsnew/3.3.rst Wed May 23 21:09:05 2012 +0200 @@ -573,23 +573,6 @@ .. XXX mention new error messages for passing wrong number of arguments to functions -A Finer-Grained Import Lock -=========================== - -Previous versions of CPython have always relied on a global import lock. -This led to unexpected annoyances, such as deadlocks when importing a module -would trigger code execution in a different thread as a side-effect. -Clumsy workarounds were sometimes employed, such as the -:c:func:`PyImport_ImportModuleNoBlock` C API function. - -In Python 3.3, importing a module takes a per-module lock. This correctly -serializes importation of a given module from multiple threads (preventing -the exposure of incompletely initialized modules), while eliminating the -aforementioned annoyances. - -(contributed by Antoine Pitrou in :issue:`9260`.) 
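The datamodel.rst hunk above keeps the ``OrderedClass`` example, whose :meth:`__prepare__` returns a :class:`collections.OrderedDict` so the metaclass can record the order in which class attributes are defined; a usage sketch of that pattern (class and method names are illustrative)::

    import collections

    class OrderedClass(type):
        @classmethod
        def __prepare__(metacls, name, bases, **kwds):
            # The class body populates this mapping in definition order.
            return collections.OrderedDict()

        def __new__(cls, name, bases, classdict):
            result = type.__new__(cls, name, bases, dict(classdict))
            result.members = tuple(classdict)
            return result

    class A(metaclass=OrderedClass):
        def one(self): pass
        def two(self): pass
        def three(self): pass

    # Dunder entries such as __module__ aside, definition order is preserved.
    assert [m for m in A.members if not m.startswith('__')] == ['one', 'two', 'three']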
- - New and Improved Modules ======================== @@ -696,21 +679,6 @@ .. XXX addition of __slots__ to ABCs not recorded here: internal detail -contextlib ----------- - -:class:`~collections.ExitStack` now provides a solid foundation for -programmatic manipulation of context managers and similar cleanup -functionality. Unlike the previous ``contextlib.nested`` API (which was -deprecated and removed), the new API is designed to work correctly -regardless of whether context managers acquire their resources in -their ``__init__`` method (for example, file objects) or in their -``__enter__`` method (for example, synchronisation objects from the -:mod:`threading` module). - -(:issue:`13585`) - - crypt ----- @@ -866,15 +834,6 @@ (Contributed by David Townshend in :issue:`12760`) -ipaddress ---------- - -The new :mod:`ipaddress` module provides tools for creating and manipulating -objects representing IPv4 and IPv6 addresses, networks and interfaces (i.e. -an IP address associated with a specific IP subnet). - -(Contributed by Google and Peter Moody in :pep:`3144`) - lzma ---- @@ -1218,14 +1177,6 @@ (Contributed by Colin Marc in :issue:`14204`) -stat ----- - -- The undocumented tarfile.filemode function has been moved to - :func:`stat.filemode`. It can be used to convert a file's mode to a string of - the form '-rwxrwxrwx'. - - (Contributed by Giampaolo Rodolà in :issue:`14807`) sys --- @@ -1263,10 +1214,6 @@ (:issue:`14386`) -The new functions `types.new_class` and `types.prepare_class` provide support -for PEP 3115 compliant dynamic type creation. (:issue:`14588`) - - urllib ------ @@ -1305,10 +1252,6 @@ * repeating a single ASCII letter and getting a substring of a ASCII strings is 4 times faster -* UTF-8 and UTF-16 decoding is now 2x to 4x faster. - - (contributed by Serhiy Storchaka, :issue:`14624` and :issue:`14738`.) - Build and C API Changes ======================= @@ -1455,6 +1398,11 @@ .. XXX add a point about hash randomization and that it's always on in 3.3 +* :issue:`14205`: A dict lookup now raises a :exc:`RuntimeError` if the dict is + modified during the lookup. If you implement your own comparison function for + objects used as dict keys and the dict is shared by multiple threads, access + to the dict should be protected by a lock. + * :issue:`12326`: On Linux, sys.platform doesn't contain the major version anymore. It is now always 'linux', instead of 'linux2' or 'linux3' depending on the Linux version used to build Python. Replace sys.platform == 'linux2' diff -r 0be296605165 -r faa88c50a3d2 Include/asdl.h --- a/Include/asdl.h Wed May 23 22:26:55 2012 +0200 +++ b/Include/asdl.h Wed May 23 21:09:05 2012 +0200 @@ -15,17 +15,17 @@ /* XXX A sequence should be typed so that its use can be typechecked. */ typedef struct { - Py_ssize_t size; + int size; void *elements[1]; } asdl_seq; typedef struct { - Py_ssize_t size; + int size; int elements[1]; } asdl_int_seq; -asdl_seq *asdl_seq_new(Py_ssize_t size, PyArena *arena); -asdl_int_seq *asdl_int_seq_new(Py_ssize_t size, PyArena *arena); +asdl_seq *asdl_seq_new(int size, PyArena *arena); +asdl_int_seq *asdl_int_seq_new(int size, PyArena *arena); #define asdl_seq_GET(S, I) (S)->elements[(I)] #define asdl_seq_LEN(S) ((S) == NULL ? 
0 : (S)->size) diff -r 0be296605165 -r faa88c50a3d2 Include/complexobject.h --- a/Include/complexobject.h Wed May 23 22:26:55 2012 +0200 +++ b/Include/complexobject.h Wed May 23 21:09:05 2012 +0200 @@ -63,10 +63,12 @@ /* Format the object based on the format_spec, as defined in PEP 3101 (Advanced String Formatting). */ #ifndef Py_LIMITED_API -PyAPI_FUNC(PyObject *) _PyComplex_FormatAdvanced(PyObject *obj, - PyObject *format_spec, - Py_ssize_t start, - Py_ssize_t end); +PyAPI_FUNC(int) _PyComplex_FormatAdvancedWriter( + PyObject *obj, + PyObject *format_spec, + Py_ssize_t start, + Py_ssize_t end, + _PyUnicodeWriter *writer); #endif #ifdef __cplusplus diff -r 0be296605165 -r faa88c50a3d2 Include/floatobject.h --- a/Include/floatobject.h Wed May 23 22:26:55 2012 +0200 +++ b/Include/floatobject.h Wed May 23 21:09:05 2012 +0200 @@ -112,10 +112,12 @@ /* Format the object based on the format_spec, as defined in PEP 3101 (Advanced String Formatting). */ -PyAPI_FUNC(PyObject *) _PyFloat_FormatAdvanced(PyObject *obj, - PyObject *format_spec, - Py_ssize_t start, - Py_ssize_t end); +PyAPI_FUNC(int) _PyFloat_FormatAdvancedWriter( + PyObject *obj, + PyObject *format_spec, + Py_ssize_t start, + Py_ssize_t end, + _PyUnicodeWriter *writer); #endif /* Py_LIMITED_API */ #ifdef __cplusplus diff -r 0be296605165 -r faa88c50a3d2 Include/longobject.h --- a/Include/longobject.h Wed May 23 22:26:55 2012 +0200 +++ b/Include/longobject.h Wed May 23 21:09:05 2012 +0200 @@ -153,12 +153,20 @@ appending a base prefix of 0[box] if base is 2, 8 or 16. */ PyAPI_FUNC(PyObject *) _PyLong_Format(PyObject *aa, int base); +PyAPI_FUNC(int) _PyLong_FormatWriter( + PyObject *aa, + int base, + int alternate, + _PyUnicodeWriter *writer); + /* Format the object based on the format_spec, as defined in PEP 3101 (Advanced String Formatting). */ -PyAPI_FUNC(PyObject *) _PyLong_FormatAdvanced(PyObject *obj, - PyObject *format_spec, - Py_ssize_t start, - Py_ssize_t end); +PyAPI_FUNC(int) _PyLong_FormatAdvancedWriter( + PyObject *obj, + PyObject *format_spec, + Py_ssize_t start, + Py_ssize_t end, + _PyUnicodeWriter *writer); #endif /* Py_LIMITED_API */ /* These aren't really part of the long object, but they're handy. The diff -r 0be296605165 -r faa88c50a3d2 Include/pyerrors.h --- a/Include/pyerrors.h Wed May 23 22:26:55 2012 +0200 +++ b/Include/pyerrors.h Wed May 23 21:09:05 2012 +0200 @@ -10,8 +10,7 @@ /* PyException_HEAD defines the initial segment of every exception class. */ #define PyException_HEAD PyObject_HEAD PyObject *dict;\ PyObject *args; PyObject *traceback;\ - PyObject *context; PyObject *cause;\ - int suppress_context; + PyObject *context; PyObject *cause; typedef struct { PyException_HEAD @@ -115,6 +114,7 @@ /* Cause manipulation (PEP 3134) */ PyAPI_FUNC(PyObject *) PyException_GetCause(PyObject *); PyAPI_FUNC(void) PyException_SetCause(PyObject *, PyObject *); +PyAPI_FUNC(int) _PyException_SetCauseChecked(PyObject *, PyObject *); /* Context manipulation (PEP 3134) */ PyAPI_FUNC(PyObject *) PyException_GetContext(PyObject *); diff -r 0be296605165 -r faa88c50a3d2 Include/structmember.h --- a/Include/structmember.h Wed May 23 22:26:55 2012 +0200 +++ b/Include/structmember.h Wed May 23 21:09:05 2012 +0200 @@ -16,41 +16,42 @@ pointer is NULL. 
*/ typedef struct PyMemberDef { - char *name; - int type; - Py_ssize_t offset; - int flags; - char *doc; + /* Current version, use this */ + char *name; + int type; + Py_ssize_t offset; + int flags; + char *doc; } PyMemberDef; /* Types */ -#define T_SHORT 0 -#define T_INT 1 -#define T_LONG 2 -#define T_FLOAT 3 -#define T_DOUBLE 4 -#define T_STRING 5 -#define T_OBJECT 6 +#define T_SHORT 0 +#define T_INT 1 +#define T_LONG 2 +#define T_FLOAT 3 +#define T_DOUBLE 4 +#define T_STRING 5 +#define T_OBJECT 6 /* XXX the ordering here is weird for binary compatibility */ -#define T_CHAR 7 /* 1-character string */ -#define T_BYTE 8 /* 8-bit signed int */ +#define T_CHAR 7 /* 1-character string */ +#define T_BYTE 8 /* 8-bit signed int */ /* unsigned variants: */ -#define T_UBYTE 9 -#define T_USHORT 10 -#define T_UINT 11 -#define T_ULONG 12 +#define T_UBYTE 9 +#define T_USHORT 10 +#define T_UINT 11 +#define T_ULONG 12 /* Added by Jack: strings contained in the structure */ -#define T_STRING_INPLACE 13 +#define T_STRING_INPLACE 13 /* Added by Lillo: bools contained in the structure (assumed char) */ -#define T_BOOL 14 +#define T_BOOL 14 -#define T_OBJECT_EX 16 /* Like T_OBJECT, but raises AttributeError - when the value is NULL, instead of - converting to None. */ +#define T_OBJECT_EX 16 /* Like T_OBJECT, but raises AttributeError + when the value is NULL, instead of + converting to None. */ #ifdef HAVE_LONG_LONG -#define T_LONGLONG 17 +#define T_LONGLONG 17 #define T_ULONGLONG 18 #endif /* HAVE_LONG_LONG */ @@ -59,10 +60,10 @@ /* Flags */ -#define READONLY 1 -#define READ_RESTRICTED 2 +#define READONLY 1 +#define READ_RESTRICTED 2 #define PY_WRITE_RESTRICTED 4 -#define RESTRICTED (READ_RESTRICTED | PY_WRITE_RESTRICTED) +#define RESTRICTED (READ_RESTRICTED | PY_WRITE_RESTRICTED) /* Current API, use this */ diff -r 0be296605165 -r faa88c50a3d2 Include/unicodeobject.h --- a/Include/unicodeobject.h Wed May 23 22:26:55 2012 +0200 +++ b/Include/unicodeobject.h Wed May 23 21:09:05 2012 +0200 @@ -648,6 +648,17 @@ Py_ssize_t from_start, Py_ssize_t how_many ); + +/* Unsafe version of PyUnicode_CopyCharacters(): don't check + arguments and so may crash parameters are invalid (e.g. if the output + string is too short). */ +PyAPI_FUNC(void) _PyUnicode_FastCopyCharacters( + PyObject *to, + Py_ssize_t to_start, + PyObject *from, + Py_ssize_t from_start, + Py_ssize_t how_many + ); #endif /* Fill a string with a character: write fill_char into @@ -865,12 +876,68 @@ ); #ifndef Py_LIMITED_API +typedef struct { + PyObject *buffer; + void *data; + enum PyUnicode_Kind kind; + Py_UCS4 maxchar; + Py_ssize_t size; + Py_ssize_t pos; + /* minimum length of the buffer when overallocation is enabled, + see _PyUnicodeWriter_Init() */ + Py_ssize_t min_length; + struct { + unsigned char overallocate:1; + /* If readonly is 1, buffer is a shared string (cannot be modified) + and size is set to 0. */ + unsigned char readonly:1; + } flags; +} _PyUnicodeWriter ; + +/* Initialize a Unicode writer. + + min_length is used by _PyUnicodeWriter_Prepare() as the minimum length of + the buffer when overallocation is enabled (overallocate=1) */ +PyAPI_FUNC(void) +_PyUnicodeWriter_Init(_PyUnicodeWriter *writer, Py_ssize_t min_length); + +/* Prepare the buffer for to write 'length' characters + with the specified maximum character. + + Return 0 on success, raise an exception and return -1 on error. 
*/ +#define _PyUnicodeWriter_Prepare(WRITER, LENGTH, MAXCHAR) \ + (((MAXCHAR) <= (WRITER)->maxchar \ + && (LENGTH) <= (WRITER)->size - (WRITER)->pos) \ + ? 0 \ + : (((LENGTH) == 0) \ + ? 0 \ + : _PyUnicodeWriter_PrepareInternal((WRITER), (LENGTH), (MAXCHAR)))) + +/* Don't call this function directly, use the _PyUnicodeWriter_Prepare() macro + instead. */ +PyAPI_FUNC(int) +_PyUnicodeWriter_PrepareInternal(_PyUnicodeWriter *writer, + Py_ssize_t length, Py_UCS4 maxchar); + +PyAPI_FUNC(int) +_PyUnicodeWriter_WriteStr(_PyUnicodeWriter *writer, PyObject *str); + +PyAPI_FUNC(PyObject *) +_PyUnicodeWriter_Finish(_PyUnicodeWriter *writer); + +PyAPI_FUNC(void) +_PyUnicodeWriter_Dealloc(_PyUnicodeWriter *writer); +#endif + +#ifndef Py_LIMITED_API /* Format the object based on the format_spec, as defined in PEP 3101 (Advanced String Formatting). */ -PyAPI_FUNC(PyObject *) _PyUnicode_FormatAdvanced(PyObject *obj, - PyObject *format_spec, - Py_ssize_t start, - Py_ssize_t end); +PyAPI_FUNC(int) _PyUnicode_FormatAdvancedWriter( + PyObject *obj, + PyObject *format_spec, + Py_ssize_t start, + Py_ssize_t end, + _PyUnicodeWriter *writer); #endif PyAPI_FUNC(void) PyUnicode_InternInPlace(PyObject **); diff -r 0be296605165 -r faa88c50a3d2 Lib/__future__.py --- a/Lib/__future__.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/__future__.py Wed May 23 21:09:05 2012 +0200 @@ -114,7 +114,7 @@ CO_FUTURE_DIVISION) absolute_import = _Feature((2, 5, 0, "alpha", 1), - (3, 0, 0, "alpha", 0), + (2, 7, 0, "alpha", 0), CO_FUTURE_ABSOLUTE_IMPORT) with_statement = _Feature((2, 5, 0, "alpha", 1), diff -r 0be296605165 -r faa88c50a3d2 Lib/_strptime.py --- a/Lib/_strptime.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/_strptime.py Wed May 23 21:09:05 2012 +0200 @@ -444,10 +444,8 @@ else: tz = value break - leap_year_fix = False if year is None and month == 2 and day == 29: year = 1904 # 1904 is first leap year of 20th century - leap_year_fix = True elif year is None: year = 1900 # If we know the week of the year and what day of that week, we can figure @@ -478,12 +476,6 @@ else: gmtoff = None - if leap_year_fix: - # the caller didn't supply a year but asked for Feb 29th. We couldn't - # use the default of 1900 for computations. We set it back to ensure - # that February 29th is smaller than March 1st. - year = 1900 - return (year, month, day, hour, minute, second, weekday, julian, tz, gmtoff, tzname), fraction diff -r 0be296605165 -r faa88c50a3d2 Lib/contextlib.py --- a/Lib/contextlib.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/contextlib.py Wed May 23 21:09:05 2012 +0200 @@ -1,10 +1,9 @@ """Utilities for with-statement contexts. See PEP 343.""" import sys -from collections import deque from functools import wraps -__all__ = ["contextmanager", "closing", "ContextDecorator", "ExitStack"] +__all__ = ["contextmanager", "closing", "ContextDecorator"] class ContextDecorator(object): @@ -13,12 +12,12 @@ def _recreate_cm(self): """Return a recreated instance of self. - Allows an otherwise one-shot context manager like + Allows otherwise one-shot context managers like _GeneratorContextManager to support use as - a decorator via implicit recreation. + decorators via implicit recreation. - This is a private interface just for _GeneratorContextManager. - See issue #11647 for details. 
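The contextlib hunk above reworks the ``_recreate_cm`` docstring, the hook that lets an otherwise one-shot ``@contextmanager`` manager also work as a decorator; a small sketch of that behaviour (the ``tracked`` helper is invented for illustration)::

    from contextlib import contextmanager

    @contextmanager
    def tracked(log):
        log.append('enter')
        try:
            yield
        finally:
            log.append('exit')

    events = []

    @tracked(events)          # the manager is recreated for every call
    def do_work():
        events.append('work')

    do_work()
    do_work()
    assert events == ['enter', 'work', 'exit'] * 2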
+ Note: this is a private interface just for _GCM in 3.2 but will be + renamed and documented for third party use in 3.3 """ return self @@ -139,118 +138,3 @@ return self.thing def __exit__(self, *exc_info): self.thing.close() - - -# Inspired by discussions on http://bugs.python.org/issue13585 -class ExitStack(object): - """Context manager for dynamic management of a stack of exit callbacks - - For example: - - with ExitStack() as stack: - files = [stack.enter_context(open(fname)) for fname in filenames] - # All opened files will automatically be closed at the end of - # the with statement, even if attempts to open files later - # in the list throw an exception - - """ - def __init__(self): - self._exit_callbacks = deque() - - def pop_all(self): - """Preserve the context stack by transferring it to a new instance""" - new_stack = type(self)() - new_stack._exit_callbacks = self._exit_callbacks - self._exit_callbacks = deque() - return new_stack - - def _push_cm_exit(self, cm, cm_exit): - """Helper to correctly register callbacks to __exit__ methods""" - def _exit_wrapper(*exc_details): - return cm_exit(cm, *exc_details) - _exit_wrapper.__self__ = cm - self.push(_exit_wrapper) - - def push(self, exit): - """Registers a callback with the standard __exit__ method signature - - Can suppress exceptions the same way __exit__ methods can. - - Also accepts any object with an __exit__ method (registering a call - to the method instead of the object itself) - """ - # We use an unbound method rather than a bound method to follow - # the standard lookup behaviour for special methods - _cb_type = type(exit) - try: - exit_method = _cb_type.__exit__ - except AttributeError: - # Not a context manager, so assume its a callable - self._exit_callbacks.append(exit) - else: - self._push_cm_exit(exit, exit_method) - return exit # Allow use as a decorator - - def callback(self, callback, *args, **kwds): - """Registers an arbitrary callback and arguments. - - Cannot suppress exceptions. - """ - def _exit_wrapper(exc_type, exc, tb): - callback(*args, **kwds) - # We changed the signature, so using @wraps is not appropriate, but - # setting __wrapped__ may still help with introspection - _exit_wrapper.__wrapped__ = callback - self.push(_exit_wrapper) - return callback # Allow use as a decorator - - def enter_context(self, cm): - """Enters the supplied context manager - - If successful, also pushes its __exit__ method as a callback and - returns the result of the __enter__ method. 
- """ - # We look up the special methods on the type to match the with statement - _cm_type = type(cm) - _exit = _cm_type.__exit__ - result = _cm_type.__enter__(cm) - self._push_cm_exit(cm, _exit) - return result - - def close(self): - """Immediately unwind the context stack""" - self.__exit__(None, None, None) - - def __enter__(self): - return self - - def __exit__(self, *exc_details): - if not self._exit_callbacks: - return - # This looks complicated, but it is really just - # setting up a chain of try-expect statements to ensure - # that outer callbacks still get invoked even if an - # inner one throws an exception - def _invoke_next_callback(exc_details): - # Callbacks are removed from the list in FIFO order - # but the recursion means they're invoked in LIFO order - cb = self._exit_callbacks.popleft() - if not self._exit_callbacks: - # Innermost callback is invoked directly - return cb(*exc_details) - # More callbacks left, so descend another level in the stack - try: - suppress_exc = _invoke_next_callback(exc_details) - except: - suppress_exc = cb(*sys.exc_info()) - # Check if this cb suppressed the inner exception - if not suppress_exc: - raise - else: - # Check if inner cb suppressed the original exception - if suppress_exc: - exc_details = (None, None, None) - suppress_exc = cb(*exc_details) or suppress_exc - return suppress_exc - # Kick off the recursive chain - return _invoke_next_callback(exc_details) diff -r 0be296605165 -r faa88c50a3d2 Lib/functools.py --- a/Lib/functools.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/functools.py Wed May 23 21:09:05 2012 +0200 @@ -166,7 +166,7 @@ def __hash__(self): return self.hashvalue -def lru_cache(maxsize=128, typed=False): +def lru_cache(maxsize=100, typed=False): """Least-recently-used cache decorator. 
If *maxsize* is set to None, the LRU features are disabled and the cache diff -r 0be296605165 -r faa88c50a3d2 Lib/http/client.py --- a/Lib/http/client.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/http/client.py Wed May 23 21:09:05 2012 +0200 @@ -141,9 +141,6 @@ LOCKED = 423 FAILED_DEPENDENCY = 424 UPGRADE_REQUIRED = 426 -PRECONDITION_REQUIRED = 428 -TOO_MANY_REQUESTS = 429 -REQUEST_HEADER_FIELDS_TOO_LARGE = 431 # server error INTERNAL_SERVER_ERROR = 500 @@ -154,7 +151,6 @@ HTTP_VERSION_NOT_SUPPORTED = 505 INSUFFICIENT_STORAGE = 507 NOT_EXTENDED = 510 -NETWORK_AUTHENTICATION_REQUIRED = 511 # Mapping status codes to official W3C names responses = { @@ -196,9 +192,6 @@ 415: 'Unsupported Media Type', 416: 'Requested Range Not Satisfiable', 417: 'Expectation Failed', - 428: 'Precondition Required', - 429: 'Too Many Requests', - 431: 'Request Header Fields Too Large', 500: 'Internal Server Error', 501: 'Not Implemented', @@ -206,7 +199,6 @@ 503: 'Service Unavailable', 504: 'Gateway Timeout', 505: 'HTTP Version Not Supported', - 511: 'Network Authentication Required', } # maximal amount of data to read at one time in _safe_read @@ -1076,7 +1068,7 @@ self.putrequest(method, url, **skips) - if body is not None and ('content-length' not in header_names): + if body and ('content-length' not in header_names): self._set_content_length(body) for hdr, value in headers.items(): self.putheader(hdr, value) diff -r 0be296605165 -r faa88c50a3d2 Lib/http/cookies.py --- a/Lib/http/cookies.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/http/cookies.py Wed May 23 21:09:05 2012 +0200 @@ -301,7 +301,7 @@ from time import gmtime, time now = time() year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future) - return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \ + return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \ (weekdayname[wd], day, monthname[month], year, hh, mm, ss) @@ -439,7 +439,7 @@ (?P # Start of group 'val' "(?:[^\\"]|\\.)*" # Any doublequoted string | # or - \w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr + \w{3},\s[\w\d-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr | # or """ + _LegalCharsPatt + r"""* # Any word or empty string ) # End of group 'val' diff -r 0be296605165 -r faa88c50a3d2 Lib/http/server.py --- a/Lib/http/server.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/http/server.py Wed May 23 21:09:05 2012 +0200 @@ -573,7 +573,7 @@ # Table mapping response codes to messages; entries have the # form {code: (shortmessage, longmessage)}. - # See RFC 2616 and 6585. + # See RFC 2616. 
responses = { 100: ('Continue', 'Request received, please continue'), 101: ('Switching Protocols', @@ -628,12 +628,6 @@ 'Cannot satisfy request range.'), 417: ('Expectation Failed', 'Expect condition could not be satisfied.'), - 428: ('Precondition Required', - 'The origin server requires the request to be conditional.'), - 429: ('Too Many Requests', 'The user has sent too many requests ' - 'in a given amount of time ("rate limiting").'), - 431: ('Request Header Fields Too Large', 'The server is unwilling to ' - 'process the request because its header fields are too large.'), 500: ('Internal Server Error', 'Server got itself in trouble'), 501: ('Not Implemented', @@ -644,8 +638,6 @@ 504: ('Gateway Timeout', 'The gateway server did not receive a timely response'), 505: ('HTTP Version Not Supported', 'Cannot fulfill request.'), - 511: ('Network Authentication Required', - 'The client needs to authenticate to gain network access.'), } diff -r 0be296605165 -r faa88c50a3d2 Lib/importlib/_bootstrap.py --- a/Lib/importlib/_bootstrap.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/importlib/_bootstrap.py Wed May 23 21:09:05 2012 +0200 @@ -159,145 +159,6 @@ return type(_io)(name) -# Module-level locking ######################################################## - -# A dict mapping module names to weakrefs of _ModuleLock instances -_module_locks = {} -# A dict mapping thread ids to _ModuleLock instances -_blocking_on = {} - - -class _DeadlockError(RuntimeError): - pass - - -class _ModuleLock: - """A recursive lock implementation which is able to detect deadlocks - (e.g. thread 1 trying to take locks A then B, and thread 2 trying to - take locks B then A). - """ - - def __init__(self, name): - self.lock = _thread.allocate_lock() - self.wakeup = _thread.allocate_lock() - self.name = name - self.owner = None - self.count = 0 - self.waiters = 0 - - def has_deadlock(self): - # Deadlock avoidance for concurrent circular imports. - me = _thread.get_ident() - tid = self.owner - while True: - lock = _blocking_on.get(tid) - if lock is None: - return False - tid = lock.owner - if tid == me: - return True - - def acquire(self): - """ - Acquire the module lock. If a potential deadlock is detected, - a _DeadlockError is raised. - Otherwise, the lock is always acquired and True is returned. 
- """ - tid = _thread.get_ident() - _blocking_on[tid] = self - try: - while True: - with self.lock: - if self.count == 0 or self.owner == tid: - self.owner = tid - self.count += 1 - return True - if self.has_deadlock(): - raise _DeadlockError("deadlock detected by %r" % self) - if self.wakeup.acquire(False): - self.waiters += 1 - # Wait for a release() call - self.wakeup.acquire() - self.wakeup.release() - finally: - del _blocking_on[tid] - - def release(self): - tid = _thread.get_ident() - with self.lock: - if self.owner != tid: - raise RuntimeError("cannot release un-acquired lock") - assert self.count > 0 - self.count -= 1 - if self.count == 0: - self.owner = None - if self.waiters: - self.waiters -= 1 - self.wakeup.release() - - def __repr__(self): - return "_ModuleLock(%r) at %d" % (self.name, id(self)) - - -class _DummyModuleLock: - """A simple _ModuleLock equivalent for Python builds without - multi-threading support.""" - - def __init__(self, name): - self.name = name - self.count = 0 - - def acquire(self): - self.count += 1 - return True - - def release(self): - if self.count == 0: - raise RuntimeError("cannot release un-acquired lock") - self.count -= 1 - - def __repr__(self): - return "_DummyModuleLock(%r) at %d" % (self.name, id(self)) - - -# The following two functions are for consumption by Python/import.c. - -def _get_module_lock(name): - """Get or create the module lock for a given module name. - - Should only be called with the import lock taken.""" - lock = None - if name in _module_locks: - lock = _module_locks[name]() - if lock is None: - if _thread is None: - lock = _DummyModuleLock(name) - else: - lock = _ModuleLock(name) - def cb(_): - del _module_locks[name] - _module_locks[name] = _weakref.ref(lock, cb) - return lock - -def _lock_unlock_module(name): - """Release the global import lock, and acquires then release the - module lock for a given module name. - This is used to ensure a module is completely initialized, in the - event it is being imported by another thread. - - Should only be called with the import lock taken.""" - lock = _get_module_lock(name) - _imp.release_lock() - try: - lock.acquire() - except _DeadlockError: - # Concurrent circular import, we'll accept a partially initialized - # module object. - pass - else: - lock.release() - - # Finder/loader utility code ################################################## _PYCACHE = '__pycache__' @@ -403,15 +264,12 @@ else: module.__package__ = fullname.rpartition('.')[0] try: - module.__initializing__ = True # If __package__ was not set above, __import__() will do it later. return fxn(self, module, *args, **kwargs) except: if not is_reload: del sys.modules[fullname] raise - finally: - module.__initializing__ = False _wrap(module_for_loader_wrapper, fxn) return module_for_loader_wrapper @@ -1074,8 +932,7 @@ if not sys.meta_path: _warnings.warn('sys.meta_path is empty', ImportWarning) for finder in sys.meta_path: - with _ImportLockContext(): - loader = finder.find_module(name, path) + loader = finder.find_module(name, path) if loader is not None: # The parent import may have already imported this module. 
if name not in sys.modules: @@ -1105,7 +962,8 @@ _ERR_MSG = 'No module named {!r}' -def _find_and_load_unlocked(name, import_): +def _find_and_load(name, import_): + """Find and load the module.""" path = None parent = name.rpartition('.')[0] if parent: @@ -1151,19 +1009,6 @@ return module -def _find_and_load(name, import_): - """Find and load the module, and release the import lock.""" - try: - lock = _get_module_lock(name) - finally: - _imp.release_lock() - lock.acquire() - try: - return _find_and_load_unlocked(name, import_) - finally: - lock.release() - - def _gcd_import(name, package=None, level=0): """Import and return the module based on its name, the package the call is being made from, and the level adjustment. @@ -1176,17 +1021,17 @@ _sanity_check(name, package, level) if level > 0: name = _resolve_name(name, package, level) - _imp.acquire_lock() - if name not in sys.modules: + with _ImportLockContext(): + try: + module = sys.modules[name] + if module is None: + message = ("import of {} halted; " + "None in sys.modules".format(name)) + raise ImportError(message, name=name) + return module + except KeyError: + pass # Don't want to chain the exception return _find_and_load(name, _gcd_import) - module = sys.modules[name] - if module is None: - _imp.release_lock() - message = ("import of {} halted; " - "None in sys.modules".format(name)) - raise ImportError(message, name=name) - _lock_unlock_module(name) - return module def _handle_fromlist(module, fromlist, import_): @@ -1304,17 +1149,7 @@ continue else: raise ImportError('importlib requires posix or nt') - - try: - thread_module = BuiltinImporter.load_module('_thread') - except ImportError: - # Python was built without threads - thread_module = None - weakref_module = BuiltinImporter.load_module('_weakref') - setattr(self_module, '_os', os_module) - setattr(self_module, '_thread', thread_module) - setattr(self_module, '_weakref', weakref_module) setattr(self_module, 'path_sep', path_sep) setattr(self_module, 'path_separators', set(path_separators)) # Constants diff -r 0be296605165 -r faa88c50a3d2 Lib/importlib/test/test_locks.py --- a/Lib/importlib/test/test_locks.py Wed May 23 22:26:55 2012 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,115 +0,0 @@ -from importlib import _bootstrap -import time -import unittest -import weakref - -from test import support - -try: - import threading -except ImportError: - threading = None -else: - from test import lock_tests - - -LockType = _bootstrap._ModuleLock -DeadlockError = _bootstrap._DeadlockError - - -if threading is not None: - class ModuleLockAsRLockTests(lock_tests.RLockTests): - locktype = staticmethod(lambda: LockType("some_lock")) - - # _is_owned() unsupported - test__is_owned = None - # acquire(blocking=False) unsupported - test_try_acquire = None - test_try_acquire_contended = None - # `with` unsupported - test_with = None - # acquire(timeout=...) 
unsupported - test_timeout = None - # _release_save() unsupported - test_release_save_unacquired = None - -else: - class ModuleLockAsRLockTests(unittest.TestCase): - pass - - -@unittest.skipUnless(threading, "threads needed for this test") -class DeadlockAvoidanceTests(unittest.TestCase): - - def run_deadlock_avoidance_test(self, create_deadlock): - NLOCKS = 10 - locks = [LockType(str(i)) for i in range(NLOCKS)] - pairs = [(locks[i], locks[(i+1)%NLOCKS]) for i in range(NLOCKS)] - if create_deadlock: - NTHREADS = NLOCKS - else: - NTHREADS = NLOCKS - 1 - barrier = threading.Barrier(NTHREADS) - results = [] - def _acquire(lock): - """Try to acquire the lock. Return True on success, False on deadlock.""" - try: - lock.acquire() - except DeadlockError: - return False - else: - return True - def f(): - a, b = pairs.pop() - ra = _acquire(a) - barrier.wait() - rb = _acquire(b) - results.append((ra, rb)) - if rb: - b.release() - if ra: - a.release() - lock_tests.Bunch(f, NTHREADS).wait_for_finished() - self.assertEqual(len(results), NTHREADS) - return results - - def test_deadlock(self): - results = self.run_deadlock_avoidance_test(True) - # One of the threads detected a potential deadlock on its second - # acquire() call. - self.assertEqual(results.count((True, False)), 1) - self.assertEqual(results.count((True, True)), len(results) - 1) - - def test_no_deadlock(self): - results = self.run_deadlock_avoidance_test(False) - self.assertEqual(results.count((True, False)), 0) - self.assertEqual(results.count((True, True)), len(results)) - - -class LifetimeTests(unittest.TestCase): - - def test_lock_lifetime(self): - name = "xyzzy" - self.assertNotIn(name, _bootstrap._module_locks) - lock = _bootstrap._get_module_lock(name) - self.assertIn(name, _bootstrap._module_locks) - wr = weakref.ref(lock) - del lock - support.gc_collect() - self.assertNotIn(name, _bootstrap._module_locks) - self.assertIs(wr(), None) - - def test_all_locks(self): - support.gc_collect() - self.assertEqual(0, len(_bootstrap._module_locks)) - - -@support.reap_threads -def test_main(): - support.run_unittest(ModuleLockAsRLockTests, - DeadlockAvoidanceTests, - LifetimeTests) - - -if __name__ == '__main__': - test_main() diff -r 0be296605165 -r faa88c50a3d2 Lib/ipaddress.py --- a/Lib/ipaddress.py Wed May 23 22:26:55 2012 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,2204 +0,0 @@ -#!/usr/bin/python3 -# -# Copyright 2007 Google Inc. -# Licensed to PSF under a Contributor Agreement. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. - -"""A fast, lightweight IPv4/IPv6 manipulation library in Python. - -This library is used to create/poke/manipulate IPv4 and IPv6 addresses -and networks. 
- -""" - -__version__ = '1.0' - -import struct - -IPV4LENGTH = 32 -IPV6LENGTH = 128 - - -class AddressValueError(ValueError): - """A Value Error related to the address.""" - - -class NetmaskValueError(ValueError): - """A Value Error related to the netmask.""" - - -def ip_address(address, version=None): - """Take an IP string/int and return an object of the correct type. - - Args: - address: A string or integer, the IP address. Either IPv4 or - IPv6 addresses may be supplied; integers less than 2**32 will - be considered to be IPv4 by default. - version: An integer, 4 or 6. If set, don't try to automatically - determine what the IP address type is. Important for things - like ip_address(1), which could be IPv4, '192.0.2.1', or IPv6, - '2001:db8::1'. - - Returns: - An IPv4Address or IPv6Address object. - - Raises: - ValueError: if the *address* passed isn't either a v4 or a v6 - address, or if the version is not None, 4, or 6. - - """ - if version is not None: - if version == 4: - return IPv4Address(address) - elif version == 6: - return IPv6Address(address) - else: - raise ValueError() - - try: - return IPv4Address(address) - except (AddressValueError, NetmaskValueError): - pass - - try: - return IPv6Address(address) - except (AddressValueError, NetmaskValueError): - pass - - raise ValueError('%r does not appear to be an IPv4 or IPv6 address' % - address) - - -def ip_network(address, version=None, strict=True): - """Take an IP string/int and return an object of the correct type. - - Args: - address: A string or integer, the IP network. Either IPv4 or - IPv6 networks may be supplied; integers less than 2**32 will - be considered to be IPv4 by default. - version: An integer, 4 or 6. If set, don't try to automatically - determine what the IP address type is. Important for things - like ip_network(1), which could be IPv4, '192.0.2.1/32', or IPv6, - '2001:db8::1/128'. - - Returns: - An IPv4Network or IPv6Network object. - - Raises: - ValueError: if the string passed isn't either a v4 or a v6 - address. Or if the network has host bits set. Or if the version - is not None, 4, or 6. - - """ - if version is not None: - if version == 4: - return IPv4Network(address, strict) - elif version == 6: - return IPv6Network(address, strict) - else: - raise ValueError() - - try: - return IPv4Network(address, strict) - except (AddressValueError, NetmaskValueError): - pass - - try: - return IPv6Network(address, strict) - except (AddressValueError, NetmaskValueError): - pass - - raise ValueError('%r does not appear to be an IPv4 or IPv6 network' % - address) - - -def ip_interface(address, version=None): - """Take an IP string/int and return an object of the correct type. - - Args: - address: A string or integer, the IP address. Either IPv4 or - IPv6 addresses may be supplied; integers less than 2**32 will - be considered to be IPv4 by default. - version: An integer, 4 or 6. If set, don't try to automatically - determine what the IP address type is. Important for things - like ip_interface(1), which could be IPv4, '192.0.2.1/32', or IPv6, - '2001:db8::1/128'. - - Returns: - An IPv4Interface or IPv6Interface object. - - Raises: - ValueError: if the string passed isn't either a v4 or a v6 - address. Or if the version is not None, 4, or 6. - - Notes: - The IPv?Interface classes describe an Address on a particular - Network, so they're basically a combination of both the Address - and Network classes. 
- """ - if version is not None: - if version == 4: - return IPv4Interface(address) - elif version == 6: - return IPv6Interface(address) - else: - raise ValueError() - - try: - return IPv4Interface(address) - except (AddressValueError, NetmaskValueError): - pass - - try: - return IPv6Interface(address) - except (AddressValueError, NetmaskValueError): - pass - - raise ValueError('%r does not appear to be an IPv4 or IPv6 network' % - address) - - -def v4_int_to_packed(address): - """Represent an address as 4 packed bytes in network (big-endian) order. - - Args: - address: An integer representation of an IPv4 IP address. - - Returns: - The integer address packed as 4 bytes in network (big-endian) order. - - Raises: - ValueError: If the integer is negative or too large to be an - IPv4 IP address. - """ - try: - return struct.pack('!I', address) - except: - raise ValueError("Address negative or too large for IPv4") - - -def v6_int_to_packed(address): - """Represent an address as 16 packed bytes in network (big-endian) order. - - Args: - address: An integer representation of an IPv4 IP address. - - Returns: - The integer address packed as 16 bytes in network (big-endian) order. - """ - try: - return struct.pack('!QQ', address >> 64, address & (2**64 - 1)) - except: - raise ValueError("Address negative or too large for IPv6") - - -def _find_address_range(addresses): - """Find a sequence of IPv#Address. - - Args: - addresses: a list of IPv#Address objects. - - Returns: - A tuple containing the first and last IP addresses in the sequence. - - """ - first = last = addresses[0] - for ip in addresses[1:]: - if ip._ip == last._ip + 1: - last = ip - else: - break - return (first, last) - -def _get_prefix_length(number1, number2, bits): - """Get the number of leading bits that are same for two numbers. - - Args: - number1: an integer. - number2: another integer. - bits: the maximum number of bits to compare. - - Returns: - The number of leading bits that are the same for two numbers. - - """ - for i in range(bits): - if number1 >> i == number2 >> i: - return bits - i - return 0 - -def _count_righthand_zero_bits(number, bits): - """Count the number of zero bits on the right hand side. - - Args: - number: an integer. - bits: maximum number of bits to count. - - Returns: - The number of zero bits on the right hand side of the number. - - """ - if number == 0: - return bits - for i in range(bits): - if (number >> i) % 2: - return i - - -def summarize_address_range(first, last): - """Summarize a network range given the first and last IP addresses. - - Example: - >>> summarize_address_range(IPv4Address('192.0.2.0'), - IPv4Address('192.0.2.130')) - [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'), - IPv4Network('192.0.2.130/32')] - - Args: - first: the first IPv4Address or IPv6Address in the range. - last: the last IPv4Address or IPv6Address in the range. - - Returns: - An iterator of the summarized IPv(4|6) network objects. - - Raise: - TypeError: - If the first and last objects are not IP addresses. - If the first and last objects are not the same version. - ValueError: - If the last object is not greater than the first. - If the version is not 4 or 6. 
- - """ - if not (isinstance(first, _BaseAddress) and isinstance(last, _BaseAddress)): - raise TypeError('first and last must be IP addresses, not networks') - if first.version != last.version: - raise TypeError("%s and %s are not of the same version" % ( - str(first), str(last))) - if first > last: - raise ValueError('last IP address must be greater than first') - - networks = [] - - if first.version == 4: - ip = IPv4Network - elif first.version == 6: - ip = IPv6Network - else: - raise ValueError('unknown IP version') - - ip_bits = first._max_prefixlen - first_int = first._ip - last_int = last._ip - while first_int <= last_int: - nbits = _count_righthand_zero_bits(first_int, ip_bits) - current = None - while nbits >= 0: - addend = 2**nbits - 1 - current = first_int + addend - nbits -= 1 - if current <= last_int: - break - prefix = _get_prefix_length(first_int, current, ip_bits) - net = ip('%s/%d' % (str(first), prefix)) - yield net - #networks.append(net) - if current == ip._ALL_ONES: - break - first_int = current + 1 - first = ip_address(first_int, version=first._version) - -def _collapse_addresses_recursive(addresses): - """Loops through the addresses, collapsing concurrent netblocks. - - Example: - - ip1 = IPv4Network('192.0.2.0/26') - ip2 = IPv4Network('192.0.2.64/26') - ip3 = IPv4Network('192.0.2.128/26') - ip4 = IPv4Network('192.0.2.192/26') - - _collapse_addresses_recursive([ip1, ip2, ip3, ip4]) -> - [IPv4Network('192.0.2.0/24')] - - This shouldn't be called directly; it is called via - collapse_addresses([]). - - Args: - addresses: A list of IPv4Network's or IPv6Network's - - Returns: - A list of IPv4Network's or IPv6Network's depending on what we were - passed. - - """ - ret_array = [] - optimized = False - - for cur_addr in addresses: - if not ret_array: - ret_array.append(cur_addr) - continue - if (cur_addr.network_address >= ret_array[-1].network_address and - cur_addr.broadcast_address <= ret_array[-1].broadcast_address): - optimized = True - elif cur_addr == list(ret_array[-1].supernet().subnets())[1]: - ret_array.append(ret_array.pop().supernet()) - optimized = True - else: - ret_array.append(cur_addr) - - if optimized: - return _collapse_addresses_recursive(ret_array) - - return ret_array - - -def collapse_addresses(addresses): - """Collapse a list of IP objects. - - Example: - collapse_addresses([IPv4Network('192.0.2.0/25'), - IPv4Network('192.0.2.128/25')]) -> - [IPv4Network('192.0.2.0/24')] - - Args: - addresses: An iterator of IPv4Network or IPv6Network objects. - - Returns: - An iterator of the collapsed IPv(4|6)Network objects. - - Raises: - TypeError: If passed a list of mixed version objects. 
- - """ - i = 0 - addrs = [] - ips = [] - nets = [] - - # split IP addresses and networks - for ip in addresses: - if isinstance(ip, _BaseAddress): - if ips and ips[-1]._version != ip._version: - raise TypeError("%s and %s are not of the same version" % ( - str(ip), str(ips[-1]))) - ips.append(ip) - elif ip._prefixlen == ip._max_prefixlen: - if ips and ips[-1]._version != ip._version: - raise TypeError("%s and %s are not of the same version" % ( - str(ip), str(ips[-1]))) - try: - ips.append(ip.ip) - except AttributeError: - ips.append(ip.network_address) - else: - if nets and nets[-1]._version != ip._version: - raise TypeError("%s and %s are not of the same version" % ( - str(ip), str(ips[-1]))) - nets.append(ip) - - # sort and dedup - ips = sorted(set(ips)) - nets = sorted(set(nets)) - - while i < len(ips): - (first, last) = _find_address_range(ips[i:]) - i = ips.index(last) + 1 - addrs.extend(summarize_address_range(first, last)) - - return iter(_collapse_addresses_recursive(sorted( - addrs + nets, key=_BaseNetwork._get_networks_key))) - - -def get_mixed_type_key(obj): - """Return a key suitable for sorting between networks and addresses. - - Address and Network objects are not sortable by default; they're - fundamentally different so the expression - - IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24') - - doesn't make any sense. There are some times however, where you may wish - to have ipaddress sort these for you anyway. If you need to do this, you - can use this function as the key= argument to sorted(). - - Args: - obj: either a Network or Address object. - Returns: - appropriate key. - - """ - if isinstance(obj, _BaseNetwork): - return obj._get_networks_key() - elif isinstance(obj, _BaseAddress): - return obj._get_address_key() - return NotImplemented - - -class _IPAddressBase(object): - - """The mother class.""" - - @property - def exploded(self): - """Return the longhand version of the IP address as a string.""" - return self._explode_shorthand_ip_string() - - @property - def compressed(self): - """Return the shorthand version of the IP address as a string.""" - return str(self) - - def _ip_int_from_prefix(self, prefixlen=None): - """Turn the prefix length netmask into a int for comparison. - - Args: - prefixlen: An integer, the prefix length. - - Returns: - An integer. - - """ - if not prefixlen and prefixlen != 0: - prefixlen = self._prefixlen - return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen) - - def _prefix_from_ip_int(self, ip_int, mask=32): - """Return prefix length from the decimal netmask. - - Args: - ip_int: An integer, the IP address. - mask: The netmask. Defaults to 32. - - Returns: - An integer, the prefix length. - - """ - while mask: - if ip_int & 1 == 1: - break - ip_int >>= 1 - mask -= 1 - - return mask - - def _ip_string_from_prefix(self, prefixlen=None): - """Turn a prefix length into a dotted decimal string. - - Args: - prefixlen: An integer, the netmask prefix length. - - Returns: - A string, the dotted decimal netmask string. - - """ - if not prefixlen: - prefixlen = self._prefixlen - return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen)) - - -class _BaseAddress(_IPAddressBase): - - """A generic IP object. - - This IP class contains the version independent methods which are - used by single IP addresses. 
- - """ - - def __init__(self, address): - if (not isinstance(address, bytes) - and '/' in str(address)): - raise AddressValueError(address) - - def __index__(self): - return self._ip - - def __int__(self): - return self._ip - - def __hex__(self): - return hex(self._ip) - - def __eq__(self, other): - try: - return (self._ip == other._ip - and self._version == other._version) - except AttributeError: - return NotImplemented - - def __ne__(self, other): - eq = self.__eq__(other) - if eq is NotImplemented: - return NotImplemented - return not eq - - def __le__(self, other): - gt = self.__gt__(other) - if gt is NotImplemented: - return NotImplemented - return not gt - - def __ge__(self, other): - lt = self.__lt__(other) - if lt is NotImplemented: - return NotImplemented - return not lt - - def __lt__(self, other): - if self._version != other._version: - raise TypeError('%s and %s are not of the same version' % ( - str(self), str(other))) - if not isinstance(other, _BaseAddress): - raise TypeError('%s and %s are not of the same type' % ( - str(self), str(other))) - if self._ip != other._ip: - return self._ip < other._ip - return False - - def __gt__(self, other): - if self._version != other._version: - raise TypeError('%s and %s are not of the same version' % ( - str(self), str(other))) - if not isinstance(other, _BaseAddress): - raise TypeError('%s and %s are not of the same type' % ( - str(self), str(other))) - if self._ip != other._ip: - return self._ip > other._ip - return False - - # Shorthand for Integer addition and subtraction. This is not - # meant to ever support addition/subtraction of addresses. - def __add__(self, other): - if not isinstance(other, int): - return NotImplemented - return ip_address(int(self) + other, version=self._version) - - def __sub__(self, other): - if not isinstance(other, int): - return NotImplemented - return ip_address(int(self) - other, version=self._version) - - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, str(self)) - - def __str__(self): - return '%s' % self._string_from_ip_int(self._ip) - - def __hash__(self): - return hash(hex(int(self._ip))) - - def _get_address_key(self): - return (self._version, self) - - @property - def version(self): - raise NotImplementedError('BaseIP has no version') - - -class _BaseNetwork(_IPAddressBase): - - """A generic IP object. - - This IP class contains the version independent methods which are - used by networks. - - """ - - def __init__(self, address): - self._cache = {} - - def __index__(self): - return int(self.network_address) ^ self.prefixlen - - def __int__(self): - return int(self.network_address) - - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, str(self)) - - def hosts(self): - """Generate Iterator over usable hosts in a network. - - This is like __iter__ except it doesn't return the network - or broadcast addresses. 
- - """ - cur = int(self.network_address) + 1 - bcast = int(self.broadcast_address) - 1 - while cur <= bcast: - cur += 1 - yield ip_address(cur - 1, version=self._version) - - def __iter__(self): - cur = int(self.network_address) - bcast = int(self.broadcast_address) - while cur <= bcast: - cur += 1 - yield ip_address(cur - 1, version=self._version) - - def __getitem__(self, n): - network = int(self.network_address) - broadcast = int(self.broadcast_address) - if n >= 0: - if network + n > broadcast: - raise IndexError - return ip_address(network + n, version=self._version) - else: - n += 1 - if broadcast + n < network: - raise IndexError - return ip_address(broadcast + n, version=self._version) - - def __lt__(self, other): - if self._version != other._version: - raise TypeError('%s and %s are not of the same version' % ( - str(self), str(other))) - if not isinstance(other, _BaseNetwork): - raise TypeError('%s and %s are not of the same type' % ( - str(self), str(other))) - if self.network_address != other.network_address: - return self.network_address < other.network_address - if self.netmask != other.netmask: - return self.netmask < other.netmask - return False - - def __gt__(self, other): - if self._version != other._version: - raise TypeError('%s and %s are not of the same version' % ( - str(self), str(other))) - if not isinstance(other, _BaseNetwork): - raise TypeError('%s and %s are not of the same type' % ( - str(self), str(other))) - if self.network_address != other.network_address: - return self.network_address > other.network_address - if self.netmask != other.netmask: - return self.netmask > other.netmask - return False - - def __le__(self, other): - gt = self.__gt__(other) - if gt is NotImplemented: - return NotImplemented - return not gt - - def __ge__(self, other): - lt = self.__lt__(other) - if lt is NotImplemented: - return NotImplemented - return not lt - - def __eq__(self, other): - if not isinstance(other, _BaseNetwork): - raise TypeError('%s and %s are not of the same type' % ( - str(self), str(other))) - return (self._version == other._version and - self.network_address == other.network_address and - int(self.netmask) == int(other.netmask)) - - def __ne__(self, other): - eq = self.__eq__(other) - if eq is NotImplemented: - return NotImplemented - return not eq - - def __str__(self): - return '%s/%s' % (str(self.ip), - str(self._prefixlen)) - - def __hash__(self): - return hash(int(self.network_address) ^ int(self.netmask)) - - def __contains__(self, other): - # always false if one is v4 and the other is v6. - if self._version != other._version: - return False - # dealing with another network. 
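The arithmetic, iteration and indexing hooks defined above give addresses an integer-offset shorthand and networks a sequence-like feel; a short sketch of the resulting behaviour, again assuming the module above is importable as ipaddress and using illustrative values.

    import ipaddress

    net = ipaddress.ip_network('192.0.2.0/29')
    addr = ipaddress.ip_address('192.0.2.1')

    print(addr + 5)           # _BaseAddress.__add__: integer offset, 192.0.2.6
    print(list(net.hosts()))  # usable hosts only, network/broadcast excluded
    print(net[3])             # __getitem__: 192.0.2.3
    print(addr in net)        # membership test: True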
- if isinstance(other, _BaseNetwork): - return False - # dealing with another address - else: - # address - return (int(self.network_address) <= int(other._ip) <= - int(self.broadcast_address)) - - def overlaps(self, other): - """Tell if self is partly contained in other.""" - return self.network_address in other or ( - self.broadcast_address in other or ( - other.network_address in self or ( - other.broadcast_address in self))) - - @property - def broadcast_address(self): - x = self._cache.get('broadcast_address') - if x is None: - x = ip_address(int(self.network_address) | int(self.hostmask), - version=self._version) - self._cache['broadcast_address'] = x - return x - - @property - def hostmask(self): - x = self._cache.get('hostmask') - if x is None: - x = ip_address(int(self.netmask) ^ self._ALL_ONES, - version=self._version) - self._cache['hostmask'] = x - return x - - @property - def network(self): - return ip_network('%s/%d' % (str(self.network_address), - self.prefixlen)) - - @property - def with_prefixlen(self): - return '%s/%d' % (str(self.ip), self._prefixlen) - - @property - def with_netmask(self): - return '%s/%s' % (str(self.ip), str(self.netmask)) - - @property - def with_hostmask(self): - return '%s/%s' % (str(self.ip), str(self.hostmask)) - - @property - def num_addresses(self): - """Number of hosts in the current subnet.""" - return int(self.broadcast_address) - int(self.network_address) + 1 - - @property - def version(self): - raise NotImplementedError('BaseNet has no version') - - @property - def prefixlen(self): - return self._prefixlen - - def address_exclude(self, other): - """Remove an address from a larger block. - - For example: - - addr1 = ip_network('192.0.2.0/28') - addr2 = ip_network('192.0.2.1/32') - addr1.address_exclude(addr2) = - [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'), - IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')] - - or IPv6: - - addr1 = ip_network('2001:db8::1/32') - addr2 = ip_network('2001:db8::1/128') - addr1.address_exclude(addr2) = - [ip_network('2001:db8::1/128'), - ip_network('2001:db8::2/127'), - ip_network('2001:db8::4/126'), - ip_network('2001:db8::8/125'), - ... - ip_network('2001:db8:8000::/33')] - - Args: - other: An IPv4Network or IPv6Network object of the same type. - - Returns: - An iterator of the the IPv(4|6)Network objects which is self - minus other. - - Raises: - TypeError: If self and other are of difffering address - versions, or if other is not a network object. - ValueError: If other is not completely contained by self. - - """ - if not self._version == other._version: - raise TypeError("%s and %s are not of the same version" % ( - str(self), str(other))) - - if not isinstance(other, _BaseNetwork): - raise TypeError("%s is not a network object" % str(other)) - - if not (other.network_address >= self.network_address and - other.broadcast_address <= self.broadcast_address): - raise ValueError('%s not contained in %s' % (str(other), str(self))) - - if other == self: - raise StopIteration - - ret_addrs = [] - - # Make sure we're comparing the network of other. 
- other = ip_network('%s/%s' % (str(other.network_address), - str(other.prefixlen)), - version=other._version) - - s1, s2 = self.subnets() - while s1 != other and s2 != other: - if (other.network_address >= s1.network_address and - other.broadcast_address <= s1.broadcast_address): - yield s2 - s1, s2 = s1.subnets() - elif (other.network_address >= s2.network_address and - other.broadcast_address <= s2.broadcast_address): - yield s1 - s1, s2 = s2.subnets() - else: - # If we got here, there's a bug somewhere. - raise AssertionError('Error performing exclusion: ' - 's1: %s s2: %s other: %s' % - (str(s1), str(s2), str(other))) - if s1 == other: - yield s2 - elif s2 == other: - yield s1 - else: - # If we got here, there's a bug somewhere. - raise AssertionError('Error performing exclusion: ' - 's1: %s s2: %s other: %s' % - (str(s1), str(s2), str(other))) - - def compare_networks(self, other): - """Compare two IP objects. - - This is only concerned about the comparison of the integer - representation of the network addresses. This means that the - host bits aren't considered at all in this method. If you want - to compare host bits, you can easily enough do a - 'HostA._ip < HostB._ip' - - Args: - other: An IP object. - - Returns: - If the IP versions of self and other are the same, returns: - - -1 if self < other: - eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25') - IPv6Network('2001:db8::1000/124') < - IPv6Network('2001:db8::2000/124') - 0 if self == other - eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24') - IPv6Network('2001:db8::1000/124') == - IPv6Network('2001:db8::1000/124') - 1 if self > other - eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25') - IPv6Network('2001:db8::2000/124') > - IPv6Network('2001:db8::1000/124') - - Raises: - TypeError if the IP versions are different. - - """ - # does this need to raise a ValueError? - if self._version != other._version: - raise TypeError('%s and %s are not of the same type' % ( - str(self), str(other))) - # self._version == other._version below here: - if self.network_address < other.network_address: - return -1 - if self.network_address > other.network_address: - return 1 - # self.network_address == other.network_address below here: - if self.netmask < other.netmask: - return -1 - if self.netmask > other.netmask: - return 1 - return 0 - - def _get_networks_key(self): - """Network-only key function. - - Returns an object that identifies this address' network and - netmask. This function is a suitable "key" argument for sorted() - and list.sort(). - - """ - return (self._version, self.network_address, self.netmask) - - def subnets(self, prefixlen_diff=1, new_prefix=None): - """The subnets which join to make the current subnet. - - In the case that self contains only one IP - (self._prefixlen == 32 for IPv4 or self._prefixlen == 128 - for IPv6), yield an iterator with just ourself. - - Args: - prefixlen_diff: An integer, the amount the prefix length - should be increased by. This should not be set if - new_prefix is also set. - new_prefix: The desired new prefix length. This must be a - larger number (smaller prefix) than the existing prefix. - This should not be set if prefixlen_diff is also set. - - Returns: - An iterator of IPv(4|6) objects. - - Raises: - ValueError: The prefixlen_diff is too small or too large. 
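The address_exclude() generator above is easiest to follow with the concrete values from its own docstring; a hedged sketch, assuming ipaddress is importable (the generator's yield order is not significant here, so the result is sorted for display).

    import ipaddress

    big = ipaddress.ip_network('192.0.2.0/28')
    small = ipaddress.ip_network('192.0.2.1/32')
    print(sorted(big.address_exclude(small)))
    # [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
    #  IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]

    # compare_networks() looks only at network address and netmask.
    print(big.compare_networks(ipaddress.ip_network('192.0.2.16/28')))  # -1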
- OR - prefixlen_diff and new_prefix are both set or new_prefix - is a smaller number than the current prefix (smaller - number means a larger network) - - """ - if self._prefixlen == self._max_prefixlen: - yield self - return - - if new_prefix is not None: - if new_prefix < self._prefixlen: - raise ValueError('new prefix must be longer') - if prefixlen_diff != 1: - raise ValueError('cannot set prefixlen_diff and new_prefix') - prefixlen_diff = new_prefix - self._prefixlen - - if prefixlen_diff < 0: - raise ValueError('prefix length diff must be > 0') - new_prefixlen = self._prefixlen + prefixlen_diff - - if not self._is_valid_netmask(str(new_prefixlen)): - raise ValueError( - 'prefix length diff %d is invalid for netblock %s' % ( - new_prefixlen, str(self))) - - first = ip_network('%s/%s' % (str(self.network_address), - str(self._prefixlen + prefixlen_diff)), - version=self._version) - - yield first - current = first - while True: - broadcast = current.broadcast_address - if broadcast == self.broadcast_address: - return - new_addr = ip_address(int(broadcast) + 1, version=self._version) - current = ip_network('%s/%s' % (str(new_addr), str(new_prefixlen)), - version=self._version) - - yield current - - def masked(self): - """Return the network object with the host bits masked out.""" - return ip_network('%s/%d' % (self.network_address, self._prefixlen), - version=self._version) - - def supernet(self, prefixlen_diff=1, new_prefix=None): - """The supernet containing the current network. - - Args: - prefixlen_diff: An integer, the amount the prefix length of - the network should be decreased by. For example, given a - /24 network and a prefixlen_diff of 3, a supernet with a - /21 netmask is returned. - - Returns: - An IPv4 network object. - - Raises: - ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have a - negative prefix length. - OR - If prefixlen_diff and new_prefix are both set or new_prefix is a - larger number than the current prefix (larger number means a - smaller network) - - """ - if self._prefixlen == 0: - return self - - if new_prefix is not None: - if new_prefix > self._prefixlen: - raise ValueError('new prefix must be shorter') - if prefixlen_diff != 1: - raise ValueError('cannot set prefixlen_diff and new_prefix') - prefixlen_diff = self._prefixlen - new_prefix - - - if self.prefixlen - prefixlen_diff < 0: - raise ValueError( - 'current prefixlen is %d, cannot have a prefixlen_diff of %d' % - (self.prefixlen, prefixlen_diff)) - # TODO (pmoody): optimize this. - t = ip_network('%s/%d' % (str(self.network_address), - self.prefixlen - prefixlen_diff), - version=self._version, strict=False) - return ip_network('%s/%d' % (str(t.network_address), t.prefixlen), - version=t._version) - - -class _BaseV4(object): - - """Base IPv4 object. - - The following methods are used by IPv4 objects in both single IP - addresses and networks. - - """ - - # Equivalent to 255.255.255.255 or 32 bits of 1's. - _ALL_ONES = (2**IPV4LENGTH) - 1 - _DECIMAL_DIGITS = frozenset('0123456789') - - def __init__(self, address): - self._version = 4 - self._max_prefixlen = IPV4LENGTH - - def _explode_shorthand_ip_string(self): - return str(self) - - def _ip_int_from_string(self, ip_str): - """Turn the given IP string into an integer for comparison. - - Args: - ip_str: A string, the IP ip_str. - - Returns: - The IP ip_str as an integer. - - Raises: - AddressValueError: if ip_str isn't a valid IPv4 Address. 
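subnets() and supernet() above move the prefix length in opposite directions, either by a relative prefixlen_diff or to an absolute new_prefix; a small sketch with illustrative values, assuming ipaddress is importable.

    import ipaddress

    net = ipaddress.ip_network('192.0.2.0/24')
    print(list(net.subnets()))               # two /25 halves
    print(list(net.subnets(new_prefix=26)))  # four /26 blocks
    print(net.supernet())                    # one step shorter prefix: 192.0.2.0/23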
- - """ - octets = ip_str.split('.') - if len(octets) != 4: - raise AddressValueError(ip_str) - - packed_ip = 0 - for oc in octets: - try: - packed_ip = (packed_ip << 8) | self._parse_octet(oc) - except ValueError: - raise AddressValueError(ip_str) - return packed_ip - - def _parse_octet(self, octet_str): - """Convert a decimal octet into an integer. - - Args: - octet_str: A string, the number to parse. - - Returns: - The octet as an integer. - - Raises: - ValueError: if the octet isn't strictly a decimal from [0..255]. - - """ - # Whitelist the characters, since int() allows a lot of bizarre stuff. - if not self._DECIMAL_DIGITS.issuperset(octet_str): - raise ValueError - octet_int = int(octet_str, 10) - # Disallow leading zeroes, because no clear standard exists on - # whether these should be interpreted as decimal or octal. - if octet_int > 255 or (octet_str[0] == '0' and len(octet_str) > 1): - raise ValueError - return octet_int - - def _string_from_ip_int(self, ip_int): - """Turns a 32-bit integer into dotted decimal notation. - - Args: - ip_int: An integer, the IP address. - - Returns: - The IP address as a string in dotted decimal notation. - - """ - octets = [] - for _ in range(4): - octets.insert(0, str(ip_int & 0xFF)) - ip_int >>= 8 - return '.'.join(octets) - - @property - def max_prefixlen(self): - return self._max_prefixlen - - @property - def version(self): - return self._version - - @property - def is_reserved(self): - """Test if the address is otherwise IETF reserved. - - Returns: - A boolean, True if the address is within the - reserved IPv4 Network range. - - """ - reserved_network = IPv4Network('240.0.0.0/4') - if isinstance(self, _BaseAddress): - return self in reserved_network - return (self.network_address in reserved_network and - self.broadcast_address in reserved_network) - - @property - def is_private(self): - """Test if this address is allocated for private networks. - - Returns: - A boolean, True if the address is reserved per RFC 1918. - - """ - private_10 = IPv4Network('10.0.0.0/8') - private_172 = IPv4Network('172.16.0.0/12') - private_192 = IPv4Network('192.168.0.0/16') - if isinstance(self, _BaseAddress): - return (self in private_10 or self in private_172 or - self in private_192) - else: - return ((self.network_address in private_10 and - self.broadcast_address in private_10) or - (self.network_address in private_172 and - self.broadcast_address in private_172) or - (self.network_address in private_192 and - self.broadcast_address in private_192)) - - @property - def is_multicast(self): - """Test if the address is reserved for multicast use. - - Returns: - A boolean, True if the address is multicast. - See RFC 3171 for details. - - """ - multicast_network = IPv4Network('224.0.0.0/4') - if isinstance(self, _BaseAddress): - return self in IPv4Network('224.0.0.0/4') - return (self.network_address in multicast_network and - self.broadcast_address in multicast_network) - - @property - def is_unspecified(self): - """Test if the address is unspecified. - - Returns: - A boolean, True if this is the unspecified address as defined in - RFC 5735 3. - - """ - unspecified_address = IPv4Address('0.0.0.0') - if isinstance(self, _BaseAddress): - return self in unspecified_address - return (self.network_address == self.broadcast_address == - unspecified_address) - - @property - def is_loopback(self): - """Test if the address is a loopback address. - - Returns: - A boolean, True if the address is a loopback per RFC 3330. 
- - """ - loopback_address = IPv4Network('127.0.0.0/8') - if isinstance(self, _BaseAddress): - return self in loopback_address - - return (self.network_address in loopback_address and - self.broadcast_address in loopback_address) - - @property - def is_link_local(self): - """Test if the address is reserved for link-local. - - Returns: - A boolean, True if the address is link-local per RFC 3927. - - """ - linklocal_network = IPv4Network('169.254.0.0/16') - if isinstance(self, _BaseAddress): - return self in linklocal_network - return (self.network_address in linklocal_network and - self.broadcast_address in linklocal_network) - - -class IPv4Address(_BaseV4, _BaseAddress): - - """Represent and manipulate single IPv4 Addresses.""" - - def __init__(self, address): - - """ - Args: - address: A string or integer representing the IP - - Additionally, an integer can be passed, so - IPv4Address('192.0.2.1') == IPv4Address(3221225985). - or, more generally - IPv4Address(int(IPv4Address('192.0.2.1'))) == - IPv4Address('192.0.2.1') - - Raises: - AddressValueError: If ipaddressisn't a valid IPv4 address. - - """ - _BaseAddress.__init__(self, address) - _BaseV4.__init__(self, address) - - # Efficient constructor from integer. - if isinstance(address, int): - self._ip = address - if address < 0 or address > self._ALL_ONES: - raise AddressValueError(address) - return - - # Constructing from a packed address - if isinstance(address, bytes) and len(address) == 4: - self._ip = struct.unpack('!I', address)[0] - return - - # Assume input argument to be string or any object representation - # which converts into a formatted IP string. - addr_str = str(address) - self._ip = self._ip_int_from_string(addr_str) - - @property - def packed(self): - """The binary representation of this address.""" - return v4_int_to_packed(self._ip) - - -class IPv4Interface(IPv4Address): - - # the valid octets for host and netmasks. only useful for IPv4. - _valid_mask_octets = set((255, 254, 252, 248, 240, 224, 192, 128, 0)) - - def __init__(self, address): - if isinstance(address, (bytes, int)): - IPv4Address.__init__(self, address) - self.network = IPv4Network(self._ip) - self._prefixlen = self._max_prefixlen - return - - addr = str(address).split('/') - if len(addr) > 2: - raise AddressValueError(address) - IPv4Address.__init__(self, addr[0]) - - self.network = IPv4Network(address, strict=False) - self._prefixlen = self.network._prefixlen - - self.netmask = self.network.netmask - self.hostmask = self.network.hostmask - - - def __str__(self): - return '%s/%d' % (self._string_from_ip_int(self._ip), - self.network.prefixlen) - - def __eq__(self, other): - try: - return (IPv4Address.__eq__(self, other) and - self.network == other.network) - except AttributeError: - return NotImplemented - - def __hash__(self): - return self._ip ^ self._prefixlen ^ int(self.network.network_address) - - def _is_valid_netmask(self, netmask): - """Verify that the netmask is valid. - - Args: - netmask: A string, either a prefix or dotted decimal - netmask. - - Returns: - A boolean, True if the prefix represents a valid IPv4 - netmask. 
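IPv4Address above accepts a dotted-quad string, a plain integer or four packed bytes, and exposes the RFC-based classification properties defined in _BaseV4; a sketch of the equivalent spellings, assuming ipaddress is importable.

    import ipaddress

    a = ipaddress.IPv4Address('192.0.2.1')
    print(int(a))                                           # 3221225985
    print(ipaddress.IPv4Address(3221225985) == a)           # integer form
    print(ipaddress.IPv4Address(b'\xc0\x00\x02\x01') == a)  # packed form

    p = ipaddress.IPv4Address('10.1.2.3')
    print(p.is_private, p.is_loopback, p.is_multicast)      # True False False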
- - """ - mask = netmask.split('.') - if len(mask) == 4: - if [x for x in mask if int(x) not in self._valid_mask_octets]: - return False - if [y for idx, y in enumerate(mask) if idx > 0 and - y > mask[idx - 1]]: - return False - return True - try: - netmask = int(netmask) - except ValueError: - return False - return 0 <= netmask <= self._max_prefixlen - - def _is_hostmask(self, ip_str): - """Test if the IP string is a hostmask (rather than a netmask). - - Args: - ip_str: A string, the potential hostmask. - - Returns: - A boolean, True if the IP string is a hostmask. - - """ - bits = ip_str.split('.') - try: - parts = [int(x) for x in bits if int(x) in self._valid_mask_octets] - except ValueError: - return False - if len(parts) != len(bits): - return False - if parts[0] < parts[-1]: - return True - return False - - - @property - def prefixlen(self): - return self._prefixlen - - @property - def ip(self): - return IPv4Address(self._ip) - - @property - def with_prefixlen(self): - return self - - @property - def with_netmask(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self.netmask) - @property - def with_hostmask(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self.hostmask) - - -class IPv4Network(_BaseV4, _BaseNetwork): - - """This class represents and manipulates 32-bit IPv4 network + addresses.. - - Attributes: [examples for IPv4Network('192.0.2.0/27')] - .network_address: IPv4Address('192.0.2.0') - .hostmask: IPv4Address('0.0.0.31') - .broadcast_address: IPv4Address('192.0.2.32') - .netmask: IPv4Address('255.255.255.224') - .prefixlen: 27 - - """ - - # the valid octets for host and netmasks. only useful for IPv4. - _valid_mask_octets = set((255, 254, 252, 248, 240, 224, 192, 128, 0)) - - def __init__(self, address, strict=True): - - """Instantiate a new IPv4 network object. - - Args: - address: A string or integer representing the IP [& network]. - '192.0.2.0/24' - '192.0.2.0/255.255.255.0' - '192.0.0.2/0.0.0.255' - are all functionally the same in IPv4. Similarly, - '192.0.2.1' - '192.0.2.1/255.255.255.255' - '192.0.2.1/32' - are also functionaly equivalent. That is to say, failing to - provide a subnetmask will create an object with a mask of /32. - - If the mask (portion after the / in the argument) is given in - dotted quad form, it is treated as a netmask if it starts with a - non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it - starts with a zero field (e.g. 0.255.255.255 == /8), with the - single exception of an all-zero mask which is treated as a - netmask == /0. If no mask is given, a default of /32 is used. - - Additionally, an integer can be passed, so - IPv4Network('192.0.2.1') == IPv4Network(3221225985) - or, more generally - IPv4Interface(int(IPv4Interface('192.0.2.1'))) == - IPv4Interface('192.0.2.1') - - Raises: - AddressValueError: If ipaddressisn't a valid IPv4 address. - NetmaskValueError: If the netmask isn't valid for - an IPv4 address. - ValueError: If strict was True and a network address was not - supplied. - - """ - - _BaseV4.__init__(self, address) - _BaseNetwork.__init__(self, address) - - # Constructing from a packed address - if isinstance(address, bytes) and len(address) == 4: - self.network_address = IPv4Address( - struct.unpack('!I', address)[0]) - self._prefixlen = self._max_prefixlen - self.netmask = IPv4Address(self._ALL_ONES) - #fixme: address/network test here - return - - # Efficient constructor from integer. 
- if isinstance(address, int): - self._prefixlen = self._max_prefixlen - self.netmask = IPv4Address(self._ALL_ONES) - if address < 0 or address > self._ALL_ONES: - raise AddressValueError(address) - self.network_address = IPv4Address(address) - #fixme: address/network test here. - return - - # Assume input argument to be string or any object representation - # which converts into a formatted IP prefix string. - addr = str(address).split('/') - self.network_address = IPv4Address(self._ip_int_from_string(addr[0])) - - if len(addr) > 2: - raise AddressValueError(address) - - if len(addr) == 2: - mask = addr[1].split('.') - - if len(mask) == 4: - # We have dotted decimal netmask. - if self._is_valid_netmask(addr[1]): - self.netmask = IPv4Address(self._ip_int_from_string( - addr[1])) - elif self._is_hostmask(addr[1]): - self.netmask = IPv4Address( - self._ip_int_from_string(addr[1]) ^ self._ALL_ONES) - else: - raise NetmaskValueError('%s is not a valid netmask' - % addr[1]) - - self._prefixlen = self._prefix_from_ip_int(int(self.netmask)) - else: - # We have a netmask in prefix length form. - if not self._is_valid_netmask(addr[1]): - raise NetmaskValueError(addr[1]) - self._prefixlen = int(addr[1]) - self.netmask = IPv4Address(self._ip_int_from_prefix( - self._prefixlen)) - else: - self._prefixlen = self._max_prefixlen - self.netmask = IPv4Address(self._ip_int_from_prefix( - self._prefixlen)) - - if strict: - if (IPv4Address(int(self.network_address) & int(self.netmask)) != - self.network_address): - raise ValueError('%s has host bits set' % self) - self.network_address = IPv4Address(int(self.network_address) & - int(self.netmask)) - - if self._prefixlen == (self._max_prefixlen - 1): - self.hosts = self.__iter__ - - @property - def packed(self): - """The binary representation of this address.""" - return v4_int_to_packed(self.network_address) - - def __str__(self): - return '%s/%d' % (str(self.network_address), - self.prefixlen) - - def _is_valid_netmask(self, netmask): - """Verify that the netmask is valid. - - Args: - netmask: A string, either a prefix or dotted decimal - netmask. - - Returns: - A boolean, True if the prefix represents a valid IPv4 - netmask. - - """ - mask = netmask.split('.') - if len(mask) == 4: - if [x for x in mask if int(x) not in self._valid_mask_octets]: - return False - if [y for idx, y in enumerate(mask) if idx > 0 and - y > mask[idx - 1]]: - return False - return True - try: - netmask = int(netmask) - except ValueError: - return False - return 0 <= netmask <= self._max_prefixlen - - def _is_hostmask(self, ip_str): - """Test if the IP string is a hostmask (rather than a netmask). - - Args: - ip_str: A string, the potential hostmask. - - Returns: - A boolean, True if the IP string is a hostmask. - - """ - bits = ip_str.split('.') - try: - parts = [int(x) for x in bits if int(x) in self._valid_mask_octets] - except ValueError: - return False - if len(parts) != len(bits): - return False - if parts[0] < parts[-1]: - return True - return False - - @property - def with_prefixlen(self): - return '%s/%d' % (str(self.network_address), self._prefixlen) - - @property - def with_netmask(self): - return '%s/%s' % (str(self.network_address), str(self.netmask)) - - @property - def with_hostmask(self): - return '%s/%s' % (str(self.network_address), str(self.hostmask)) - - -class _BaseV6(object): - - """Base IPv6 object. - - The following methods are used by IPv6 objects in both single IP - addresses and networks. 
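The IPv4Network constructor above accepts the mask as a prefix length, a dotted netmask or a hostmask, with _is_valid_netmask() and _is_hostmask() deciding which form was supplied; a sketch of the equivalent spellings, assuming ipaddress is importable.

    import ipaddress

    a = ipaddress.IPv4Network('192.0.2.0/24')
    b = ipaddress.IPv4Network('192.0.2.0/255.255.255.0')  # dotted netmask
    c = ipaddress.IPv4Network('192.0.2.0/0.0.0.255')      # hostmask form
    print(a == b == c)        # all three describe the same /24
    print(a.with_netmask)     # 192.0.2.0/255.255.255.0
    print(a.with_hostmask)    # 192.0.2.0/0.0.0.255
    # With the default strict=True, host bits are an error:
    # ipaddress.IPv4Network('192.0.2.1/24') raises ValueError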
- - """ - - _ALL_ONES = (2**IPV6LENGTH) - 1 - _HEXTET_COUNT = 8 - _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef') - - def __init__(self, address): - self._version = 6 - self._max_prefixlen = IPV6LENGTH - - def _ip_int_from_string(self, ip_str): - """Turn an IPv6 ip_str into an integer. - - Args: - ip_str: A string, the IPv6 ip_str. - - Returns: - An int, the IPv6 address - - Raises: - AddressValueError: if ip_str isn't a valid IPv6 Address. - - """ - parts = ip_str.split(':') - - # An IPv6 address needs at least 2 colons (3 parts). - if len(parts) < 3: - raise AddressValueError(ip_str) - - # If the address has an IPv4-style suffix, convert it to hexadecimal. - if '.' in parts[-1]: - ipv4_int = IPv4Address(parts.pop())._ip - parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF)) - parts.append('%x' % (ipv4_int & 0xFFFF)) - - # An IPv6 address can't have more than 8 colons (9 parts). - if len(parts) > self._HEXTET_COUNT + 1: - raise AddressValueError(ip_str) - - # Disregarding the endpoints, find '::' with nothing in between. - # This indicates that a run of zeroes has been skipped. - try: - skip_index, = ( - [i for i in range(1, len(parts) - 1) if not parts[i]] or - [None]) - except ValueError: - # Can't have more than one '::' - raise AddressValueError(ip_str) - - # parts_hi is the number of parts to copy from above/before the '::' - # parts_lo is the number of parts to copy from below/after the '::' - if skip_index is not None: - # If we found a '::', then check if it also covers the endpoints. - parts_hi = skip_index - parts_lo = len(parts) - skip_index - 1 - if not parts[0]: - parts_hi -= 1 - if parts_hi: - raise AddressValueError(ip_str) # ^: requires ^:: - if not parts[-1]: - parts_lo -= 1 - if parts_lo: - raise AddressValueError(ip_str) # :$ requires ::$ - parts_skipped = self._HEXTET_COUNT - (parts_hi + parts_lo) - if parts_skipped < 1: - raise AddressValueError(ip_str) - else: - # Otherwise, allocate the entire address to parts_hi. The endpoints - # could still be empty, but _parse_hextet() will check for that. - if len(parts) != self._HEXTET_COUNT: - raise AddressValueError(ip_str) - parts_hi = len(parts) - parts_lo = 0 - parts_skipped = 0 - - try: - # Now, parse the hextets into a 128-bit integer. - ip_int = 0 - for i in range(parts_hi): - ip_int <<= 16 - ip_int |= self._parse_hextet(parts[i]) - ip_int <<= 16 * parts_skipped - for i in range(-parts_lo, 0): - ip_int <<= 16 - ip_int |= self._parse_hextet(parts[i]) - return ip_int - except ValueError: - raise AddressValueError(ip_str) - - def _parse_hextet(self, hextet_str): - """Convert an IPv6 hextet string into an integer. - - Args: - hextet_str: A string, the number to parse. - - Returns: - The hextet as an integer. - - Raises: - ValueError: if the input isn't strictly a hex number from [0..FFFF]. - - """ - # Whitelist the characters, since int() allows a lot of bizarre stuff. - if not self._HEX_DIGITS.issuperset(hextet_str): - raise ValueError - hextet_int = int(hextet_str, 16) - if hextet_int > 0xFFFF: - raise ValueError - return hextet_int - - def _compress_hextets(self, hextets): - """Compresses a list of hextets. - - Compresses a list of strings, replacing the longest continuous - sequence of "0" in the list with "" and adding empty strings at - the beginning or at the end of the string such that subsequently - calling ":".join(hextets) will produce the compressed version of - the IPv6 address. - - Args: - hextets: A list of strings, the hextets to compress. - - Returns: - A list of strings. 
- - """ - best_doublecolon_start = -1 - best_doublecolon_len = 0 - doublecolon_start = -1 - doublecolon_len = 0 - for index in range(len(hextets)): - if hextets[index] == '0': - doublecolon_len += 1 - if doublecolon_start == -1: - # Start of a sequence of zeros. - doublecolon_start = index - if doublecolon_len > best_doublecolon_len: - # This is the longest sequence of zeros so far. - best_doublecolon_len = doublecolon_len - best_doublecolon_start = doublecolon_start - else: - doublecolon_len = 0 - doublecolon_start = -1 - - if best_doublecolon_len > 1: - best_doublecolon_end = (best_doublecolon_start + - best_doublecolon_len) - # For zeros at the end of the address. - if best_doublecolon_end == len(hextets): - hextets += [''] - hextets[best_doublecolon_start:best_doublecolon_end] = [''] - # For zeros at the beginning of the address. - if best_doublecolon_start == 0: - hextets = [''] + hextets - - return hextets - - def _string_from_ip_int(self, ip_int=None): - """Turns a 128-bit integer into hexadecimal notation. - - Args: - ip_int: An integer, the IP address. - - Returns: - A string, the hexadecimal representation of the address. - - Raises: - ValueError: The address is bigger than 128 bits of all ones. - - """ - if not ip_int and ip_int != 0: - ip_int = int(self._ip) - - if ip_int > self._ALL_ONES: - raise ValueError('IPv6 address is too large') - - hex_str = '%032x' % ip_int - hextets = [] - for x in range(0, 32, 4): - hextets.append('%x' % int(hex_str[x:x+4], 16)) - - hextets = self._compress_hextets(hextets) - return ':'.join(hextets) - - def _explode_shorthand_ip_string(self): - """Expand a shortened IPv6 address. - - Args: - ip_str: A string, the IPv6 address. - - Returns: - A string, the expanded IPv6 address. - - """ - if isinstance(self, IPv6Network): - ip_str = str(self.network_address) - elif isinstance(self, IPv6Interface): - ip_str = str(self.ip) - else: - ip_str = str(self) - - ip_int = self._ip_int_from_string(ip_str) - parts = [] - for i in range(self._HEXTET_COUNT): - parts.append('%04x' % (ip_int & 0xFFFF)) - ip_int >>= 16 - parts.reverse() - if isinstance(self, (_BaseNetwork, IPv6Interface)): - return '%s/%d' % (':'.join(parts), self.prefixlen) - return ':'.join(parts) - - @property - def max_prefixlen(self): - return self._max_prefixlen - - @property - def packed(self): - """The binary representation of this address.""" - return v6_int_to_packed(self._ip) - - @property - def version(self): - return self._version - - @property - def is_multicast(self): - """Test if the address is reserved for multicast use. - - Returns: - A boolean, True if the address is a multicast address. - See RFC 2373 2.7 for details. - - """ - multicast_network = IPv6Network('ff00::/8') - if isinstance(self, _BaseAddress): - return self in multicast_network - return (self.network_address in multicast_network and - self.broadcast_address in multicast_network) - - @property - def is_reserved(self): - """Test if the address is otherwise IETF reserved. - - Returns: - A boolean, True if the address is within one of the - reserved IPv6 Network ranges. 
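_compress_hextets() and _explode_shorthand_ip_string() above back the public .compressed and .exploded properties inherited from _IPAddressBase; a brief sketch, assuming ipaddress is importable.

    import ipaddress

    a = ipaddress.IPv6Address('2001:db8::1')
    print(a.compressed)  # 2001:db8::1 (longest run of zero hextets becomes '::')
    print(a.exploded)    # 2001:0db8:0000:0000:0000:0000:0000:0001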
- - """ - reserved_networks = [IPv6Network('::/8'), IPv6Network('100::/8'), - IPv6Network('200::/7'), IPv6Network('400::/6'), - IPv6Network('800::/5'), IPv6Network('1000::/4'), - IPv6Network('4000::/3'), IPv6Network('6000::/3'), - IPv6Network('8000::/3'), IPv6Network('A000::/3'), - IPv6Network('C000::/3'), IPv6Network('E000::/4'), - IPv6Network('F000::/5'), IPv6Network('F800::/6'), - IPv6Network('FE00::/9')] - - if isinstance(self, _BaseAddress): - return len([x for x in reserved_networks if self in x]) > 0 - return len([x for x in reserved_networks if self.network_address in x - and self.broadcast_address in x]) > 0 - - @property - def is_link_local(self): - """Test if the address is reserved for link-local. - - Returns: - A boolean, True if the address is reserved per RFC 4291. - - """ - linklocal_network = IPv6Network('fe80::/10') - if isinstance(self, _BaseAddress): - return self in linklocal_network - return (self.network_address in linklocal_network and - self.broadcast_address in linklocal_network) - - @property - def is_site_local(self): - """Test if the address is reserved for site-local. - - Note that the site-local address space has been deprecated by RFC 3879. - Use is_private to test if this address is in the space of unique local - addresses as defined by RFC 4193. - - Returns: - A boolean, True if the address is reserved per RFC 3513 2.5.6. - - """ - sitelocal_network = IPv6Network('fec0::/10') - if isinstance(self, _BaseAddress): - return self in sitelocal_network - return (self.network_address in sitelocal_network and - self.broadcast_address in sitelocal_network) - - @property - def is_private(self): - """Test if this address is allocated for private networks. - - Returns: - A boolean, True if the address is reserved per RFC 4193. - - """ - private_network = IPv6Network('fc00::/7') - if isinstance(self, _BaseAddress): - return self in private_network - return (self.network_address in private_network and - self.broadcast_address in private_network) - - - @property - def ipv4_mapped(self): - """Return the IPv4 mapped address. - - Returns: - If the IPv6 address is a v4 mapped address, return the - IPv4 mapped address. Return None otherwise. - - """ - if (self._ip >> 32) != 0xFFFF: - return None - return IPv4Address(self._ip & 0xFFFFFFFF) - - @property - def teredo(self): - """Tuple of embedded teredo IPs. - - Returns: - Tuple of the (server, client) IPs or None if the address - doesn't appear to be a teredo address (doesn't start with - 2001::/32) - - """ - if (self._ip >> 96) != 0x20010000: - return None - return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF), - IPv4Address(~self._ip & 0xFFFFFFFF)) - - @property - def sixtofour(self): - """Return the IPv4 6to4 embedded address. - - Returns: - The IPv4 6to4-embedded address if present or None if the - address doesn't appear to contain a 6to4 embedded address. - - """ - if (self._ip >> 112) != 0x2002: - return None - return IPv4Address((self._ip >> 80) & 0xFFFFFFFF) - - @property - def is_unspecified(self): - """Test if the address is unspecified. - - Returns: - A boolean, True if this is the unspecified address as defined in - RFC 2373 2.5.2. - - """ - if isinstance(self, (IPv6Network, IPv6Interface)): - return int(self.network_address) == 0 and getattr( - self, '_prefixlen', 128) == 128 - return self._ip == 0 - - @property - def is_loopback(self): - """Test if the address is a loopback address. - - Returns: - A boolean, True if the address is a loopback address as defined in - RFC 2373 2.5.3. 
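The ipv4_mapped, sixtofour and teredo properties above recover an embedded IPv4 address from the corresponding IPv6 ranges, returning None when the prefix does not match; a sketch with illustrative addresses, assuming ipaddress is importable.

    import ipaddress

    print(ipaddress.IPv6Address('::ffff:192.0.2.5').ipv4_mapped)  # IPv4Address('192.0.2.5')
    print(ipaddress.IPv6Address('2002:c000:205::').sixtofour)     # IPv4Address('192.0.2.5')
    print(ipaddress.IPv6Address('2001:db8::1').teredo)            # None, prefix is not 2001::/32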
- - """ - if isinstance(self, IPv6Network): - return int(self.network) == 1 and getattr( - self, '_prefixlen', 128) == 128 - elif isinstance(self, IPv6Interface): - return int(self.network.network_address) == 1 and getattr( - self, '_prefixlen', 128) == 128 - return self._ip == 1 - - -class IPv6Address(_BaseV6, _BaseAddress): - - """Represent and manipulate single IPv6 Addresses. - """ - - def __init__(self, address): - """Instantiate a new IPv6 address object. - - Args: - address: A string or integer representing the IP - - Additionally, an integer can be passed, so - IPv6Address('2001:db8::') == - IPv6Address(42540766411282592856903984951653826560) - or, more generally - IPv6Address(int(IPv6Address('2001:db8::'))) == - IPv6Address('2001:db8::') - - Raises: - AddressValueError: If address isn't a valid IPv6 address. - - """ - _BaseAddress.__init__(self, address) - _BaseV6.__init__(self, address) - - # Efficient constructor from integer. - if isinstance(address, int): - self._ip = address - if address < 0 or address > self._ALL_ONES: - raise AddressValueError(address) - return - - # Constructing from a packed address - if isinstance(address, bytes) and len(address) == 16: - tmp = struct.unpack('!QQ', address) - self._ip = (tmp[0] << 64) | tmp[1] - return - - # Assume input argument to be string or any object representation - # which converts into a formatted IP string. - addr_str = str(address) - if not addr_str: - raise AddressValueError('') - - self._ip = self._ip_int_from_string(addr_str) - - -class IPv6Interface(IPv6Address): - - def __init__(self, address): - if isinstance(address, (bytes, int)): - IPv6Address.__init__(self, address) - self.network = IPv6Network(self._ip) - self._prefixlen = self._max_prefixlen - return - - addr = str(address).split('/') - IPv6Address.__init__(self, addr[0]) - self.network = IPv6Network(address, strict=False) - self.netmask = self.network.netmask - self._prefixlen = self.network._prefixlen - self.hostmask = self.network.hostmask - - - def __str__(self): - return '%s/%d' % (self._string_from_ip_int(self._ip), - self.network.prefixlen) - - def __eq__(self, other): - try: - return (IPv6Address.__eq__(self, other) and - self.network == other.network) - except AttributeError: - return NotImplemented - - def __hash__(self): - return self._ip ^ self._prefixlen ^ int(self.network.network_address) - - @property - def prefixlen(self): - return self._prefixlen - @property - def ip(self): - return IPv6Address(self._ip) - - @property - def with_prefixlen(self): - return self - - @property - def with_netmask(self): - return self.with_prefixlen - @property - def with_hostmask(self): - return '%s/%s' % (self._string_from_ip_int(self._ip), - self.hostmask) - - -class IPv6Network(_BaseV6, _BaseNetwork): - - """This class represents and manipulates 128-bit IPv6 networks. - - Attributes: [examples for IPv6('2001:db8::1000/124')] - .network_address: IPv6Address('2001:db8::1000') - .hostmask: IPv6Address('::f') - .broadcast_address: IPv6Address('2001:db8::100f') - .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0') - .prefixlen: 124 - - """ - - def __init__(self, address, strict=True): - """Instantiate a new IPv6 Network object. - - Args: - address: A string or integer representing the IPv6 network or the IP - and prefix/netmask. - '2001:db8::/128' - '2001:db8:0000:0000:0000:0000:0000:0000/128' - '2001:db8::' - are all functionally the same in IPv6. That is to say, - failing to provide a subnetmask will create an object with - a mask of /128. 
- - Additionally, an integer can be passed, so - IPv6Network('2001:db8::') == - IPv6Network(42540766411282592856903984951653826560) - or, more generally - IPv6Network(int(IPv6Network('2001:db8::'))) == - IPv6Network('2001:db8::') - - strict: A boolean. If true, ensure that we have been passed - A true network address, eg, 2001:db8::1000/124 and not an - IP address on a network, eg, 2001:db8::1/124. - - Raises: - AddressValueError: If address isn't a valid IPv6 address. - NetmaskValueError: If the netmask isn't valid for - an IPv6 address. - ValueError: If strict was True and a network address was not - supplied. - - """ - _BaseV6.__init__(self, address) - _BaseNetwork.__init__(self, address) - - # Efficient constructor from integer. - if isinstance(address, int): - if address < 0 or address > self._ALL_ONES: - raise AddressValueError(address) - self.network_address = IPv6Address(address) - self._prefixlen = self._max_prefixlen - self.netmask = IPv6Address(self._ALL_ONES) - if strict: - if (IPv6Address(int(self.network_address) & - int(self.netmask)) != self.network_address): - raise ValueError('%s has host bits set' % str(self)) - self.network_address = IPv6Address(int(self.network_address) & - int(self.netmask)) - return - - # Constructing from a packed address - if isinstance(address, bytes) and len(address) == 16: - tmp = struct.unpack('!QQ', address) - self.network_address = IPv6Address((tmp[0] << 64) | tmp[1]) - self._prefixlen = self._max_prefixlen - self.netmask = IPv6Address(self._ALL_ONES) - if strict: - if (IPv6Address(int(self.network_address) & - int(self.netmask)) != self.network_address): - raise ValueError('%s has host bits set' % str(self)) - self.network_address = IPv6Address(int(self.network_address) & - int(self.netmask)) - return - - # Assume input argument to be string or any object representation - # which converts into a formatted IP prefix string. - addr = str(address).split('/') - - if len(addr) > 2: - raise AddressValueError(address) - - self.network_address = IPv6Address(self._ip_int_from_string(addr[0])) - - if len(addr) == 2: - if self._is_valid_netmask(addr[1]): - self._prefixlen = int(addr[1]) - else: - raise NetmaskValueError(addr[1]) - else: - self._prefixlen = self._max_prefixlen - - self.netmask = IPv6Address(self._ip_int_from_prefix(self._prefixlen)) - if strict: - if (IPv6Address(int(self.network_address) & int(self.netmask)) != - self.network_address): - raise ValueError('%s has host bits set' % str(self)) - self.network_address = IPv6Address(int(self.network_address) & - int(self.netmask)) - - if self._prefixlen == (self._max_prefixlen - 1): - self.hosts = self.__iter__ - - def __str__(self): - return '%s/%d' % (str(self.network_address), - self.prefixlen) - - def _is_valid_netmask(self, prefixlen): - """Verify that the netmask/prefixlen is valid. - - Args: - prefixlen: A string, the netmask in prefix length format. - - Returns: - A boolean, True if the prefix represents a valid IPv6 - netmask. 
- - """ - try: - prefixlen = int(prefixlen) - except ValueError: - return False - return 0 <= prefixlen <= self._max_prefixlen - - @property - def with_netmask(self): - return self.with_prefixlen - - @property - def with_prefixlen(self): - return '%s/%d' % (str(self.network_address), self._prefixlen) - - @property - def with_netmask(self): - return '%s/%s' % (str(self.network_address), str(self.netmask)) - - @property - def with_hostmask(self): - return '%s/%s' % (str(self.network_address), str(self.hostmask)) diff -r 0be296605165 -r faa88c50a3d2 Lib/json/encoder.py --- a/Lib/json/encoder.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/json/encoder.py Wed May 23 21:09:05 2012 +0200 @@ -27,7 +27,8 @@ ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i)) #ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) -INFINITY = float('inf') +# Assume this produces an infinity on all machines (probably not guaranteed) +INFINITY = float('1e66666') FLOAT_REPR = repr def encode_basestring(s): diff -r 0be296605165 -r faa88c50a3d2 Lib/multiprocessing/forking.py --- a/Lib/multiprocessing/forking.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/multiprocessing/forking.py Wed May 23 21:09:05 2012 +0200 @@ -324,8 +324,7 @@ return [sys.executable, '--multiprocessing-fork'] else: prog = 'from multiprocessing.forking import main; main()' - opts = util._args_from_interpreter_flags() - return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork'] + return [_python_exe, '-c', prog, '--multiprocessing-fork'] def main(): diff -r 0be296605165 -r faa88c50a3d2 Lib/multiprocessing/util.py --- a/Lib/multiprocessing/util.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/multiprocessing/util.py Wed May 23 21:09:05 2012 +0200 @@ -7,14 +7,12 @@ # Licensed to PSF under a Contributor Agreement. # -import sys import functools import itertools import weakref import atexit import threading # we want threading to install it's # cleanup function before multiprocessing does -from subprocess import _args_from_interpreter_flags from multiprocessing.process import current_process, active_children diff -r 0be296605165 -r faa88c50a3d2 Lib/os.py --- a/Lib/os.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/os.py Wed May 23 21:09:05 2012 +0200 @@ -30,9 +30,8 @@ # Note: more names are added to __all__ later. __all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep", - "defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR", - "SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen", - "popen", "extsep"] + "defpath", "name", "path", "devnull", + "SEEK_SET", "SEEK_CUR", "SEEK_END"] def _exists(name): return name in globals() @@ -51,7 +50,6 @@ from posix import * try: from posix import _exit - __all__.append('_exit') except ImportError: pass import posixpath as path @@ -66,7 +64,6 @@ from nt import * try: from nt import _exit - __all__.append('_exit') except ImportError: pass import ntpath as path @@ -81,7 +78,6 @@ from os2 import * try: from os2 import _exit - __all__.append('_exit') except ImportError: pass if sys.version.find('EMX GCC') == -1: @@ -100,7 +96,6 @@ from ce import * try: from ce import _exit - __all__.append('_exit') except ImportError: pass # We can use the standard Windows path. @@ -358,23 +353,13 @@ names = flistdir(topfd) dirs, nondirs = [], [] for name in names: - try: - # Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with - # walk() which reports symlinks to directories as directories. - # We do however check for symlinks before recursing into - # a subdirectory. 
- if st.S_ISDIR(fstatat(topfd, name).st_mode): - dirs.append(name) - else: - nondirs.append(name) - except FileNotFoundError: - try: - # Add dangling symlinks, ignore disappeared files - if st.S_ISLNK(fstatat(topfd, name, AT_SYMLINK_NOFOLLOW) - .st_mode): - nondirs.append(name) - except FileNotFoundError: - continue + # Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with + # walk() which reports symlinks to directories as directories. We do + # however check for symlinks before recursing into a subdirectory. + if st.S_ISDIR(fstatat(topfd, name).st_mode): + dirs.append(name) + else: + nondirs.append(name) if topdown: yield toppath, dirs, nondirs, topfd @@ -705,8 +690,6 @@ P_WAIT = 0 P_NOWAIT = P_NOWAITO = 1 - __all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"]) - # XXX Should we support P_DETACH? I suppose it could fork()**2 # and close the std I/O streams. Also, P_OVERLAY is the same # as execv*()? diff -r 0be296605165 -r faa88c50a3d2 Lib/pyclbr.py --- a/Lib/pyclbr.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/pyclbr.py Wed May 23 21:09:05 2012 +0200 @@ -130,8 +130,6 @@ parent = _readmodule(package, path, inpackage) if inpackage is not None: package = "%s.%s" % (inpackage, package) - if not '__path__' in parent: - raise ImportError('No package named {}'.format(package)) return _readmodule(submodule, parent['__path__'], package) # Search the path for the module diff -r 0be296605165 -r faa88c50a3d2 Lib/pydoc.py --- a/Lib/pydoc.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/pydoc.py Wed May 23 21:09:05 2012 +0200 @@ -167,7 +167,7 @@ if name in {'__builtins__', '__doc__', '__file__', '__path__', '__module__', '__name__', '__slots__', '__package__', '__cached__', '__author__', '__credits__', '__date__', - '__version__', '__qualname__', '__initializing__'}: + '__version__', '__qualname__'}: return 0 # Private names are hidden, but special names are displayed. if name.startswith('__') and name.endswith('__'): return 1 diff -r 0be296605165 -r faa88c50a3d2 Lib/stat.py --- a/Lib/stat.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/stat.py Wed May 23 21:09:05 2012 +0200 @@ -19,131 +19,78 @@ # Extract bits from the mode def S_IMODE(mode): - """Return the portion of the file's mode that can be set by - os.chmod(). - """ return mode & 0o7777 def S_IFMT(mode): - """Return the portion of the file's mode that describes the - file type. 
- """ return mode & 0o170000 # Constants used as S_IFMT() for various file types # (not all are implemented on all systems) -S_IFDIR = 0o040000 # directory -S_IFCHR = 0o020000 # character device -S_IFBLK = 0o060000 # block device -S_IFREG = 0o100000 # regular file -S_IFIFO = 0o010000 # fifo (named pipe) -S_IFLNK = 0o120000 # symbolic link -S_IFSOCK = 0o140000 # socket file +S_IFDIR = 0o040000 +S_IFCHR = 0o020000 +S_IFBLK = 0o060000 +S_IFREG = 0o100000 +S_IFIFO = 0o010000 +S_IFLNK = 0o120000 +S_IFSOCK = 0o140000 # Functions to test for each file type def S_ISDIR(mode): - """Return True if mode is from a directory.""" return S_IFMT(mode) == S_IFDIR def S_ISCHR(mode): - """Return True if mode is from a character special device file.""" return S_IFMT(mode) == S_IFCHR def S_ISBLK(mode): - """Return True if mode is from a block special device file.""" return S_IFMT(mode) == S_IFBLK def S_ISREG(mode): - """Return True if mode is from a regular file.""" return S_IFMT(mode) == S_IFREG def S_ISFIFO(mode): - """Return True if mode is from a FIFO (named pipe).""" return S_IFMT(mode) == S_IFIFO def S_ISLNK(mode): - """Return True if mode is from a symbolic link.""" return S_IFMT(mode) == S_IFLNK def S_ISSOCK(mode): - """Return True if mode is from a socket.""" return S_IFMT(mode) == S_IFSOCK # Names for permission bits -S_ISUID = 0o4000 # set UID bit -S_ISGID = 0o2000 # set GID bit -S_ENFMT = S_ISGID # file locking enforcement -S_ISVTX = 0o1000 # sticky bit -S_IREAD = 0o0400 # Unix V7 synonym for S_IRUSR -S_IWRITE = 0o0200 # Unix V7 synonym for S_IWUSR -S_IEXEC = 0o0100 # Unix V7 synonym for S_IXUSR -S_IRWXU = 0o0700 # mask for owner permissions -S_IRUSR = 0o0400 # read by owner -S_IWUSR = 0o0200 # write by owner -S_IXUSR = 0o0100 # execute by owner -S_IRWXG = 0o0070 # mask for group permissions -S_IRGRP = 0o0040 # read by group -S_IWGRP = 0o0020 # write by group -S_IXGRP = 0o0010 # execute by group -S_IRWXO = 0o0007 # mask for others (not in group) permissions -S_IROTH = 0o0004 # read by others -S_IWOTH = 0o0002 # write by others -S_IXOTH = 0o0001 # execute by others +S_ISUID = 0o4000 +S_ISGID = 0o2000 +S_ENFMT = S_ISGID +S_ISVTX = 0o1000 +S_IREAD = 0o0400 +S_IWRITE = 0o0200 +S_IEXEC = 0o0100 +S_IRWXU = 0o0700 +S_IRUSR = 0o0400 +S_IWUSR = 0o0200 +S_IXUSR = 0o0100 +S_IRWXG = 0o0070 +S_IRGRP = 0o0040 +S_IWGRP = 0o0020 +S_IXGRP = 0o0010 +S_IRWXO = 0o0007 +S_IROTH = 0o0004 +S_IWOTH = 0o0002 +S_IXOTH = 0o0001 # Names for file flags -UF_NODUMP = 0x00000001 # do not dump file -UF_IMMUTABLE = 0x00000002 # file may not be changed -UF_APPEND = 0x00000004 # file may only be appended to -UF_OPAQUE = 0x00000008 # directory is opaque when viewed through a union stack -UF_NOUNLINK = 0x00000010 # file may not be renamed or deleted -UF_COMPRESSED = 0x00000020 # OS X: file is hfs-compressed -UF_HIDDEN = 0x00008000 # OS X: file should not be displayed -SF_ARCHIVED = 0x00010000 # file may be archived -SF_IMMUTABLE = 0x00020000 # file may not be changed -SF_APPEND = 0x00040000 # file may only be appended to -SF_NOUNLINK = 0x00100000 # file may not be renamed or deleted -SF_SNAPSHOT = 0x00200000 # file is a snapshot file - - -_filemode_table = ( - ((S_IFLNK, "l"), - (S_IFREG, "-"), - (S_IFBLK, "b"), - (S_IFDIR, "d"), - (S_IFCHR, "c"), - (S_IFIFO, "p")), - - ((S_IRUSR, "r"),), - ((S_IWUSR, "w"),), - ((S_IXUSR|S_ISUID, "s"), - (S_ISUID, "S"), - (S_IXUSR, "x")), - - ((S_IRGRP, "r"),), - ((S_IWGRP, "w"),), - ((S_IXGRP|S_ISGID, "s"), - (S_ISGID, "S"), - (S_IXGRP, "x")), - - ((S_IROTH, "r"),), - ((S_IWOTH, "w"),), - 
((S_IXOTH|S_ISVTX, "t"), - (S_ISVTX, "T"), - (S_IXOTH, "x")) -) - -def filemode(mode): - """Convert a file's mode to a string of the form '-rwxrwxrwx'.""" - perm = [] - for table in _filemode_table: - for bit, char in table: - if mode & bit == bit: - perm.append(char) - break - else: - perm.append("-") - return "".join(perm) +UF_NODUMP = 0x00000001 +UF_IMMUTABLE = 0x00000002 +UF_APPEND = 0x00000004 +UF_OPAQUE = 0x00000008 +UF_NOUNLINK = 0x00000010 +UF_COMPRESSED = 0x00000020 # OS X: file is hfs-compressed +UF_HIDDEN = 0x00008000 # OS X: file should not be displayed +SF_ARCHIVED = 0x00010000 +SF_IMMUTABLE = 0x00020000 +SF_APPEND = 0x00040000 +SF_NOUNLINK = 0x00100000 +SF_SNAPSHOT = 0x00200000 diff -r 0be296605165 -r faa88c50a3d2 Lib/subprocess.py --- a/Lib/subprocess.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/subprocess.py Wed May 23 21:09:05 2012 +0200 @@ -475,37 +475,6 @@ continue -# XXX This function is only used by multiprocessing and the test suite, -# but it's here so that it can be imported when Python is compiled without -# threads. - -def _args_from_interpreter_flags(): - """Return a list of command-line arguments reproducing the current - settings in sys.flags and sys.warnoptions.""" - flag_opt_map = { - 'debug': 'd', - # 'inspect': 'i', - # 'interactive': 'i', - 'optimize': 'O', - 'dont_write_bytecode': 'B', - 'no_user_site': 's', - 'no_site': 'S', - 'ignore_environment': 'E', - 'verbose': 'v', - 'bytes_warning': 'b', - 'quiet': 'q', - 'hash_randomization': 'R', - } - args = [] - for flag, opt in flag_opt_map.items(): - v = getattr(sys.flags, flag) - if v > 0: - args.append('-' + opt * v) - for opt in sys.warnoptions: - args.append('-W' + opt) - return args - - def call(*popenargs, timeout=None, **kwargs): """Run command with arguments. Wait for command to complete or timeout, then return the returncode attribute. diff -r 0be296605165 -r faa88c50a3d2 Lib/tarfile.py --- a/Lib/tarfile.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/tarfile.py Wed May 23 21:09:05 2012 +0200 @@ -245,8 +245,8 @@ the high bit set. So we calculate two checksums, unsigned and signed. """ - unsigned_chksum = 256 + sum(struct.unpack_from("148B8x356B", buf)) - signed_chksum = 256 + sum(struct.unpack_from("148b8x356b", buf)) + unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512])) + signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512])) return unsigned_chksum, signed_chksum def copyfileobj(src, dst, length=None): @@ -274,13 +274,47 @@ dst.write(buf) return +filemode_table = ( + ((S_IFLNK, "l"), + (S_IFREG, "-"), + (S_IFBLK, "b"), + (S_IFDIR, "d"), + (S_IFCHR, "c"), + (S_IFIFO, "p")), + + ((TUREAD, "r"),), + ((TUWRITE, "w"),), + ((TUEXEC|TSUID, "s"), + (TSUID, "S"), + (TUEXEC, "x")), + + ((TGREAD, "r"),), + ((TGWRITE, "w"),), + ((TGEXEC|TSGID, "s"), + (TSGID, "S"), + (TGEXEC, "x")), + + ((TOREAD, "r"),), + ((TOWRITE, "w"),), + ((TOEXEC|TSVTX, "t"), + (TSVTX, "T"), + (TOEXEC, "x")) +) + def filemode(mode): - """Deprecated in this location; use stat.filemode.""" - import warnings - warnings.warn("deprecated in favor of stat.filemode", - DeprecationWarning, 2) - return stat.filemode(mode) - + """Convert a file's mode to a string of the form + -rwxrwxrwx. 
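The table-driven permission formatting above exists as stat.filemode() on one side of this change and as tarfile.filemode() on the other; a minimal sketch of the strings it produces, assuming a Python where stat.filemode() is available (the revision that keeps it in Lib/stat.py).

    import stat

    print(stat.filemode(0o100644))  # -rw-r--r--  regular file
    print(stat.filemode(0o040755))  # drwxr-xr-x  directory
    print(stat.filemode(0o120777))  # lrwxrwxrwx  symbolic link
    print(stat.filemode(0o104755))  # -rwsr-xr-x  setuid executable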
+ Used by TarFile.list() + """ + perm = [] + for table in filemode_table: + for bit, char in table: + if mode & bit == bit: + perm.append(char) + break + else: + perm.append("-") + return "".join(perm) class TarError(Exception): """Base exception.""" @@ -1857,7 +1891,7 @@ for tarinfo in self: if verbose: - print(stat.filemode(tarinfo.mode), end=' ') + print(filemode(tarinfo.mode), end=' ') print("%s/%s" % (tarinfo.uname or tarinfo.uid, tarinfo.gname or tarinfo.gid), end=' ') if tarinfo.ischr() or tarinfo.isblk(): diff -r 0be296605165 -r faa88c50a3d2 Lib/test/lock_tests.py --- a/Lib/test/lock_tests.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/lock_tests.py Wed May 23 21:09:05 2012 +0200 @@ -247,6 +247,7 @@ # Cannot release an unacquired lock lock = self.locktype() self.assertRaises(RuntimeError, lock.release) + self.assertRaises(RuntimeError, lock._release_save) lock.acquire() lock.acquire() lock.release() @@ -254,17 +255,6 @@ lock.release() lock.release() self.assertRaises(RuntimeError, lock.release) - - def test_release_save_unacquired(self): - # Cannot _release_save an unacquired lock - lock = self.locktype() - self.assertRaises(RuntimeError, lock._release_save) - lock.acquire() - lock.acquire() - lock.release() - lock.acquire() - lock.release() - lock.release() self.assertRaises(RuntimeError, lock._release_save) def test_different_thread(self): diff -r 0be296605165 -r faa88c50a3d2 Lib/test/support.py --- a/Lib/test/support.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/support.py Wed May 23 21:09:05 2012 +0200 @@ -24,7 +24,6 @@ import fnmatch import logging.handlers import struct -import tempfile try: import _thread, threading @@ -52,25 +51,23 @@ lzma = None __all__ = [ - "Error", "TestFailed", "ResourceDenied", "import_module", "verbose", - "use_resources", "max_memuse", "record_original_stdout", + "Error", "TestFailed", "ResourceDenied", "import_module", + "verbose", "use_resources", "max_memuse", "record_original_stdout", "get_original_stdout", "unload", "unlink", "rmtree", "forget", "is_resource_enabled", "requires", "requires_freebsd_version", - "requires_linux_version", "requires_mac_ver", "find_unused_port", - "bind_port", "IPV6_ENABLED", "is_jython", "TESTFN", "HOST", "SAVEDCWD", - "temp_cwd", "findfile", "create_empty_file", "sortdict", - "check_syntax_error", "open_urlresource", "check_warnings", "CleanImport", - "EnvironmentVarGuard", "TransientResource", "captured_stdout", - "captured_stdin", "captured_stderr", "time_out", "socket_peer_reset", - "ioerror_peer_reset", "run_with_locale", 'temp_umask', + "requires_linux_version", "requires_mac_ver", "find_unused_port", "bind_port", + "IPV6_ENABLED", "is_jython", "TESTFN", "HOST", "SAVEDCWD", "temp_cwd", + "findfile", "create_empty_file", "sortdict", "check_syntax_error", "open_urlresource", + "check_warnings", "CleanImport", "EnvironmentVarGuard", "TransientResource", + "captured_stdout", "captured_stdin", "captured_stderr", "time_out", + "socket_peer_reset", "ioerror_peer_reset", "run_with_locale", 'temp_umask', "transient_internet", "set_memlimit", "bigmemtest", "bigaddrspacetest", "BasicTestRunner", "run_unittest", "run_doctest", "threading_setup", "threading_cleanup", "reap_children", "cpython_only", "check_impl_detail", "get_attribute", "swap_item", "swap_attr", "requires_IEEE_754", "TestHandler", "Matcher", "can_symlink", "skip_unless_symlink", - "skip_unless_xattr", "import_fresh_module", "requires_zlib", - "PIPE_MAX_SIZE", "failfast", "anticipate_failure", "run_with_tz", - "requires_bz2", "requires_lzma" + 
"import_fresh_module", "requires_zlib", "PIPE_MAX_SIZE", "failfast", + "anticipate_failure", "run_with_tz", "requires_bz2", "requires_lzma" ] class Error(Exception): @@ -1599,7 +1596,24 @@ def args_from_interpreter_flags(): """Return a list of command-line arguments reproducing the current settings in sys.flags and sys.warnoptions.""" - return subprocess._args_from_interpreter_flags() + flag_opt_map = { + 'bytes_warning': 'b', + 'dont_write_bytecode': 'B', + 'hash_randomization': 'R', + 'ignore_environment': 'E', + 'no_user_site': 's', + 'no_site': 'S', + 'optimize': 'O', + 'verbose': 'v', + } + args = [] + for flag, opt in flag_opt_map.items(): + v = getattr(sys.flags, flag) + if v > 0: + args.append('-' + opt * v) + for opt in sys.warnoptions: + args.append('-W' + opt) + return args #============================================================ # Support for assertions about logging. @@ -1697,13 +1711,9 @@ if not hasattr(os, "setxattr"): can = False else: - tmp_fp, tmp_name = tempfile.mkstemp() try: with open(TESTFN, "wb") as fp: try: - # TESTFN & tempfile may use different file systems with - # different capabilities - os.fsetxattr(tmp_fp, b"user.test", b"") os.fsetxattr(fp.fileno(), b"user.test", b"") # Kernels < 2.6.39 don't respect setxattr flags. kernel_version = platform.release() @@ -1713,7 +1723,6 @@ can = False finally: unlink(TESTFN) - unlink(tmp_name) _can_xattr = can return can diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_bisect.py --- a/Lib/test/test_bisect.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_bisect.py Wed May 23 21:09:05 2012 +0200 @@ -23,28 +23,6 @@ import bisect as c_bisect -class Range(object): - """A trivial range()-like object without any integer width limitations.""" - def __init__(self, start, stop): - self.start = start - self.stop = stop - self.last_insert = None - - def __len__(self): - return self.stop - self.start - - def __getitem__(self, idx): - n = self.stop - self.start - if idx < 0: - idx += n - if idx >= n: - raise IndexError(idx) - return self.start + idx - - def insert(self, idx, item): - self.last_insert = idx, item - - class TestBisect(unittest.TestCase): module = None @@ -147,28 +125,9 @@ def test_large_range(self): # Issue 13496 mod = self.module - n = sys.maxsize - data = range(n-1) - self.assertEqual(mod.bisect_left(data, n-3), n-3) - self.assertEqual(mod.bisect_right(data, n-3), n-2) - self.assertEqual(mod.bisect_left(data, n-3, n-10, n), n-3) - self.assertEqual(mod.bisect_right(data, n-3, n-10, n), n-2) - - def test_large_pyrange(self): - # Same as above, but without C-imposed limits on range() parameters - mod = self.module - n = sys.maxsize - data = Range(0, n-1) - self.assertEqual(mod.bisect_left(data, n-3), n-3) - self.assertEqual(mod.bisect_right(data, n-3), n-2) - self.assertEqual(mod.bisect_left(data, n-3, n-10, n), n-3) - self.assertEqual(mod.bisect_right(data, n-3, n-10, n), n-2) - x = n - 100 - mod.insort_left(data, x, x - 50, x + 50) - self.assertEqual(data.last_insert, (x, x)) - x = n - 200 - mod.insort_right(data, x, x - 50, x + 50) - self.assertEqual(data.last_insert, (x + 1, x)) + data = range(sys.maxsize-1) + self.assertEqual(mod.bisect_left(data, sys.maxsize-3), sys.maxsize-3) + self.assertEqual(mod.bisect_right(data, sys.maxsize-3), sys.maxsize-2) def test_random(self, n=25): from random import randrange diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_buffer.py --- a/Lib/test/test_buffer.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_buffer.py Wed May 23 21:09:05 2012 +0200 @@ -747,8 +747,10 @@ 
class TestBufferProtocol(unittest.TestCase): def setUp(self): - # The suboffsets tests need sizeof(void *). - self.sizeof_void_p = get_sizeof_void_p() + self.sizeof_void_p = get_config_var('SIZEOF_VOID_P') \ + if sys.platform != 'darwin' else None + if not self.sizeof_void_p: + self.sizeof_void_p = 8 if sys.maxsize > 2**32 else 4 def verify(self, result, obj=-1, itemsize={1}, fmt=-1, readonly={1}, diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_cmd_line_script.py --- a/Lib/test/test_cmd_line_script.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_cmd_line_script.py Wed May 23 21:09:05 2012 +0200 @@ -7,7 +7,6 @@ import os.path import py_compile -import textwrap from test import support from test.script_helper import ( make_pkg, make_script, make_zip_pkg, make_zip_script, @@ -287,24 +286,6 @@ self._check_output(script_name, rc, out, script_name, script_name, '', '') - def test_pep_409_verbiage(self): - # Make sure PEP 409 syntax properly suppresses - # the context of an exception - script = textwrap.dedent("""\ - try: - raise ValueError - except: - raise NameError from None - """) - with temp_dir() as script_dir: - script_name = _make_test_script(script_dir, 'script', script) - exitcode, stdout, stderr = assert_python_failure(script_name) - text = stderr.decode('ascii').split('\n') - self.assertEqual(len(text), 4) - self.assertTrue(text[0].startswith('Traceback')) - self.assertTrue(text[1].startswith(' File ')) - self.assertTrue(text[3].startswith('NameError')) - def test_main(): support.run_unittest(CmdLineTest) support.reap_children() diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_contextlib.py --- a/Lib/test/test_contextlib.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_contextlib.py Wed May 23 21:09:05 2012 +0200 @@ -370,129 +370,6 @@ self.assertEqual(state, [1, 'something else', 999]) -class TestExitStack(unittest.TestCase): - - def test_no_resources(self): - with ExitStack(): - pass - - def test_callback(self): - expected = [ - ((), {}), - ((1,), {}), - ((1,2), {}), - ((), dict(example=1)), - ((1,), dict(example=1)), - ((1,2), dict(example=1)), - ] - result = [] - def _exit(*args, **kwds): - """Test metadata propagation""" - result.append((args, kwds)) - with ExitStack() as stack: - for args, kwds in reversed(expected): - if args and kwds: - f = stack.callback(_exit, *args, **kwds) - elif args: - f = stack.callback(_exit, *args) - elif kwds: - f = stack.callback(_exit, **kwds) - else: - f = stack.callback(_exit) - self.assertIs(f, _exit) - for wrapper in stack._exit_callbacks: - self.assertIs(wrapper.__wrapped__, _exit) - self.assertNotEqual(wrapper.__name__, _exit.__name__) - self.assertIsNone(wrapper.__doc__, _exit.__doc__) - self.assertEqual(result, expected) - - def test_push(self): - exc_raised = ZeroDivisionError - def _expect_exc(exc_type, exc, exc_tb): - self.assertIs(exc_type, exc_raised) - def _suppress_exc(*exc_details): - return True - def _expect_ok(exc_type, exc, exc_tb): - self.assertIsNone(exc_type) - self.assertIsNone(exc) - self.assertIsNone(exc_tb) - class ExitCM(object): - def __init__(self, check_exc): - self.check_exc = check_exc - def __enter__(self): - self.fail("Should not be called!") - def __exit__(self, *exc_details): - self.check_exc(*exc_details) - with ExitStack() as stack: - stack.push(_expect_ok) - self.assertIs(stack._exit_callbacks[-1], _expect_ok) - cm = ExitCM(_expect_ok) - stack.push(cm) - self.assertIs(stack._exit_callbacks[-1].__self__, cm) - stack.push(_suppress_exc) - self.assertIs(stack._exit_callbacks[-1], _suppress_exc) - 
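# Editor's aside, not part of the patch: the tests removed in this hunk cover
# contextlib.ExitStack, which this backout deletes along with its docs. A
# minimal sketch of the LIFO callback ordering those tests rely on, assuming
# the ExitStack API as it existed on the branch being reverted:
from contextlib import ExitStack

def _exit_stack_demo():
    log = []
    with ExitStack() as stack:
        stack.callback(log.append, "registered first, runs last")
        stack.callback(log.append, "registered second, runs first")
        log.append("body")
    return log
    # -> ['body', 'registered second, runs first', 'registered first, runs last']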
cm = ExitCM(_expect_exc) - stack.push(cm) - self.assertIs(stack._exit_callbacks[-1].__self__, cm) - stack.push(_expect_exc) - self.assertIs(stack._exit_callbacks[-1], _expect_exc) - stack.push(_expect_exc) - self.assertIs(stack._exit_callbacks[-1], _expect_exc) - 1/0 - - def test_enter_context(self): - class TestCM(object): - def __enter__(self): - result.append(1) - def __exit__(self, *exc_details): - result.append(3) - - result = [] - cm = TestCM() - with ExitStack() as stack: - @stack.callback # Registered first => cleaned up last - def _exit(): - result.append(4) - self.assertIsNotNone(_exit) - stack.enter_context(cm) - self.assertIs(stack._exit_callbacks[-1].__self__, cm) - result.append(2) - self.assertEqual(result, [1, 2, 3, 4]) - - def test_close(self): - result = [] - with ExitStack() as stack: - @stack.callback - def _exit(): - result.append(1) - self.assertIsNotNone(_exit) - stack.close() - result.append(2) - self.assertEqual(result, [1, 2]) - - def test_pop_all(self): - result = [] - with ExitStack() as stack: - @stack.callback - def _exit(): - result.append(3) - self.assertIsNotNone(_exit) - new_stack = stack.pop_all() - result.append(1) - result.append(2) - new_stack.close() - self.assertEqual(result, [1, 2, 3]) - - def test_instance_bypass(self): - class Example(object): pass - cm = Example() - cm.__exit__ = object() - stack = ExitStack() - self.assertRaises(AttributeError, stack.enter_context, cm) - stack.push(cm) - self.assertIs(stack._exit_callbacks[-1], cm) - - # This is needed to make the test actually run under regrtest.py! def test_main(): support.run_unittest(__name__) diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_exceptions.py --- a/Lib/test/test_exceptions.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_exceptions.py Wed May 23 21:09:05 2012 +0200 @@ -388,18 +388,18 @@ def testChainingAttrs(self): e = Exception() self.assertIsNone(e.__context__) - self.assertIsNone(e.__cause__) + self.assertIs(e.__cause__, Ellipsis) e = TypeError() self.assertIsNone(e.__context__) - self.assertIsNone(e.__cause__) + self.assertIs(e.__cause__, Ellipsis) class MyException(EnvironmentError): pass e = MyException() self.assertIsNone(e.__context__) - self.assertIsNone(e.__cause__) + self.assertIs(e.__cause__, Ellipsis) def testChainingDescriptors(self): try: @@ -408,16 +408,15 @@ e = exc self.assertIsNone(e.__context__) - self.assertIsNone(e.__cause__) - self.assertFalse(e.__suppress_context__) + self.assertIs(e.__cause__, Ellipsis) e.__context__ = NameError() e.__cause__ = None self.assertIsInstance(e.__context__, NameError) self.assertIsNone(e.__cause__) - self.assertTrue(e.__suppress_context__) - e.__suppress_context__ = False - self.assertFalse(e.__suppress_context__) + + e.__cause__ = Ellipsis + self.assertIs(e.__cause__, Ellipsis) def testKeywordArgs(self): # test that builtin exception don't take keyword args, diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_hashlib.py --- a/Lib/test/test_hashlib.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_hashlib.py Wed May 23 21:09:05 2012 +0200 @@ -9,7 +9,6 @@ import array import hashlib import itertools -import os import sys try: import threading @@ -38,8 +37,7 @@ 'sha224', 'SHA224', 'sha256', 'SHA256', 'sha384', 'SHA384', 'sha512', 'SHA512' ) - # Issue #14693: fallback modules are always compiled under POSIX - _warn_on_extension_import = os.name == 'posix' or COMPILED_WITH_PYDEBUG + _warn_on_extension_import = COMPILED_WITH_PYDEBUG def _conditional_import_module(self, module_name): """Import a module and return a 
reference to it or None on failure.""" diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_http_cookies.py --- a/Lib/test/test_http_cookies.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_http_cookies.py Wed May 23 21:09:05 2012 +0200 @@ -95,13 +95,13 @@ # loading 'expires' C = cookies.SimpleCookie() - C.load('Customer="W"; expires=Wed, 01 Jan 2010 00:00:00 GMT') + C.load('Customer="W"; expires=Wed, 01-Jan-2010 00:00:00 GMT') self.assertEqual(C['Customer']['expires'], - 'Wed, 01 Jan 2010 00:00:00 GMT') + 'Wed, 01-Jan-2010 00:00:00 GMT') C = cookies.SimpleCookie() - C.load('Customer="W"; expires=Wed, 01 Jan 98 00:00:00 GMT') + C.load('Customer="W"; expires=Wed, 01-Jan-98 00:00:00 GMT') self.assertEqual(C['Customer']['expires'], - 'Wed, 01 Jan 98 00:00:00 GMT') + 'Wed, 01-Jan-98 00:00:00 GMT') # 'max-age' C = cookies.SimpleCookie('Customer="WILE_E_COYOTE"') diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_httplib.py --- a/Lib/test/test_httplib.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_httplib.py Wed May 23 21:09:05 2012 +0200 @@ -99,34 +99,6 @@ conn.request('POST', '/', body, headers) self.assertEqual(conn._buffer.count[header.lower()], 1) - def test_content_length_0(self): - - class ContentLengthChecker(list): - def __init__(self): - list.__init__(self) - self.content_length = None - def append(self, item): - kv = item.split(b':', 1) - if len(kv) > 1 and kv[0].lower() == b'content-length': - self.content_length = kv[1].strip() - list.append(self, item) - - # POST with empty body - conn = client.HTTPConnection('example.com') - conn.sock = FakeSocket(None) - conn._buffer = ContentLengthChecker() - conn.request('POST', '/', '') - self.assertEqual(conn._buffer.content_length, b'0', - 'Header Content-Length not set') - - # PUT request with empty body - conn = client.HTTPConnection('example.com') - conn.sock = FakeSocket(None) - conn._buffer = ContentLengthChecker() - conn.request('PUT', '/', '') - self.assertEqual(conn._buffer.content_length, b'0', - 'Header Content-Length not set') - def test_putheader(self): conn = client.HTTPConnection('example.com') conn.sock = FakeSocket(None) diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_ipaddress.py --- a/Lib/test/test_ipaddress.py Wed May 23 22:26:55 2012 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1160 +0,0 @@ -#!/usr/bin/python3 -# -# Copyright 2007 Google Inc. -# Licensed to PSF under a Contributor Agreement. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
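# Editor's aside, not part of the patch: the file being removed here is the
# test suite for the provisional ipaddress module that this backout also
# deletes. For orientation, the factory functions the deleted tests exercise
# throughout, assuming the module as it stood before the revert:
import ipaddress

addr = ipaddress.ip_address('192.0.2.1')         # IPv4Address
net = ipaddress.ip_network('192.0.2.0/24')       # IPv4Network
iface = ipaddress.ip_interface('192.0.2.1/24')   # IPv4Interface (address + netmask)
print(addr, net, iface)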
- -"""Unittest for ipaddressmodule.""" - - -import unittest -import time -import ipaddress - -# Compatibility function to cast str to bytes objects -_cb = lambda bytestr: bytes(bytestr, 'charmap') - -class IpaddrUnitTest(unittest.TestCase): - - def setUp(self): - self.ipv4_address = ipaddress.IPv4Address('1.2.3.4') - self.ipv4_interface = ipaddress.IPv4Interface('1.2.3.4/24') - self.ipv4_network = ipaddress.IPv4Network('1.2.3.0/24') - #self.ipv4_hostmask = ipaddress.IPv4Interface('10.0.0.1/0.255.255.255') - self.ipv6_address = ipaddress.IPv6Interface( - '2001:658:22a:cafe:200:0:0:1') - self.ipv6_interface = ipaddress.IPv6Interface( - '2001:658:22a:cafe:200:0:0:1/64') - self.ipv6_network = ipaddress.IPv6Network('2001:658:22a:cafe::/64') - - def testRepr(self): - self.assertEqual("IPv4Interface('1.2.3.4/32')", - repr(ipaddress.IPv4Interface('1.2.3.4'))) - self.assertEqual("IPv6Interface('::1/128')", - repr(ipaddress.IPv6Interface('::1'))) - - # issue57 - def testAddressIntMath(self): - self.assertEqual(ipaddress.IPv4Address('1.1.1.1') + 255, - ipaddress.IPv4Address('1.1.2.0')) - self.assertEqual(ipaddress.IPv4Address('1.1.1.1') - 256, - ipaddress.IPv4Address('1.1.0.1')) - self.assertEqual(ipaddress.IPv6Address('::1') + (2**16 - 2), - ipaddress.IPv6Address('::ffff')) - self.assertEqual(ipaddress.IPv6Address('::ffff') - (2**16 - 2), - ipaddress.IPv6Address('::1')) - - def testInvalidStrings(self): - def AssertInvalidIP(ip_str): - self.assertRaises(ValueError, ipaddress.ip_address, ip_str) - AssertInvalidIP("") - AssertInvalidIP("016.016.016.016") - AssertInvalidIP("016.016.016") - AssertInvalidIP("016.016") - AssertInvalidIP("016") - AssertInvalidIP("000.000.000.000") - AssertInvalidIP("000") - AssertInvalidIP("0x0a.0x0a.0x0a.0x0a") - AssertInvalidIP("0x0a.0x0a.0x0a") - AssertInvalidIP("0x0a.0x0a") - AssertInvalidIP("0x0a") - AssertInvalidIP("42.42.42.42.42") - AssertInvalidIP("42.42.42") - AssertInvalidIP("42.42") - AssertInvalidIP("42") - AssertInvalidIP("42..42.42") - AssertInvalidIP("42..42.42.42") - AssertInvalidIP("42.42.42.42.") - AssertInvalidIP("42.42.42.42...") - AssertInvalidIP(".42.42.42.42") - AssertInvalidIP("...42.42.42.42") - AssertInvalidIP("42.42.42.-0") - AssertInvalidIP("42.42.42.+0") - AssertInvalidIP(".") - AssertInvalidIP("...") - AssertInvalidIP("bogus") - AssertInvalidIP("bogus.com") - AssertInvalidIP("192.168.0.1.com") - AssertInvalidIP("12345.67899.-54321.-98765") - AssertInvalidIP("257.0.0.0") - AssertInvalidIP("42.42.42.-42") - AssertInvalidIP("3ffe::1.net") - AssertInvalidIP("3ffe::1::1") - AssertInvalidIP("1::2::3::4:5") - AssertInvalidIP("::7:6:5:4:3:2:") - AssertInvalidIP(":6:5:4:3:2:1::") - AssertInvalidIP("2001::db:::1") - AssertInvalidIP("FEDC:9878") - AssertInvalidIP("+1.+2.+3.4") - AssertInvalidIP("1.2.3.4e0") - AssertInvalidIP("::7:6:5:4:3:2:1:0") - AssertInvalidIP("7:6:5:4:3:2:1:0::") - AssertInvalidIP("9:8:7:6:5:4:3::2:1") - AssertInvalidIP("0:1:2:3::4:5:6:7") - AssertInvalidIP("3ffe:0:0:0:0:0:0:0:1") - AssertInvalidIP("3ffe::10000") - AssertInvalidIP("3ffe::goog") - AssertInvalidIP("3ffe::-0") - AssertInvalidIP("3ffe::+0") - AssertInvalidIP("3ffe::-1") - AssertInvalidIP(":") - AssertInvalidIP(":::") - AssertInvalidIP("::1.2.3") - AssertInvalidIP("::1.2.3.4.5") - AssertInvalidIP("::1.2.3.4:") - AssertInvalidIP("1.2.3.4::") - AssertInvalidIP("2001:db8::1:") - AssertInvalidIP(":2001:db8::1") - AssertInvalidIP(":1:2:3:4:5:6:7") - AssertInvalidIP("1:2:3:4:5:6:7:") - AssertInvalidIP(":1:2:3:4:5:6:") - - self.assertRaises(ipaddress.AddressValueError, - 
ipaddress.IPv4Interface, '') - self.assertRaises(ipaddress.AddressValueError, ipaddress.IPv4Interface, - 'google.com') - self.assertRaises(ipaddress.AddressValueError, ipaddress.IPv4Interface, - '::1.2.3.4') - self.assertRaises(ipaddress.AddressValueError, - ipaddress.IPv6Interface, '') - self.assertRaises(ipaddress.AddressValueError, ipaddress.IPv6Interface, - 'google.com') - self.assertRaises(ipaddress.AddressValueError, ipaddress.IPv6Interface, - '1.2.3.4') - self.assertRaises(ipaddress.AddressValueError, ipaddress.IPv6Interface, - 'cafe:cafe::/128/190') - self.assertRaises(ipaddress.AddressValueError, ipaddress.IPv6Interface, - '1234:axy::b') - self.assertRaises(ipaddress.AddressValueError, ipaddress.IPv6Address, - '1234:axy::b') - self.assertRaises(ipaddress.AddressValueError, ipaddress.IPv6Address, - '2001:db8:::1') - self.assertRaises(ipaddress.AddressValueError, ipaddress.IPv6Address, - '2001:888888::1') - self.assertRaises(ipaddress.AddressValueError, - ipaddress.IPv4Address(1)._ip_int_from_string, - '1.a.2.3') - self.assertEqual(False, ipaddress.IPv4Interface(1)._is_hostmask( - '1.a.2.3')) - - def testGetNetwork(self): - self.assertEqual(int(self.ipv4_network.network_address), 16909056) - self.assertEqual(str(self.ipv4_network.network_address), '1.2.3.0') - - self.assertEqual(int(self.ipv6_network.network_address), - 42540616829182469433403647294022090752) - self.assertEqual(str(self.ipv6_network.network_address), - '2001:658:22a:cafe::') - self.assertEqual(str(self.ipv6_network.hostmask), - '::ffff:ffff:ffff:ffff') - - def testBadVersionComparison(self): - # These should always raise TypeError - v4addr = ipaddress.ip_address('1.1.1.1') - v4net = ipaddress.ip_network('1.1.1.1') - v6addr = ipaddress.ip_address('::1') - v6net = ipaddress.ip_address('::1') - - self.assertRaises(TypeError, v4addr.__lt__, v6addr) - self.assertRaises(TypeError, v4addr.__gt__, v6addr) - self.assertRaises(TypeError, v4net.__lt__, v6net) - self.assertRaises(TypeError, v4net.__gt__, v6net) - - self.assertRaises(TypeError, v6addr.__lt__, v4addr) - self.assertRaises(TypeError, v6addr.__gt__, v4addr) - self.assertRaises(TypeError, v6net.__lt__, v4net) - self.assertRaises(TypeError, v6net.__gt__, v4net) - - def testMixedTypeComparison(self): - v4addr = ipaddress.ip_address('1.1.1.1') - v4net = ipaddress.ip_network('1.1.1.1/32') - v6addr = ipaddress.ip_address('::1') - v6net = ipaddress.ip_network('::1/128') - - self.assertFalse(v4net.__contains__(v6net)) - self.assertFalse(v6net.__contains__(v4net)) - - self.assertRaises(TypeError, lambda: v4addr < v4net) - self.assertRaises(TypeError, lambda: v4addr > v4net) - self.assertRaises(TypeError, lambda: v4net < v4addr) - self.assertRaises(TypeError, lambda: v4net > v4addr) - - self.assertRaises(TypeError, lambda: v6addr < v6net) - self.assertRaises(TypeError, lambda: v6addr > v6net) - self.assertRaises(TypeError, lambda: v6net < v6addr) - self.assertRaises(TypeError, lambda: v6net > v6addr) - - # with get_mixed_type_key, you can sort addresses and network. 
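# Editor's aside, not part of the patch: a compact version of the mixed-type
# sort the assertions just below verify; get_mixed_type_key gives addresses
# and networks a shared ordering key, with the bare address sorting first:
import ipaddress

mixed = [ipaddress.ip_network('1.1.1.1/32'), ipaddress.ip_address('1.1.1.1')]
print(sorted(mixed, key=ipaddress.get_mixed_type_key))
# [IPv4Address('1.1.1.1'), IPv4Network('1.1.1.1/32')]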
- self.assertEqual([v4addr, v4net], - sorted([v4net, v4addr], - key=ipaddress.get_mixed_type_key)) - self.assertEqual([v6addr, v6net], - sorted([v6net, v6addr], - key=ipaddress.get_mixed_type_key)) - - def testIpFromInt(self): - self.assertEqual(self.ipv4_interface._ip, - ipaddress.IPv4Interface(16909060)._ip) - self.assertRaises(ipaddress.AddressValueError, - ipaddress.IPv4Interface, 2**32) - self.assertRaises(ipaddress.AddressValueError, - ipaddress.IPv4Interface, -1) - - ipv4 = ipaddress.ip_network('1.2.3.4') - ipv6 = ipaddress.ip_network('2001:658:22a:cafe:200:0:0:1') - self.assertEqual(ipv4, ipaddress.ip_network(int(ipv4))) - self.assertEqual(ipv6, ipaddress.ip_network(int(ipv6))) - - v6_int = 42540616829182469433547762482097946625 - self.assertEqual(self.ipv6_interface._ip, - ipaddress.IPv6Interface(v6_int)._ip) - self.assertRaises(ipaddress.AddressValueError, - ipaddress.IPv6Interface, 2**128) - self.assertRaises(ipaddress.AddressValueError, - ipaddress.IPv6Interface, -1) - - self.assertEqual(ipaddress.ip_network(self.ipv4_address._ip).version, 4) - self.assertEqual(ipaddress.ip_network(self.ipv6_address._ip).version, 6) - - def testIpFromPacked(self): - ip = ipaddress.ip_network - - self.assertEqual(self.ipv4_interface._ip, - ipaddress.ip_interface(_cb('\x01\x02\x03\x04'))._ip) - self.assertEqual(ip('255.254.253.252'), - ip(_cb('\xff\xfe\xfd\xfc'))) - self.assertRaises(ValueError, ipaddress.ip_network, _cb('\x00' * 3)) - self.assertRaises(ValueError, ipaddress.ip_network, _cb('\x00' * 5)) - self.assertEqual(self.ipv6_interface.ip, - ipaddress.ip_interface( - _cb('\x20\x01\x06\x58\x02\x2a\xca\xfe' - '\x02\x00\x00\x00\x00\x00\x00\x01')).ip) - self.assertEqual(ip('ffff:2:3:4:ffff::'), - ip(_cb('\xff\xff\x00\x02\x00\x03\x00\x04' + - '\xff\xff' + '\x00' * 6))) - self.assertEqual(ip('::'), - ip(_cb('\x00' * 16))) - self.assertRaises(ValueError, ip, _cb('\x00' * 15)) - self.assertRaises(ValueError, ip, _cb('\x00' * 17)) - - def testGetIp(self): - self.assertEqual(int(self.ipv4_interface.ip), 16909060) - self.assertEqual(str(self.ipv4_interface.ip), '1.2.3.4') - - self.assertEqual(int(self.ipv6_interface.ip), - 42540616829182469433547762482097946625) - self.assertEqual(str(self.ipv6_interface.ip), - '2001:658:22a:cafe:200::1') - - def testGetNetmask(self): - self.assertEqual(int(self.ipv4_network.netmask), 4294967040) - self.assertEqual(str(self.ipv4_network.netmask), '255.255.255.0') - self.assertEqual(int(self.ipv6_network.netmask), - 340282366920938463444927863358058659840) - self.assertEqual(self.ipv6_network.prefixlen, 64) - - def testZeroNetmask(self): - ipv4_zero_netmask = ipaddress.IPv4Interface('1.2.3.4/0') - self.assertEqual(int(ipv4_zero_netmask.network.netmask), 0) - self.assertTrue(ipv4_zero_netmask.network._is_valid_netmask( - str(0))) - - ipv6_zero_netmask = ipaddress.IPv6Interface('::1/0') - self.assertEqual(int(ipv6_zero_netmask.network.netmask), 0) - self.assertTrue(ipv6_zero_netmask.network._is_valid_netmask( - str(0))) - - def testGetBroadcast(self): - self.assertEqual(int(self.ipv4_network.broadcast_address), 16909311) - self.assertEqual(str(self.ipv4_network.broadcast_address), '1.2.3.255') - - self.assertEqual(int(self.ipv6_network.broadcast_address), - 42540616829182469451850391367731642367) - self.assertEqual(str(self.ipv6_network.broadcast_address), - '2001:658:22a:cafe:ffff:ffff:ffff:ffff') - - def testGetPrefixlen(self): - self.assertEqual(self.ipv4_interface.prefixlen, 24) - self.assertEqual(self.ipv6_interface.prefixlen, 64) - - def testGetSupernet(self): - 
self.assertEqual(self.ipv4_network.supernet().prefixlen, 23) - self.assertEqual(str(self.ipv4_network.supernet().network_address), - '1.2.2.0') - self.assertEqual( - ipaddress.IPv4Interface('0.0.0.0/0').network.supernet(), - ipaddress.IPv4Network('0.0.0.0/0')) - - self.assertEqual(self.ipv6_network.supernet().prefixlen, 63) - self.assertEqual(str(self.ipv6_network.supernet().network_address), - '2001:658:22a:cafe::') - self.assertEqual(ipaddress.IPv6Interface('::0/0').network.supernet(), - ipaddress.IPv6Network('::0/0')) - - def testGetSupernet3(self): - self.assertEqual(self.ipv4_network.supernet(3).prefixlen, 21) - self.assertEqual(str(self.ipv4_network.supernet(3).network_address), - '1.2.0.0') - - self.assertEqual(self.ipv6_network.supernet(3).prefixlen, 61) - self.assertEqual(str(self.ipv6_network.supernet(3).network_address), - '2001:658:22a:caf8::') - - def testGetSupernet4(self): - self.assertRaises(ValueError, self.ipv4_network.supernet, - prefixlen_diff=2, new_prefix=1) - self.assertRaises(ValueError, self.ipv4_network.supernet, new_prefix=25) - self.assertEqual(self.ipv4_network.supernet(prefixlen_diff=2), - self.ipv4_network.supernet(new_prefix=22)) - - self.assertRaises(ValueError, self.ipv6_network.supernet, - prefixlen_diff=2, new_prefix=1) - self.assertRaises(ValueError, self.ipv6_network.supernet, new_prefix=65) - self.assertEqual(self.ipv6_network.supernet(prefixlen_diff=2), - self.ipv6_network.supernet(new_prefix=62)) - - def testHosts(self): - self.assertEqual([ipaddress.IPv4Address('2.0.0.0'), - ipaddress.IPv4Address('2.0.0.1')], - list(ipaddress.ip_network('2.0.0.0/31').hosts())) - - def testFancySubnetting(self): - self.assertEqual(sorted(self.ipv4_network.subnets(prefixlen_diff=3)), - sorted(self.ipv4_network.subnets(new_prefix=27))) - self.assertRaises(ValueError, list, - self.ipv4_network.subnets(new_prefix=23)) - self.assertRaises(ValueError, list, - self.ipv4_network.subnets(prefixlen_diff=3, - new_prefix=27)) - self.assertEqual(sorted(self.ipv6_network.subnets(prefixlen_diff=4)), - sorted(self.ipv6_network.subnets(new_prefix=68))) - self.assertRaises(ValueError, list, - self.ipv6_network.subnets(new_prefix=63)) - self.assertRaises(ValueError, list, - self.ipv6_network.subnets(prefixlen_diff=4, - new_prefix=68)) - - def testGetSubnets(self): - self.assertEqual(list(self.ipv4_network.subnets())[0].prefixlen, 25) - self.assertEqual(str(list( - self.ipv4_network.subnets())[0].network_address), - '1.2.3.0') - self.assertEqual(str(list( - self.ipv4_network.subnets())[1].network_address), - '1.2.3.128') - - self.assertEqual(list(self.ipv6_network.subnets())[0].prefixlen, 65) - - def testGetSubnetForSingle32(self): - ip = ipaddress.IPv4Network('1.2.3.4/32') - subnets1 = [str(x) for x in ip.subnets()] - subnets2 = [str(x) for x in ip.subnets(2)] - self.assertEqual(subnets1, ['1.2.3.4/32']) - self.assertEqual(subnets1, subnets2) - - def testGetSubnetForSingle128(self): - ip = ipaddress.IPv6Network('::1/128') - subnets1 = [str(x) for x in ip.subnets()] - subnets2 = [str(x) for x in ip.subnets(2)] - self.assertEqual(subnets1, ['::1/128']) - self.assertEqual(subnets1, subnets2) - - def testSubnet2(self): - ips = [str(x) for x in self.ipv4_network.subnets(2)] - self.assertEqual( - ips, - ['1.2.3.0/26', '1.2.3.64/26', '1.2.3.128/26', '1.2.3.192/26']) - - ipsv6 = [str(x) for x in self.ipv6_network.subnets(2)] - self.assertEqual( - ipsv6, - ['2001:658:22a:cafe::/66', - '2001:658:22a:cafe:4000::/66', - '2001:658:22a:cafe:8000::/66', - '2001:658:22a:cafe:c000::/66']) - - def 
testSubnetFailsForLargeCidrDiff(self): - self.assertRaises(ValueError, list, - self.ipv4_interface.network.subnets(9)) - self.assertRaises(ValueError, list, - self.ipv4_network.subnets(9)) - self.assertRaises(ValueError, list, - self.ipv6_interface.network.subnets(65)) - self.assertRaises(ValueError, list, - self.ipv6_network.subnets(65)) - - def testSupernetFailsForLargeCidrDiff(self): - self.assertRaises(ValueError, - self.ipv4_interface.network.supernet, 25) - self.assertRaises(ValueError, - self.ipv6_interface.network.supernet, 65) - - def testSubnetFailsForNegativeCidrDiff(self): - self.assertRaises(ValueError, list, - self.ipv4_interface.network.subnets(-1)) - self.assertRaises(ValueError, list, - self.ipv4_network.network.subnets(-1)) - self.assertRaises(ValueError, list, - self.ipv6_interface.network.subnets(-1)) - self.assertRaises(ValueError, list, - self.ipv6_network.subnets(-1)) - - def testGetNum_Addresses(self): - self.assertEqual(self.ipv4_network.num_addresses, 256) - self.assertEqual(list(self.ipv4_network.subnets())[0].num_addresses, 128) - self.assertEqual(self.ipv4_network.supernet().num_addresses, 512) - - self.assertEqual(self.ipv6_network.num_addresses, 18446744073709551616) - self.assertEqual(list(self.ipv6_network.subnets())[0].num_addresses, - 9223372036854775808) - self.assertEqual(self.ipv6_network.supernet().num_addresses, - 36893488147419103232) - - def testContains(self): - self.assertTrue(ipaddress.IPv4Interface('1.2.3.128/25') in - self.ipv4_network) - self.assertFalse(ipaddress.IPv4Interface('1.2.4.1/24') in - self.ipv4_network) - # We can test addresses and string as well. - addr1 = ipaddress.IPv4Address('1.2.3.37') - self.assertTrue(addr1 in self.ipv4_network) - # issue 61, bad network comparison on like-ip'd network objects - # with identical broadcast addresses. 
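# Editor's aside, not part of the patch: a membership sketch matching the
# testContains assertions above; a plain address can be tested against a
# network with the `in` operator (the test also covers interface objects):
import ipaddress

net = ipaddress.ip_network('1.2.3.0/24')
print(ipaddress.ip_address('1.2.3.37') in net)   # True
print(ipaddress.ip_address('1.2.4.1') in net)    # False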
- self.assertFalse(ipaddress.IPv4Network('1.1.0.0/16').__contains__( - ipaddress.IPv4Network('1.0.0.0/15'))) - - def testBadAddress(self): - self.assertRaises(ipaddress.AddressValueError, ipaddress.IPv4Interface, - 'poop') - self.assertRaises(ipaddress.AddressValueError, - ipaddress.IPv4Interface, '1.2.3.256') - - self.assertRaises(ipaddress.AddressValueError, ipaddress.IPv6Interface, - 'poopv6') - self.assertRaises(ipaddress.AddressValueError, - ipaddress.IPv4Interface, '1.2.3.4/32/24') - self.assertRaises(ipaddress.AddressValueError, - ipaddress.IPv4Interface, '10/8') - self.assertRaises(ipaddress.AddressValueError, - ipaddress.IPv6Interface, '10/8') - - - def testBadNetMask(self): - self.assertRaises(ipaddress.NetmaskValueError, - ipaddress.IPv4Interface, '1.2.3.4/') - self.assertRaises(ipaddress.NetmaskValueError, - ipaddress.IPv4Interface, '1.2.3.4/33') - self.assertRaises(ipaddress.NetmaskValueError, - ipaddress.IPv4Interface, '1.2.3.4/254.254.255.256') - self.assertRaises(ipaddress.NetmaskValueError, - ipaddress.IPv4Interface, '1.1.1.1/240.255.0.0') - self.assertRaises(ipaddress.NetmaskValueError, - ipaddress.IPv6Interface, '::1/') - self.assertRaises(ipaddress.NetmaskValueError, - ipaddress.IPv6Interface, '::1/129') - - def testNth(self): - self.assertEqual(str(self.ipv4_network[5]), '1.2.3.5') - self.assertRaises(IndexError, self.ipv4_network.__getitem__, 256) - - self.assertEqual(str(self.ipv6_network[5]), - '2001:658:22a:cafe::5') - - def testGetitem(self): - # http://code.google.com/p/ipaddr-py/issues/detail?id=15 - addr = ipaddress.IPv4Network('172.31.255.128/255.255.255.240') - self.assertEqual(28, addr.prefixlen) - addr_list = list(addr) - self.assertEqual('172.31.255.128', str(addr_list[0])) - self.assertEqual('172.31.255.128', str(addr[0])) - self.assertEqual('172.31.255.143', str(addr_list[-1])) - self.assertEqual('172.31.255.143', str(addr[-1])) - self.assertEqual(addr_list[-1], addr[-1]) - - def testEqual(self): - self.assertTrue(self.ipv4_interface == - ipaddress.IPv4Interface('1.2.3.4/24')) - self.assertFalse(self.ipv4_interface == - ipaddress.IPv4Interface('1.2.3.4/23')) - self.assertFalse(self.ipv4_interface == - ipaddress.IPv6Interface('::1.2.3.4/24')) - self.assertFalse(self.ipv4_interface == '') - self.assertFalse(self.ipv4_interface == []) - self.assertFalse(self.ipv4_interface == 2) - - self.assertTrue(self.ipv6_interface == - ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/64')) - self.assertFalse(self.ipv6_interface == - ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/63')) - self.assertFalse(self.ipv6_interface == - ipaddress.IPv4Interface('1.2.3.4/23')) - self.assertFalse(self.ipv6_interface == '') - self.assertFalse(self.ipv6_interface == []) - self.assertFalse(self.ipv6_interface == 2) - - def testNotEqual(self): - self.assertFalse(self.ipv4_interface != - ipaddress.IPv4Interface('1.2.3.4/24')) - self.assertTrue(self.ipv4_interface != - ipaddress.IPv4Interface('1.2.3.4/23')) - self.assertTrue(self.ipv4_interface != - ipaddress.IPv6Interface('::1.2.3.4/24')) - self.assertTrue(self.ipv4_interface != '') - self.assertTrue(self.ipv4_interface != []) - self.assertTrue(self.ipv4_interface != 2) - - self.assertTrue(self.ipv4_address != - ipaddress.IPv4Address('1.2.3.5')) - self.assertTrue(self.ipv4_address != '') - self.assertTrue(self.ipv4_address != []) - self.assertTrue(self.ipv4_address != 2) - - self.assertFalse(self.ipv6_interface != - ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/64')) - self.assertTrue(self.ipv6_interface != - 
ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/63')) - self.assertTrue(self.ipv6_interface != - ipaddress.IPv4Interface('1.2.3.4/23')) - self.assertTrue(self.ipv6_interface != '') - self.assertTrue(self.ipv6_interface != []) - self.assertTrue(self.ipv6_interface != 2) - - self.assertTrue(self.ipv6_address != - ipaddress.IPv4Address('1.2.3.4')) - self.assertTrue(self.ipv6_address != '') - self.assertTrue(self.ipv6_address != []) - self.assertTrue(self.ipv6_address != 2) - - def testSlash32Constructor(self): - self.assertEqual(str(ipaddress.IPv4Interface( - '1.2.3.4/255.255.255.255')), '1.2.3.4/32') - - def testSlash128Constructor(self): - self.assertEqual(str(ipaddress.IPv6Interface('::1/128')), - '::1/128') - - def testSlash0Constructor(self): - self.assertEqual(str(ipaddress.IPv4Interface('1.2.3.4/0.0.0.0')), - '1.2.3.4/0') - - def testCollapsing(self): - # test only IP addresses including some duplicates - ip1 = ipaddress.IPv4Address('1.1.1.0') - ip2 = ipaddress.IPv4Address('1.1.1.1') - ip3 = ipaddress.IPv4Address('1.1.1.2') - ip4 = ipaddress.IPv4Address('1.1.1.3') - ip5 = ipaddress.IPv4Address('1.1.1.4') - ip6 = ipaddress.IPv4Address('1.1.1.0') - # check that addreses are subsumed properly. - collapsed = ipaddress.collapse_addresses( - [ip1, ip2, ip3, ip4, ip5, ip6]) - self.assertEqual(list(collapsed), [ipaddress.IPv4Network('1.1.1.0/30'), - ipaddress.IPv4Network('1.1.1.4/32')]) - - # test a mix of IP addresses and networks including some duplicates - ip1 = ipaddress.IPv4Address('1.1.1.0') - ip2 = ipaddress.IPv4Address('1.1.1.1') - ip3 = ipaddress.IPv4Address('1.1.1.2') - ip4 = ipaddress.IPv4Address('1.1.1.3') - #ip5 = ipaddress.IPv4Interface('1.1.1.4/30') - #ip6 = ipaddress.IPv4Interface('1.1.1.4/30') - # check that addreses are subsumed properly. - collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3, ip4]) - self.assertEqual(list(collapsed), [ipaddress.IPv4Network('1.1.1.0/30')]) - - # test only IP networks - ip1 = ipaddress.IPv4Network('1.1.0.0/24') - ip2 = ipaddress.IPv4Network('1.1.1.0/24') - ip3 = ipaddress.IPv4Network('1.1.2.0/24') - ip4 = ipaddress.IPv4Network('1.1.3.0/24') - ip5 = ipaddress.IPv4Network('1.1.4.0/24') - # stored in no particular order b/c we want CollapseAddr to call [].sort - ip6 = ipaddress.IPv4Network('1.1.0.0/22') - # check that addreses are subsumed properly. - collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3, ip4, ip5, - ip6]) - self.assertEqual(list(collapsed), [ipaddress.IPv4Network('1.1.0.0/22'), - ipaddress.IPv4Network('1.1.4.0/24')]) - - # test that two addresses are supernet'ed properly - collapsed = ipaddress.collapse_addresses([ip1, ip2]) - self.assertEqual(list(collapsed), [ipaddress.IPv4Network('1.1.0.0/23')]) - - # test same IP networks - ip_same1 = ip_same2 = ipaddress.IPv4Network('1.1.1.1/32') - self.assertEqual(list(ipaddress.collapse_addresses( - [ip_same1, ip_same2])), - [ip_same1]) - - # test same IP addresses - ip_same1 = ip_same2 = ipaddress.IPv4Address('1.1.1.1') - self.assertEqual(list(ipaddress.collapse_addresses( - [ip_same1, ip_same2])), - [ipaddress.ip_network('1.1.1.1/32')]) - ip1 = ipaddress.IPv6Network('2001::/100') - ip2 = ipaddress.IPv6Network('2001::/120') - ip3 = ipaddress.IPv6Network('2001::/96') - # test that ipv6 addresses are subsumed properly. 
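# Editor's aside, not part of the patch: collapse_addresses() merges adjacent
# or overlapping networks into the smallest covering set, which is what the
# IPv4 and IPv6 cases of this test check:
import ipaddress

halves = [ipaddress.ip_network('192.0.2.0/25'),
          ipaddress.ip_network('192.0.2.128/25')]
print(list(ipaddress.collapse_addresses(halves)))
# [IPv4Network('192.0.2.0/24')]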
- collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3]) - self.assertEqual(list(collapsed), [ip3]) - - # the toejam test - ip1 = ipaddress.ip_address('1.1.1.1') - ip2 = ipaddress.ip_address('::1') - self.assertRaises(TypeError, ipaddress.collapse_addresses, - [ip1, ip2]) - - def testSummarizing(self): - #ip = ipaddress.ip_address - #ipnet = ipaddress.ip_network - summarize = ipaddress.summarize_address_range - ip1 = ipaddress.ip_address('1.1.1.0') - ip2 = ipaddress.ip_address('1.1.1.255') - # test a /24 is sumamrized properly - self.assertEqual(list(summarize(ip1, ip2))[0], - ipaddress.ip_network('1.1.1.0/24')) - # test an IPv4 range that isn't on a network byte boundary - ip2 = ipaddress.ip_address('1.1.1.8') - self.assertEqual(list(summarize(ip1, ip2)), - [ipaddress.ip_network('1.1.1.0/29'), - ipaddress.ip_network('1.1.1.8')]) - - ip1 = ipaddress.ip_address('1::') - ip2 = ipaddress.ip_address('1:ffff:ffff:ffff:ffff:ffff:ffff:ffff') - # test a IPv6 is sumamrized properly - self.assertEqual(list(summarize(ip1, ip2))[0], - ipaddress.ip_network('1::/16')) - # test an IPv6 range that isn't on a network byte boundary - ip2 = ipaddress.ip_address('2::') - self.assertEqual(list(summarize(ip1, ip2)), - [ipaddress.ip_network('1::/16'), - ipaddress.ip_network('2::/128')]) - - # test exception raised when first is greater than last - self.assertRaises(ValueError, list, - summarize(ipaddress.ip_address('1.1.1.0'), - ipaddress.ip_address('1.1.0.0'))) - # test exception raised when first and last aren't IP addresses - self.assertRaises(TypeError, list, - summarize(ipaddress.ip_network('1.1.1.0'), - ipaddress.ip_network('1.1.0.0'))) - self.assertRaises(TypeError, list, - summarize(ipaddress.ip_network('1.1.1.0'), - ipaddress.ip_network('1.1.0.0'))) - # test exception raised when first and last are not same version - self.assertRaises(TypeError, list, - summarize(ipaddress.ip_address('::'), - ipaddress.ip_network('1.1.0.0'))) - - def testAddressComparison(self): - self.assertTrue(ipaddress.ip_address('1.1.1.1') <= - ipaddress.ip_address('1.1.1.1')) - self.assertTrue(ipaddress.ip_address('1.1.1.1') <= - ipaddress.ip_address('1.1.1.2')) - self.assertTrue(ipaddress.ip_address('::1') <= - ipaddress.ip_address('::1')) - self.assertTrue(ipaddress.ip_address('::1') <= - ipaddress.ip_address('::2')) - - def testNetworkComparison(self): - # ip1 and ip2 have the same network address - ip1 = ipaddress.IPv4Network('1.1.1.0/24') - ip2 = ipaddress.IPv4Network('1.1.1.1/32') - ip3 = ipaddress.IPv4Network('1.1.2.0/24') - - self.assertTrue(ip1 < ip3) - self.assertTrue(ip3 > ip2) - - #self.assertEqual(ip1.compare_networks(ip2), 0) - #self.assertTrue(ip1._get_networks_key() == ip2._get_networks_key()) - self.assertEqual(ip1.compare_networks(ip3), -1) - self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key()) - - ip1 = ipaddress.IPv6Network('2001:2000::/96') - ip2 = ipaddress.IPv6Network('2001:2001::/96') - ip3 = ipaddress.IPv6Network('2001:ffff:2000::/96') - - self.assertTrue(ip1 < ip3) - self.assertTrue(ip3 > ip2) - self.assertEqual(ip1.compare_networks(ip3), -1) - self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key()) - - # Test comparing different protocols. - # Should always raise a TypeError. 
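# Editor's aside, not part of the patch: ordering comparisons across IP
# versions are rejected, which is exactly what the assertRaises calls below
# verify for the __lt__/__gt__ slots:
import ipaddress

try:
    ipaddress.ip_network('10.0.0.0/8') < ipaddress.ip_network('2001:db8::/32')
except TypeError as exc:
    print("refused:", exc)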
- ipv6 = ipaddress.IPv6Interface('::/0') - ipv4 = ipaddress.IPv4Interface('0.0.0.0/0') - self.assertRaises(TypeError, ipv4.__lt__, ipv6) - self.assertRaises(TypeError, ipv4.__gt__, ipv6) - self.assertRaises(TypeError, ipv6.__lt__, ipv4) - self.assertRaises(TypeError, ipv6.__gt__, ipv4) - - # Regression test for issue 19. - ip1 = ipaddress.ip_network('10.1.2.128/25') - self.assertFalse(ip1 < ip1) - self.assertFalse(ip1 > ip1) - ip2 = ipaddress.ip_network('10.1.3.0/24') - self.assertTrue(ip1 < ip2) - self.assertFalse(ip2 < ip1) - self.assertFalse(ip1 > ip2) - self.assertTrue(ip2 > ip1) - ip3 = ipaddress.ip_network('10.1.3.0/25') - self.assertTrue(ip2 < ip3) - self.assertFalse(ip3 < ip2) - self.assertFalse(ip2 > ip3) - self.assertTrue(ip3 > ip2) - - # Regression test for issue 28. - ip1 = ipaddress.ip_network('10.10.10.0/31') - ip2 = ipaddress.ip_network('10.10.10.0') - ip3 = ipaddress.ip_network('10.10.10.2/31') - ip4 = ipaddress.ip_network('10.10.10.2') - sorted = [ip1, ip2, ip3, ip4] - unsorted = [ip2, ip4, ip1, ip3] - unsorted.sort() - self.assertEqual(sorted, unsorted) - unsorted = [ip4, ip1, ip3, ip2] - unsorted.sort() - self.assertEqual(sorted, unsorted) - self.assertRaises(TypeError, ip1.__lt__, - ipaddress.ip_address('10.10.10.0')) - self.assertRaises(TypeError, ip2.__lt__, - ipaddress.ip_address('10.10.10.0')) - - # <=, >= - self.assertTrue(ipaddress.ip_network('1.1.1.1') <= - ipaddress.ip_network('1.1.1.1')) - self.assertTrue(ipaddress.ip_network('1.1.1.1') <= - ipaddress.ip_network('1.1.1.2')) - self.assertFalse(ipaddress.ip_network('1.1.1.2') <= - ipaddress.ip_network('1.1.1.1')) - self.assertTrue(ipaddress.ip_network('::1') <= - ipaddress.ip_network('::1')) - self.assertTrue(ipaddress.ip_network('::1') <= - ipaddress.ip_network('::2')) - self.assertFalse(ipaddress.ip_network('::2') <= - ipaddress.ip_network('::1')) - - def testStrictNetworks(self): - self.assertRaises(ValueError, ipaddress.ip_network, '192.168.1.1/24') - self.assertRaises(ValueError, ipaddress.ip_network, '::1/120') - - def testOverlaps(self): - other = ipaddress.IPv4Network('1.2.3.0/30') - other2 = ipaddress.IPv4Network('1.2.2.0/24') - other3 = ipaddress.IPv4Network('1.2.2.64/26') - self.assertTrue(self.ipv4_network.overlaps(other)) - self.assertFalse(self.ipv4_network.overlaps(other2)) - self.assertTrue(other2.overlaps(other3)) - - def testEmbeddedIpv4(self): - ipv4_string = '192.168.0.1' - ipv4 = ipaddress.IPv4Interface(ipv4_string) - v4compat_ipv6 = ipaddress.IPv6Interface('::%s' % ipv4_string) - self.assertEqual(int(v4compat_ipv6.ip), int(ipv4.ip)) - v4mapped_ipv6 = ipaddress.IPv6Interface('::ffff:%s' % ipv4_string) - self.assertNotEqual(v4mapped_ipv6.ip, ipv4.ip) - self.assertRaises(ipaddress.AddressValueError, ipaddress.IPv6Interface, - '2001:1.1.1.1:1.1.1.1') - - # Issue 67: IPv6 with embedded IPv4 address not recognized. 
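# Editor's aside, not part of the patch: the dotted-quad tail accepted by the
# IPv6 parser is just an alternate spelling of the low 32 bits, as the
# equalities asserted in the next test show:
import ipaddress

print(ipaddress.ip_address('::ffff:192.0.2.1') ==
      ipaddress.ip_address('::ffff:c000:201'))   # True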
- def testIPv6AddressTooLarge(self): - # RFC4291 2.5.5.2 - self.assertEqual(ipaddress.ip_address('::FFFF:192.0.2.1'), - ipaddress.ip_address('::FFFF:c000:201')) - # RFC4291 2.2 (part 3) x::d.d.d.d - self.assertEqual(ipaddress.ip_address('FFFF::192.0.2.1'), - ipaddress.ip_address('FFFF::c000:201')) - - def testIPVersion(self): - self.assertEqual(self.ipv4_address.version, 4) - self.assertEqual(self.ipv6_address.version, 6) - - with self.assertRaises(ValueError): - ipaddress.ip_address('1', version=[]) - - with self.assertRaises(ValueError): - ipaddress.ip_address('1', version=5) - - def testMaxPrefixLength(self): - self.assertEqual(self.ipv4_interface.max_prefixlen, 32) - self.assertEqual(self.ipv6_interface.max_prefixlen, 128) - - def testPacked(self): - self.assertEqual(self.ipv4_address.packed, - _cb('\x01\x02\x03\x04')) - self.assertEqual(ipaddress.IPv4Interface('255.254.253.252').packed, - _cb('\xff\xfe\xfd\xfc')) - self.assertEqual(self.ipv6_address.packed, - _cb('\x20\x01\x06\x58\x02\x2a\xca\xfe' - '\x02\x00\x00\x00\x00\x00\x00\x01')) - self.assertEqual(ipaddress.IPv6Interface('ffff:2:3:4:ffff::').packed, - _cb('\xff\xff\x00\x02\x00\x03\x00\x04\xff\xff' - + '\x00' * 6)) - self.assertEqual(ipaddress.IPv6Interface('::1:0:0:0:0').packed, - _cb('\x00' * 6 + '\x00\x01' + '\x00' * 8)) - - def testIpStrFromPrefixlen(self): - ipv4 = ipaddress.IPv4Interface('1.2.3.4/24') - self.assertEqual(ipv4._ip_string_from_prefix(), '255.255.255.0') - self.assertEqual(ipv4._ip_string_from_prefix(28), '255.255.255.240') - - def testIpType(self): - ipv4net = ipaddress.ip_network('1.2.3.4') - ipv4addr = ipaddress.ip_address('1.2.3.4') - ipv6net = ipaddress.ip_network('::1.2.3.4') - ipv6addr = ipaddress.ip_address('::1.2.3.4') - self.assertEqual(ipaddress.IPv4Network, type(ipv4net)) - self.assertEqual(ipaddress.IPv4Address, type(ipv4addr)) - self.assertEqual(ipaddress.IPv6Network, type(ipv6net)) - self.assertEqual(ipaddress.IPv6Address, type(ipv6addr)) - - def testReservedIpv4(self): - # test networks - self.assertEqual(True, ipaddress.ip_interface( - '224.1.1.1/31').is_multicast) - self.assertEqual(False, ipaddress.ip_network('240.0.0.0').is_multicast) - - self.assertEqual(True, ipaddress.ip_interface( - '192.168.1.1/17').is_private) - self.assertEqual(False, ipaddress.ip_network('192.169.0.0').is_private) - self.assertEqual(True, ipaddress.ip_network( - '10.255.255.255').is_private) - self.assertEqual(False, ipaddress.ip_network('11.0.0.0').is_private) - self.assertEqual(True, ipaddress.ip_network( - '172.31.255.255').is_private) - self.assertEqual(False, ipaddress.ip_network('172.32.0.0').is_private) - - self.assertEqual(True, - ipaddress.ip_interface( - '169.254.100.200/24').is_link_local) - self.assertEqual(False, - ipaddress.ip_interface( - '169.255.100.200/24').is_link_local) - - self.assertEqual(True, - ipaddress.ip_network( - '127.100.200.254/32').is_loopback) - self.assertEqual(True, ipaddress.ip_network( - '127.42.0.0/16').is_loopback) - self.assertEqual(False, ipaddress.ip_network('128.0.0.0').is_loopback) - - # test addresses - self.assertEqual(True, ipaddress.ip_address('224.1.1.1').is_multicast) - self.assertEqual(False, ipaddress.ip_address('240.0.0.0').is_multicast) - - self.assertEqual(True, ipaddress.ip_address('192.168.1.1').is_private) - self.assertEqual(False, ipaddress.ip_address('192.169.0.0').is_private) - self.assertEqual(True, ipaddress.ip_address( - '10.255.255.255').is_private) - self.assertEqual(False, ipaddress.ip_address('11.0.0.0').is_private) - self.assertEqual(True, 
ipaddress.ip_address( - '172.31.255.255').is_private) - self.assertEqual(False, ipaddress.ip_address('172.32.0.0').is_private) - - self.assertEqual(True, - ipaddress.ip_address('169.254.100.200').is_link_local) - self.assertEqual(False, - ipaddress.ip_address('169.255.100.200').is_link_local) - - self.assertEqual(True, - ipaddress.ip_address('127.100.200.254').is_loopback) - self.assertEqual(True, ipaddress.ip_address('127.42.0.0').is_loopback) - self.assertEqual(False, ipaddress.ip_address('128.0.0.0').is_loopback) - self.assertEqual(True, ipaddress.ip_network('0.0.0.0').is_unspecified) - - def testReservedIpv6(self): - - self.assertEqual(True, ipaddress.ip_network('ffff::').is_multicast) - self.assertEqual(True, ipaddress.ip_network(2**128-1).is_multicast) - self.assertEqual(True, ipaddress.ip_network('ff00::').is_multicast) - self.assertEqual(False, ipaddress.ip_network('fdff::').is_multicast) - - self.assertEqual(True, ipaddress.ip_network('fecf::').is_site_local) - self.assertEqual(True, ipaddress.ip_network( - 'feff:ffff:ffff:ffff::').is_site_local) - self.assertEqual(False, ipaddress.ip_network( - 'fbf:ffff::').is_site_local) - self.assertEqual(False, ipaddress.ip_network('ff00::').is_site_local) - - self.assertEqual(True, ipaddress.ip_network('fc00::').is_private) - self.assertEqual(True, ipaddress.ip_network( - 'fc00:ffff:ffff:ffff::').is_private) - self.assertEqual(False, ipaddress.ip_network('fbff:ffff::').is_private) - self.assertEqual(False, ipaddress.ip_network('fe00::').is_private) - - self.assertEqual(True, ipaddress.ip_network('fea0::').is_link_local) - self.assertEqual(True, ipaddress.ip_network( - 'febf:ffff::').is_link_local) - self.assertEqual(False, ipaddress.ip_network( - 'fe7f:ffff::').is_link_local) - self.assertEqual(False, ipaddress.ip_network('fec0::').is_link_local) - - self.assertEqual(True, ipaddress.ip_interface('0:0::0:01').is_loopback) - self.assertEqual(False, ipaddress.ip_interface('::1/127').is_loopback) - self.assertEqual(False, ipaddress.ip_network('::').is_loopback) - self.assertEqual(False, ipaddress.ip_network('::2').is_loopback) - - self.assertEqual(True, ipaddress.ip_network('0::0').is_unspecified) - self.assertEqual(False, ipaddress.ip_network('::1').is_unspecified) - self.assertEqual(False, ipaddress.ip_network('::/127').is_unspecified) - - # test addresses - self.assertEqual(True, ipaddress.ip_address('ffff::').is_multicast) - self.assertEqual(True, ipaddress.ip_address(2**128-1).is_multicast) - self.assertEqual(True, ipaddress.ip_address('ff00::').is_multicast) - self.assertEqual(False, ipaddress.ip_address('fdff::').is_multicast) - - self.assertEqual(True, ipaddress.ip_address('fecf::').is_site_local) - self.assertEqual(True, ipaddress.ip_address( - 'feff:ffff:ffff:ffff::').is_site_local) - self.assertEqual(False, ipaddress.ip_address( - 'fbf:ffff::').is_site_local) - self.assertEqual(False, ipaddress.ip_address('ff00::').is_site_local) - - self.assertEqual(True, ipaddress.ip_address('fc00::').is_private) - self.assertEqual(True, ipaddress.ip_address( - 'fc00:ffff:ffff:ffff::').is_private) - self.assertEqual(False, ipaddress.ip_address('fbff:ffff::').is_private) - self.assertEqual(False, ipaddress.ip_address('fe00::').is_private) - - self.assertEqual(True, ipaddress.ip_address('fea0::').is_link_local) - self.assertEqual(True, ipaddress.ip_address( - 'febf:ffff::').is_link_local) - self.assertEqual(False, ipaddress.ip_address( - 'fe7f:ffff::').is_link_local) - self.assertEqual(False, ipaddress.ip_address('fec0::').is_link_local) - - 
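# Editor's aside, not part of the patch: the is_* checks probed throughout
# this test are plain boolean properties on address objects; a few spot
# checks consistent with the assertions here:
import ipaddress

print(ipaddress.ip_address('fc00::').is_private)      # True
print(ipaddress.ip_address('fe80::1').is_link_local)  # True
print(ipaddress.ip_address('::1').is_loopback)        # True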
self.assertEqual(True, ipaddress.ip_address('0:0::0:01').is_loopback) - self.assertEqual(True, ipaddress.ip_address('::1').is_loopback) - self.assertEqual(False, ipaddress.ip_address('::2').is_loopback) - - self.assertEqual(True, ipaddress.ip_address('0::0').is_unspecified) - self.assertEqual(False, ipaddress.ip_address('::1').is_unspecified) - - # some generic IETF reserved addresses - self.assertEqual(True, ipaddress.ip_address('100::').is_reserved) - self.assertEqual(True, ipaddress.ip_network('4000::1/128').is_reserved) - - def testIpv4Mapped(self): - self.assertEqual(ipaddress.ip_address('::ffff:192.168.1.1').ipv4_mapped, - ipaddress.ip_address('192.168.1.1')) - self.assertEqual(ipaddress.ip_address('::c0a8:101').ipv4_mapped, None) - self.assertEqual(ipaddress.ip_address('::ffff:c0a8:101').ipv4_mapped, - ipaddress.ip_address('192.168.1.1')) - - def testAddrExclude(self): - addr1 = ipaddress.ip_network('10.1.1.0/24') - addr2 = ipaddress.ip_network('10.1.1.0/26') - addr3 = ipaddress.ip_network('10.2.1.0/24') - addr4 = ipaddress.ip_address('10.1.1.0') - self.assertEqual(sorted(list(addr1.address_exclude(addr2))), - [ipaddress.ip_network('10.1.1.64/26'), - ipaddress.ip_network('10.1.1.128/25')]) - self.assertRaises(ValueError, list, addr1.address_exclude(addr3)) - self.assertRaises(TypeError, list, addr1.address_exclude(addr4)) - self.assertEqual(list(addr1.address_exclude(addr1)), []) - - def testHash(self): - self.assertEqual(hash(ipaddress.ip_network('10.1.1.0/24')), - hash(ipaddress.ip_network('10.1.1.0/24'))) - self.assertEqual(hash(ipaddress.ip_address('10.1.1.0')), - hash(ipaddress.ip_address('10.1.1.0'))) - # i70 - self.assertEqual(hash(ipaddress.ip_address('1.2.3.4')), - hash(ipaddress.ip_address( - int(ipaddress.ip_address('1.2.3.4')._ip)))) - ip1 = ipaddress.ip_address('10.1.1.0') - ip2 = ipaddress.ip_address('1::') - dummy = {} - dummy[self.ipv4_address] = None - dummy[self.ipv6_address] = None - dummy[ip1] = None - dummy[ip2] = None - self.assertTrue(self.ipv4_address in dummy) - self.assertTrue(ip2 in dummy) - - def testCopyConstructor(self): - addr1 = ipaddress.ip_network('10.1.1.0/24') - addr2 = ipaddress.ip_network(addr1) - addr3 = ipaddress.ip_interface('2001:658:22a:cafe:200::1/64') - addr4 = ipaddress.ip_interface(addr3) - addr5 = ipaddress.IPv4Address('1.1.1.1') - addr6 = ipaddress.IPv6Address('2001:658:22a:cafe:200::1') - - self.assertEqual(addr1, addr2) - self.assertEqual(addr3, addr4) - self.assertEqual(addr5, ipaddress.IPv4Address(addr5)) - self.assertEqual(addr6, ipaddress.IPv6Address(addr6)) - - def testCompressIPv6Address(self): - test_addresses = { - '1:2:3:4:5:6:7:8': '1:2:3:4:5:6:7:8/128', - '2001:0:0:4:0:0:0:8': '2001:0:0:4::8/128', - '2001:0:0:4:5:6:7:8': '2001::4:5:6:7:8/128', - '2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128', - '2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128', - '0:0:3:0:0:0:0:ffff': '0:0:3::ffff/128', - '0:0:0:4:0:0:0:ffff': '::4:0:0:0:ffff/128', - '0:0:0:0:5:0:0:ffff': '::5:0:0:ffff/128', - '1:0:0:4:0:0:7:8': '1::4:0:0:7:8/128', - '0:0:0:0:0:0:0:0': '::/128', - '0:0:0:0:0:0:0:0/0': '::/0', - '0:0:0:0:0:0:0:1': '::1/128', - '2001:0658:022a:cafe:0000:0000:0000:0000/66': - '2001:658:22a:cafe::/66', - '::1.2.3.4': '::102:304/128', - '1:2:3:4:5:ffff:1.2.3.4': '1:2:3:4:5:ffff:102:304/128', - '::7:6:5:4:3:2:1': '0:7:6:5:4:3:2:1/128', - '::7:6:5:4:3:2:0': '0:7:6:5:4:3:2:0/128', - '7:6:5:4:3:2:1::': '7:6:5:4:3:2:1:0/128', - '0:6:5:4:3:2:1::': '0:6:5:4:3:2:1:0/128', - } - for uncompressed, compressed in list(test_addresses.items()): - 
self.assertEqual(compressed, str(ipaddress.IPv6Interface( - uncompressed))) - - def testExplodeShortHandIpStr(self): - addr1 = ipaddress.IPv6Interface('2001::1') - addr2 = ipaddress.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1') - addr3 = ipaddress.IPv6Network('2001::/96') - self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0001/128', - addr1.exploded) - self.assertEqual('0000:0000:0000:0000:0000:0000:0000:0001/128', - ipaddress.IPv6Interface('::1/128').exploded) - # issue 77 - self.assertEqual('2001:0000:5ef5:79fd:0000:059d:a0e5:0ba1', - addr2.exploded) - self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0000/96', - addr3.exploded) - - def testIntRepresentation(self): - self.assertEqual(16909060, int(self.ipv4_address)) - self.assertEqual(42540616829182469433547762482097946625, - int(self.ipv6_address)) - - def testHexRepresentation(self): - self.assertEqual(hex(0x1020304), - hex(self.ipv4_address)) - - self.assertEqual(hex(0x20010658022ACAFE0200000000000001), - hex(self.ipv6_address)) - - def testForceVersion(self): - self.assertEqual(ipaddress.ip_network(1).version, 4) - self.assertEqual(ipaddress.ip_network(1, version=6).version, 6) - - with self.assertRaises(ValueError): - ipaddress.ip_network(1, version='l') - with self.assertRaises(ValueError): - ipaddress.ip_network(1, version=3) - - def testWithStar(self): - self.assertEqual(str(self.ipv4_interface.with_prefixlen), "1.2.3.4/24") - self.assertEqual(str(self.ipv4_interface.with_netmask), - "1.2.3.4/255.255.255.0") - self.assertEqual(str(self.ipv4_interface.with_hostmask), - "1.2.3.4/0.0.0.255") - - self.assertEqual(str(self.ipv6_interface.with_prefixlen), - '2001:658:22a:cafe:200::1/64') - # rfc3513 sec 2.3 says that ipv6 only uses cidr notation for - # subnets - self.assertEqual(str(self.ipv6_interface.with_netmask), - '2001:658:22a:cafe:200::1/64') - # this probably don't make much sense, but it's included for - # compatibility with ipv4 - self.assertEqual(str(self.ipv6_interface.with_hostmask), - '2001:658:22a:cafe:200::1/::ffff:ffff:ffff:ffff') - - def testNetworkElementCaching(self): - # V4 - make sure we're empty - self.assertFalse('network_address' in self.ipv4_network._cache) - self.assertFalse('broadcast_address' in self.ipv4_network._cache) - self.assertFalse('hostmask' in self.ipv4_network._cache) - - # V4 - populate and test - self.assertEqual(self.ipv4_network.network_address, - ipaddress.IPv4Address('1.2.3.0')) - self.assertEqual(self.ipv4_network.broadcast_address, - ipaddress.IPv4Address('1.2.3.255')) - self.assertEqual(self.ipv4_network.hostmask, - ipaddress.IPv4Address('0.0.0.255')) - - # V4 - check we're cached - self.assertTrue('broadcast_address' in self.ipv4_network._cache) - self.assertTrue('hostmask' in self.ipv4_network._cache) - - # V6 - make sure we're empty - self.assertFalse('broadcast_address' in self.ipv6_network._cache) - self.assertFalse('hostmask' in self.ipv6_network._cache) - - # V6 - populate and test - self.assertEqual(self.ipv6_network.network_address, - ipaddress.IPv6Address('2001:658:22a:cafe::')) - self.assertEqual(self.ipv6_interface.network.network_address, - ipaddress.IPv6Address('2001:658:22a:cafe::')) - - self.assertEqual( - self.ipv6_network.broadcast_address, - ipaddress.IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff')) - self.assertEqual(self.ipv6_network.hostmask, - ipaddress.IPv6Address('::ffff:ffff:ffff:ffff')) - self.assertEqual( - self.ipv6_interface.network.broadcast_address, - ipaddress.IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff')) - 
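# Editor's aside, not part of the patch: the cached attributes checked above
# and below are ordinary properties of a network object; for example:
import ipaddress

net = ipaddress.ip_network('192.0.2.0/24')
print(net.network_address)    # 192.0.2.0
print(net.broadcast_address)  # 192.0.2.255
print(net.hostmask)           # 0.0.0.255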
self.assertEqual(self.ipv6_interface.network.hostmask, - ipaddress.IPv6Address('::ffff:ffff:ffff:ffff')) - - # V6 - check we're cached - self.assertTrue('broadcast_address' in self.ipv6_network._cache) - self.assertTrue('hostmask' in self.ipv6_network._cache) - self.assertTrue('broadcast_address' in self.ipv6_interface.network._cache) - self.assertTrue('hostmask' in self.ipv6_interface.network._cache) - - def testTeredo(self): - # stolen from wikipedia - server = ipaddress.IPv4Address('65.54.227.120') - client = ipaddress.IPv4Address('192.0.2.45') - teredo_addr = '2001:0000:4136:e378:8000:63bf:3fff:fdd2' - self.assertEqual((server, client), - ipaddress.ip_address(teredo_addr).teredo) - bad_addr = '2000::4136:e378:8000:63bf:3fff:fdd2' - self.assertFalse(ipaddress.ip_address(bad_addr).teredo) - bad_addr = '2001:0001:4136:e378:8000:63bf:3fff:fdd2' - self.assertFalse(ipaddress.ip_address(bad_addr).teredo) - - # i77 - teredo_addr = ipaddress.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1') - self.assertEqual((ipaddress.IPv4Address('94.245.121.253'), - ipaddress.IPv4Address('95.26.244.94')), - teredo_addr.teredo) - - - def testsixtofour(self): - sixtofouraddr = ipaddress.ip_address('2002:ac1d:2d64::1') - bad_addr = ipaddress.ip_address('2000:ac1d:2d64::1') - self.assertEqual(ipaddress.IPv4Address('172.29.45.100'), - sixtofouraddr.sixtofour) - self.assertFalse(bad_addr.sixtofour) - - def testIpInterfaceVersion(self): - with self.assertRaises(ValueError): - ipaddress.ip_interface(1, version=123) - - with self.assertRaises(ValueError): - ipaddress.ip_interface(1, version='') - - -if __name__ == '__main__': - unittest.main() diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_multiprocessing.py --- a/Lib/test/test_multiprocessing.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_multiprocessing.py Wed May 23 21:09:05 2012 +0200 @@ -2814,41 +2814,8 @@ with self.assertRaises(ValueError): multiprocessing.connection.Listener('/var/test.pipe') -# -# Issue 12098: check sys.flags of child matches that for parent -# - -class TestFlags(unittest.TestCase): - @classmethod - def run_in_grandchild(cls, conn): - conn.send(tuple(sys.flags)) - - @classmethod - def run_in_child(cls): - import json - r, w = multiprocessing.Pipe(duplex=False) - p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,)) - p.start() - grandchild_flags = r.recv() - p.join() - r.close() - w.close() - flags = (tuple(sys.flags), grandchild_flags) - print(json.dumps(flags)) - - def test_flags(self): - import json, subprocess - # start child process using unusual flags - prog = ('from test.test_multiprocessing import TestFlags; ' + - 'TestFlags.run_in_child()') - data = subprocess.check_output( - [sys.executable, '-E', '-S', '-O', '-c', prog]) - child_flags, grandchild_flags = json.loads(data.decode('ascii')) - self.assertEqual(child_flags, grandchild_flags) - testcases_other = [OtherTest, TestInvalidHandle, TestInitializers, - TestStdinBadfiledescriptor, TestWait, TestInvalidFamily, - TestFlags] + TestStdinBadfiledescriptor, TestWait, TestInvalidFamily] # # diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_os.py --- a/Lib/test/test_os.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_os.py Wed May 23 21:09:05 2012 +0200 @@ -651,7 +651,6 @@ # SUB2/ a file kid and a dirsymlink kid # tmp3 # link/ a symlink to TESTFN.2 - # broken_link # TEST2/ # tmp4 a lone file walk_path = join(support.TESTFN, "TEST1") @@ -664,8 +663,6 @@ link_path = join(sub2_path, "link") t2_path = join(support.TESTFN, "TEST2") tmp4_path = join(support.TESTFN, 
"TEST2", "tmp4") - link_path = join(sub2_path, "link") - broken_link_path = join(sub2_path, "broken_link") # Create stuff. os.makedirs(sub11_path) @@ -682,8 +679,7 @@ else: symlink_to_dir = os.symlink symlink_to_dir(os.path.abspath(t2_path), link_path) - symlink_to_dir('broken', broken_link_path) - sub2_tree = (sub2_path, ["link"], ["broken_link", "tmp3"]) + sub2_tree = (sub2_path, ["link"], ["tmp3"]) else: sub2_tree = (sub2_path, [], ["tmp3"]) @@ -695,7 +691,6 @@ # flipped: TESTFN, SUB2, SUB1, SUB11 flipped = all[0][1][0] != "SUB1" all[0][1].sort() - all[3 - 2 * flipped][-1].sort() self.assertEqual(all[0], (walk_path, ["SUB1", "SUB2"], ["tmp1"])) self.assertEqual(all[1 + flipped], (sub1_path, ["SUB11"], ["tmp2"])) self.assertEqual(all[2 + flipped], (sub11_path, [], [])) @@ -711,7 +706,6 @@ dirs.remove('SUB1') self.assertEqual(len(all), 2) self.assertEqual(all[0], (walk_path, ["SUB2"], ["tmp1"])) - all[1][-1].sort() self.assertEqual(all[1], sub2_tree) # Walk bottom-up. @@ -722,7 +716,6 @@ # flipped: SUB2, SUB11, SUB1, TESTFN flipped = all[3][1][0] != "SUB1" all[3][1].sort() - all[2 - 2 * flipped][-1].sort() self.assertEqual(all[3], (walk_path, ["SUB1", "SUB2"], ["tmp1"])) self.assertEqual(all[flipped], (sub11_path, [], [])) self.assertEqual(all[flipped + 1], (sub1_path, ["SUB11"], ["tmp2"])) diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_pkg.py --- a/Lib/test/test_pkg.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_pkg.py Wed May 23 21:09:05 2012 +0200 @@ -23,8 +23,6 @@ def fixdir(lst): if "__builtins__" in lst: lst.remove("__builtins__") - if "__initializing__" in lst: - lst.remove("__initializing__") return lst diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_pkgutil.py --- a/Lib/test/test_pkgutil.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_pkgutil.py Wed May 23 21:09:05 2012 +0200 @@ -137,57 +137,8 @@ self.assertEqual(foo.loads, 1) del sys.modules['foo'] - -class ExtendPathTests(unittest.TestCase): - def create_init(self, pkgname): - dirname = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, dirname) - sys.path.insert(0, dirname) - - pkgdir = os.path.join(dirname, pkgname) - os.mkdir(pkgdir) - with open(os.path.join(pkgdir, '__init__.py'), 'w') as fl: - fl.write('from pkgutil import extend_path\n__path__ = extend_path(__path__, __name__)\n') - - return dirname - - def create_submodule(self, dirname, pkgname, submodule_name, value): - module_name = os.path.join(dirname, pkgname, submodule_name + '.py') - with open(module_name, 'w') as fl: - print('value={}'.format(value), file=fl) - - def setUp(self): - # Create 2 directories on sys.path - self.pkgname = 'foo' - self.dirname_0 = self.create_init(self.pkgname) - self.dirname_1 = self.create_init(self.pkgname) - - def tearDown(self): - del sys.path[0] - del sys.path[0] - del sys.modules['foo'] - del sys.modules['foo.bar'] - del sys.modules['foo.baz'] - - def test_simple(self): - self.create_submodule(self.dirname_0, self.pkgname, 'bar', 0) - self.create_submodule(self.dirname_1, self.pkgname, 'baz', 1) - import foo.bar - import foo.baz - # Ensure we read the expected values - self.assertEqual(foo.bar.value, 0) - self.assertEqual(foo.baz.value, 1) - - # Ensure the path is set up correctly - self.assertEqual(sorted(foo.__path__), - sorted([os.path.join(self.dirname_0, self.pkgname), - os.path.join(self.dirname_1, self.pkgname)])) - - # XXX: test .pkg files - - def test_main(): - run_unittest(PkgutilTests, PkgutilPEP302Tests, ExtendPathTests) + run_unittest(PkgutilTests, PkgutilPEP302Tests) # this is necessary if test 
is run repeated (like when finding leaks) import zipimport zipimport._zip_directory_cache.clear() diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_pyclbr.py --- a/Lib/test/test_pyclbr.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_pyclbr.py Wed May 23 21:09:05 2012 +0200 @@ -167,11 +167,6 @@ cm('email.parser') cm('test.test_pyclbr') - def test_issue_14798(self): - # test ImportError is raised when the first part of a dotted name is - # not a package - self.assertRaises(ImportError, pyclbr.readmodule_ex, 'asyncore.foo') - def test_main(): run_unittest(PyclbrTest) diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_raise.py --- a/Lib/test/test_raise.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_raise.py Wed May 23 21:09:05 2012 +0200 @@ -3,13 +3,27 @@ """Tests for the raise statement.""" -from test import support +from test import support, script_helper import re import sys import types import unittest +try: + from resource import setrlimit, RLIMIT_CORE, error as resource_error +except ImportError: + prepare_subprocess = None +else: + def prepare_subprocess(): + # don't create core file + try: + setrlimit(RLIMIT_CORE, (0, 0)) + except (ValueError, resource_error): + pass + + + def get_tb(): try: raise OSError() @@ -160,14 +174,11 @@ raise ValueError from None except ValueError as exc: self.assertIsNone(exc.__cause__) - self.assertTrue(exc.__suppress_context__) - exc.__suppress_context__ = False - raise exc + raise exc from Ellipsis except ValueError as exc: e = exc - self.assertIsNone(e.__cause__) - self.assertFalse(e.__suppress_context__) + self.assertIs(e.__cause__, Ellipsis) self.assertIsInstance(e.__context__, TypeError) def test_invalid_cause(self): @@ -210,6 +221,43 @@ class TestTraceback(unittest.TestCase): + def get_output(self, code, filename=None): + """ + Run the specified code in Python (in a new child process) and read the + output from the standard error or from a file (if filename is set). + Return the output lines as a list. 
+ """ + options = {} + if prepare_subprocess: + options['preexec_fn'] = prepare_subprocess + process = script_helper.spawn_python('-c', code, **options) + stdout, stderr = process.communicate() + exitcode = process.wait() + output = support.strip_python_stderr(stdout) + output = output.decode('ascii', 'backslashreplace') + if filename: + self.assertEqual(output, '') + with open(filename, "rb") as fp: + output = fp.read() + output = output.decode('ascii', 'backslashreplace') + output = re.sub('Current thread 0x[0-9a-f]+', + 'Current thread XXX', + output) + return output.splitlines(), exitcode + + def test_traceback_verbiage(self): + code = """ +try: + raise ValueError +except: + raise NameError from None +""" + text, exitcode = self.get_output(code) + self.assertEqual(len(text), 3) + self.assertTrue(text[0].startswith('Traceback')) + self.assertTrue(text[1].startswith(' File ')) + self.assertTrue(text[2].startswith('NameError')) + def test_sets_traceback(self): try: raise IndexError() diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_stat.py --- a/Lib/test/test_stat.py Wed May 23 22:26:55 2012 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,66 +0,0 @@ -import unittest -import os -import stat -from test.support import TESTFN, run_unittest - - -def get_mode(fname=TESTFN): - return stat.filemode(os.lstat(fname).st_mode) - - -class TestFilemode(unittest.TestCase): - - def setUp(self): - try: - os.remove(TESTFN) - except OSError: - try: - os.rmdir(TESTFN) - except OSError: - pass - tearDown = setUp - - def test_mode(self): - with open(TESTFN, 'w'): - pass - if os.name == 'posix': - os.chmod(TESTFN, 0o700) - self.assertEqual(get_mode(), '-rwx------') - os.chmod(TESTFN, 0o070) - self.assertEqual(get_mode(), '----rwx---') - os.chmod(TESTFN, 0o007) - self.assertEqual(get_mode(), '-------rwx') - os.chmod(TESTFN, 0o444) - self.assertEqual(get_mode(), '-r--r--r--') - else: - os.chmod(TESTFN, 0o700) - self.assertEqual(get_mode()[:3], '-rw') - - def test_directory(self): - os.mkdir(TESTFN) - os.chmod(TESTFN, 0o700) - if os.name == 'posix': - self.assertEqual(get_mode(), 'drwx------') - else: - self.assertEqual(get_mode()[0], 'd') - - @unittest.skipUnless(hasattr(os, 'symlink'), 'os.symlink not available') - def test_link(self): - try: - os.symlink(os.getcwd(), TESTFN) - except (OSError, NotImplementedError) as err: - raise unittest.SkipTest(str(err)) - else: - self.assertEqual(get_mode()[0], 'l') - - @unittest.skipUnless(hasattr(os, 'mkfifo'), 'os.mkfifo not available') - def test_fifo(self): - os.mkfifo(TESTFN, 0o700) - self.assertEqual(get_mode(), 'prwx------') - - -def test_main(): - run_unittest(TestFilemode) - -if __name__ == '__main__': - test_main() diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_strptime.py --- a/Lib/test/test_strptime.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_strptime.py Wed May 23 21:09:05 2012 +0200 @@ -381,11 +381,6 @@ def test_feb29_on_leap_year_without_year(self): time.strptime("Feb 29", "%b %d") - def test_mar1_comes_after_feb29_even_when_omitting_the_year(self): - self.assertLess( - time.strptime("Feb 29", "%b %d"), - time.strptime("Mar 1", "%b %d")) - class Strptime12AMPMTests(unittest.TestCase): """Test a _strptime regression in '%I %p' at 12 noon (12 PM)""" diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_sys.py --- a/Lib/test/test_sys.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_sys.py Wed May 23 21:09:05 2012 +0200 @@ -700,14 +700,14 @@ class C(object): pass check(C.__dict__, size(h + 'P')) # BaseException - check(BaseException(), 
size(h + '5Pi')) + check(BaseException(), size(h + '5P')) # UnicodeEncodeError - check(UnicodeEncodeError("", "", 0, 0, ""), size(h + '5Pi 2P2PP')) + check(UnicodeEncodeError("", "", 0, 0, ""), size(h + '5P 2P2PP')) # UnicodeDecodeError # XXX # check(UnicodeDecodeError("", "", 0, 0, ""), size(h + '5P2PP')) # UnicodeTranslateError - check(UnicodeTranslateError("", 0, 1, ""), size(h + '5Pi 2P2PP')) + check(UnicodeTranslateError("", 0, 1, ""), size(h + '5P 2P2PP')) # ellipses check(Ellipsis, size(h + '')) # EncodingMap diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_textwrap.py --- a/Lib/test/test_textwrap.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_textwrap.py Wed May 23 21:09:05 2012 +0200 @@ -91,14 +91,6 @@ result = wrapper.fill(text) self.check(result, '\n'.join(expect)) - text = "\tTest\tdefault\t\ttabsize." - expect = [" Test default tabsize."] - self.check_wrap(text, 80, expect) - - text = "\tTest\tcustom\t\ttabsize." - expect = [" Test custom tabsize."] - self.check_wrap(text, 80, expect, tabsize=4) - def test_fix_sentence_endings(self): wrapper = TextWrapper(60, fix_sentence_endings=True) diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_threaded_import.py --- a/Lib/test/test_threaded_import.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_threaded_import.py Wed May 23 21:09:05 2012 +0200 @@ -12,7 +12,7 @@ import shutil import unittest from test.support import ( - verbose, import_module, run_unittest, TESTFN, reap_threads, forget, unlink) + verbose, import_module, run_unittest, TESTFN, reap_threads) threading = import_module('threading') def task(N, done, done_tasks, errors): @@ -187,7 +187,7 @@ contents = contents % {'delay': delay} with open(os.path.join(TESTFN, name + ".py"), "wb") as f: f.write(contents.encode('utf-8')) - self.addCleanup(forget, name) + self.addCleanup(sys.modules.pop, name, None) results = [] def import_ab(): @@ -204,23 +204,6 @@ t2.join() self.assertEqual(set(results), {'a', 'b'}) - def test_side_effect_import(self): - code = """if 1: - import threading - def target(): - import random - t = threading.Thread(target=target) - t.start() - t.join()""" - sys.path.insert(0, os.curdir) - self.addCleanup(sys.path.remove, os.curdir) - filename = TESTFN + ".py" - with open(filename, "wb") as f: - f.write(code.encode('utf-8')) - self.addCleanup(unlink, filename) - self.addCleanup(forget, TESTFN) - __import__(TESTFN) - @reap_threads def test_main(): diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_types.py --- a/Lib/test/test_types.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_types.py Wed May 23 21:09:05 2012 +0200 @@ -747,257 +747,8 @@ self.assertEqual(copy['key1'], 27) -class ClassCreationTests(unittest.TestCase): - - class Meta(type): - def __init__(cls, name, bases, ns, **kw): - super().__init__(name, bases, ns) - @staticmethod - def __new__(mcls, name, bases, ns, **kw): - return super().__new__(mcls, name, bases, ns) - @classmethod - def __prepare__(mcls, name, bases, **kw): - ns = super().__prepare__(name, bases) - ns["y"] = 1 - ns.update(kw) - return ns - - def test_new_class_basics(self): - C = types.new_class("C") - self.assertEqual(C.__name__, "C") - self.assertEqual(C.__bases__, (object,)) - - def test_new_class_subclass(self): - C = types.new_class("C", (int,)) - self.assertTrue(issubclass(C, int)) - - def test_new_class_meta(self): - Meta = self.Meta - settings = {"metaclass": Meta, "z": 2} - # We do this twice to make sure the passed in dict isn't mutated - for i in range(2): - C = types.new_class("C" + str(i), (), settings) - 
self.assertIsInstance(C, Meta) - self.assertEqual(C.y, 1) - self.assertEqual(C.z, 2) - - def test_new_class_exec_body(self): - Meta = self.Meta - def func(ns): - ns["x"] = 0 - C = types.new_class("C", (), {"metaclass": Meta, "z": 2}, func) - self.assertIsInstance(C, Meta) - self.assertEqual(C.x, 0) - self.assertEqual(C.y, 1) - self.assertEqual(C.z, 2) - - def test_new_class_exec_body(self): - #Test that keywords are passed to the metaclass: - def meta_func(name, bases, ns, **kw): - return name, bases, ns, kw - res = types.new_class("X", - (int, object), - dict(metaclass=meta_func, x=0)) - self.assertEqual(res, ("X", (int, object), {}, {"x": 0})) - - def test_new_class_defaults(self): - # Test defaults/keywords: - C = types.new_class("C", (), {}, None) - self.assertEqual(C.__name__, "C") - self.assertEqual(C.__bases__, (object,)) - - def test_new_class_meta_with_base(self): - Meta = self.Meta - def func(ns): - ns["x"] = 0 - C = types.new_class(name="C", - bases=(int,), - kwds=dict(metaclass=Meta, z=2), - exec_body=func) - self.assertTrue(issubclass(C, int)) - self.assertIsInstance(C, Meta) - self.assertEqual(C.x, 0) - self.assertEqual(C.y, 1) - self.assertEqual(C.z, 2) - - # Many of the following tests are derived from test_descr.py - def test_prepare_class(self): - # Basic test of metaclass derivation - expected_ns = {} - class A(type): - def __new__(*args, **kwargs): - return type.__new__(*args, **kwargs) - - def __prepare__(*args): - return expected_ns - - B = types.new_class("B", (object,)) - C = types.new_class("C", (object,), {"metaclass": A}) - - # The most derived metaclass of D is A rather than type. - meta, ns, kwds = types.prepare_class("D", (B, C), {"metaclass": type}) - self.assertIs(meta, A) - self.assertIs(ns, expected_ns) - self.assertEqual(len(kwds), 0) - - def test_metaclass_derivation(self): - # issue1294232: correct metaclass calculation - new_calls = [] # to check the order of __new__ calls - class AMeta(type): - def __new__(mcls, name, bases, ns): - new_calls.append('AMeta') - return super().__new__(mcls, name, bases, ns) - @classmethod - def __prepare__(mcls, name, bases): - return {} - - class BMeta(AMeta): - def __new__(mcls, name, bases, ns): - new_calls.append('BMeta') - return super().__new__(mcls, name, bases, ns) - @classmethod - def __prepare__(mcls, name, bases): - ns = super().__prepare__(name, bases) - ns['BMeta_was_here'] = True - return ns - - A = types.new_class("A", (), {"metaclass": AMeta}) - self.assertEqual(new_calls, ['AMeta']) - new_calls.clear() - - B = types.new_class("B", (), {"metaclass": BMeta}) - # BMeta.__new__ calls AMeta.__new__ with super: - self.assertEqual(new_calls, ['BMeta', 'AMeta']) - new_calls.clear() - - C = types.new_class("C", (A, B)) - # The most derived metaclass is BMeta: - self.assertEqual(new_calls, ['BMeta', 'AMeta']) - new_calls.clear() - # BMeta.__prepare__ should've been called: - self.assertIn('BMeta_was_here', C.__dict__) - - # The order of the bases shouldn't matter: - C2 = types.new_class("C2", (B, A)) - self.assertEqual(new_calls, ['BMeta', 'AMeta']) - new_calls.clear() - self.assertIn('BMeta_was_here', C2.__dict__) - - # Check correct metaclass calculation when a metaclass is declared: - D = types.new_class("D", (C,), {"metaclass": type}) - self.assertEqual(new_calls, ['BMeta', 'AMeta']) - new_calls.clear() - self.assertIn('BMeta_was_here', D.__dict__) - - E = types.new_class("E", (C,), {"metaclass": AMeta}) - self.assertEqual(new_calls, ['BMeta', 'AMeta']) - new_calls.clear() - self.assertIn('BMeta_was_here', 
E.__dict__) - - def test_metaclass_override_function(self): - # Special case: the given metaclass isn't a class, - # so there is no metaclass calculation. - class A(metaclass=self.Meta): - pass - - marker = object() - def func(*args, **kwargs): - return marker - - X = types.new_class("X", (), {"metaclass": func}) - Y = types.new_class("Y", (object,), {"metaclass": func}) - Z = types.new_class("Z", (A,), {"metaclass": func}) - self.assertIs(marker, X) - self.assertIs(marker, Y) - self.assertIs(marker, Z) - - def test_metaclass_override_callable(self): - # The given metaclass is a class, - # but not a descendant of type. - new_calls = [] # to check the order of __new__ calls - prepare_calls = [] # to track __prepare__ calls - class ANotMeta: - def __new__(mcls, *args, **kwargs): - new_calls.append('ANotMeta') - return super().__new__(mcls) - @classmethod - def __prepare__(mcls, name, bases): - prepare_calls.append('ANotMeta') - return {} - - class BNotMeta(ANotMeta): - def __new__(mcls, *args, **kwargs): - new_calls.append('BNotMeta') - return super().__new__(mcls) - @classmethod - def __prepare__(mcls, name, bases): - prepare_calls.append('BNotMeta') - return super().__prepare__(name, bases) - - A = types.new_class("A", (), {"metaclass": ANotMeta}) - self.assertIs(ANotMeta, type(A)) - self.assertEqual(prepare_calls, ['ANotMeta']) - prepare_calls.clear() - self.assertEqual(new_calls, ['ANotMeta']) - new_calls.clear() - - B = types.new_class("B", (), {"metaclass": BNotMeta}) - self.assertIs(BNotMeta, type(B)) - self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta']) - prepare_calls.clear() - self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta']) - new_calls.clear() - - C = types.new_class("C", (A, B)) - self.assertIs(BNotMeta, type(C)) - self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta']) - prepare_calls.clear() - self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta']) - new_calls.clear() - - C2 = types.new_class("C2", (B, A)) - self.assertIs(BNotMeta, type(C2)) - self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta']) - prepare_calls.clear() - self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta']) - new_calls.clear() - - # This is a TypeError, because of a metaclass conflict: - # BNotMeta is neither a subclass, nor a superclass of type - with self.assertRaises(TypeError): - D = types.new_class("D", (C,), {"metaclass": type}) - - E = types.new_class("E", (C,), {"metaclass": ANotMeta}) - self.assertIs(BNotMeta, type(E)) - self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta']) - prepare_calls.clear() - self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta']) - new_calls.clear() - - F = types.new_class("F", (object(), C)) - self.assertIs(BNotMeta, type(F)) - self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta']) - prepare_calls.clear() - self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta']) - new_calls.clear() - - F2 = types.new_class("F2", (C, object())) - self.assertIs(BNotMeta, type(F2)) - self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta']) - prepare_calls.clear() - self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta']) - new_calls.clear() - - # TypeError: BNotMeta is neither a - # subclass, nor a superclass of int - with self.assertRaises(TypeError): - X = types.new_class("X", (C, int())) - with self.assertRaises(TypeError): - X = types.new_class("X", (int(), C)) - - def test_main(): - run_unittest(TypesTests, MappingProxyTests, ClassCreationTests) + run_unittest(TypesTests, MappingProxyTests) if __name__ == '__main__': test_main() diff -r 0be296605165 -r faa88c50a3d2 
Lib/test/test_urllib2.py --- a/Lib/test/test_urllib2.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_urllib2.py Wed May 23 21:09:05 2012 +0200 @@ -1252,22 +1252,6 @@ def test_basic_auth_with_single_quoted_realm(self): self.test_basic_auth(quote_char="'") - def test_basic_auth_with_unquoted_realm(self): - opener = OpenerDirector() - password_manager = MockPasswordManager() - auth_handler = urllib.request.HTTPBasicAuthHandler(password_manager) - realm = "ACME Widget Store" - http_handler = MockHTTPHandler( - 401, 'WWW-Authenticate: Basic realm=%s\r\n\r\n' % realm) - opener.add_handler(auth_handler) - opener.add_handler(http_handler) - with self.assertWarns(UserWarning): - self._test_basic_auth(opener, auth_handler, "Authorization", - realm, http_handler, password_manager, - "http://acme.example.com/protected", - "http://acme.example.com/protected", - ) - def test_proxy_basic_auth(self): opener = OpenerDirector() ph = urllib.request.ProxyHandler(dict(http="proxy.example.com:3128")) diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_urllib2_localnet.py --- a/Lib/test/test_urllib2_localnet.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_urllib2_localnet.py Wed May 23 21:09:05 2012 +0200 @@ -474,13 +474,6 @@ self.urlopen("https://localhost:%s/bizarre" % handler.port, cafile=CERT_fakehostname) - def test_https_with_cadefault(self): - handler = self.start_https_server(certfile=CERT_localhost) - # Self-signed cert should fail verification with system certificate store - with self.assertRaises(urllib.error.URLError) as cm: - self.urlopen("https://localhost:%s/bizarre" % handler.port, - cadefault=True) - def test_sending_headers(self): handler = self.start_server() req = urllib.request.Request("http://localhost:%s/" % handler.port, diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_urlparse.py --- a/Lib/test/test_urlparse.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_urlparse.py Wed May 23 21:09:05 2012 +0200 @@ -636,20 +636,11 @@ ('s3', 'foo.com', '/stuff', '', '', '')) self.assertEqual(urllib.parse.urlparse("x-newscheme://foo.com/stuff"), ('x-newscheme', 'foo.com', '/stuff', '', '', '')) - self.assertEqual(urllib.parse.urlparse("x-newscheme://foo.com/stuff?query#fragment"), - ('x-newscheme', 'foo.com', '/stuff', '', 'query', 'fragment')) - self.assertEqual(urllib.parse.urlparse("x-newscheme://foo.com/stuff?query"), - ('x-newscheme', 'foo.com', '/stuff', '', 'query', '')) - # And for bytes... 
self.assertEqual(urllib.parse.urlparse(b"s3://foo.com/stuff"), (b's3', b'foo.com', b'/stuff', b'', b'', b'')) self.assertEqual(urllib.parse.urlparse(b"x-newscheme://foo.com/stuff"), (b'x-newscheme', b'foo.com', b'/stuff', b'', b'', b'')) - self.assertEqual(urllib.parse.urlparse(b"x-newscheme://foo.com/stuff?query#fragment"), - (b'x-newscheme', b'foo.com', b'/stuff', b'', b'query', b'fragment')) - self.assertEqual(urllib.parse.urlparse(b"x-newscheme://foo.com/stuff?query"), - (b'x-newscheme', b'foo.com', b'/stuff', b'', b'query', b'')) def test_mixed_types_rejected(self): # Several functions that process either strings or ASCII encoded bytes @@ -806,13 +797,6 @@ encoding='utf-8') self.assertRaises(TypeError, urllib.parse.quote, b'foo', errors='strict') - def test_issue14072(self): - p1 = urllib.parse.urlsplit('tel:+31-641044153') - self.assertEqual(p1.scheme, 'tel') - self.assertEqual(p1.path, '+31-641044153') - p2 = urllib.parse.urlsplit('tel:+31641044153') - self.assertEqual(p2.scheme, 'tel') - self.assertEqual(p2.path, '+31641044153') def test_main(): support.run_unittest(UrlParseTestCase) diff -r 0be296605165 -r faa88c50a3d2 Lib/test/test_xml_etree.py --- a/Lib/test/test_xml_etree.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/test/test_xml_etree.py Wed May 23 21:09:05 2012 +0200 @@ -1914,10 +1914,6 @@ self.assertIsInstance(mye, MyElement) self.assertEqual(mye.tag, 'foo') - # test that attribute assignment works (issue 14849) - mye.text = "joe" - self.assertEqual(mye.text, "joe") - def test_Element_subclass_constructor(self): class MyElement(ET.Element): def __init__(self, tag, attrib={}, **extra): diff -r 0be296605165 -r faa88c50a3d2 Lib/textwrap.py --- a/Lib/textwrap.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/textwrap.py Wed May 23 21:09:05 2012 +0200 @@ -39,11 +39,8 @@ of wrapped output; also counts towards each line's width. expand_tabs (default: true) Expand tabs in input text to spaces before further processing. - Each tab will become 0 .. 'tabsize' spaces, depending on its position - in its line. If false, each tab is treated as a single character. - tabsize (default: 8) - Expand tabs in input text to 0 .. 'tabsize' spaces, unless - 'expand_tabs' is false. + Each tab will become 1 .. 8 spaces, depending on its position in + its line. If false, each tab is treated as a single character. replace_whitespace (default: true) Replace all whitespace characters in the input text by spaces after tab expansion. Note that if expand_tabs is false and @@ -103,8 +100,7 @@ fix_sentence_endings=False, break_long_words=True, drop_whitespace=True, - break_on_hyphens=True, - tabsize=8): + break_on_hyphens=True): self.width = width self.initial_indent = initial_indent self.subsequent_indent = subsequent_indent @@ -114,7 +110,6 @@ self.break_long_words = break_long_words self.drop_whitespace = drop_whitespace self.break_on_hyphens = break_on_hyphens - self.tabsize = tabsize # -- Private methods ----------------------------------------------- @@ -128,7 +123,7 @@ becomes " foo bar baz". """ if self.expand_tabs: - text = text.expandtabs(self.tabsize) + text = text.expandtabs() if self.replace_whitespace: text = text.translate(self.unicode_whitespace_trans) return text diff -r 0be296605165 -r faa88c50a3d2 Lib/tkinter/__init__.py --- a/Lib/tkinter/__init__.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/tkinter/__init__.py Wed May 23 21:09:05 2012 +0200 @@ -540,19 +540,12 @@ The type keyword specifies the form in which the data is to be returned and should be an atom name such as STRING - or FILE_NAME. 
Type defaults to STRING, except on X11, where the default - is to try UTF8_STRING and fall back to STRING. + or FILE_NAME. Type defaults to STRING. This command is equivalent to: selection_get(CLIPBOARD) """ - if 'type' not in kw and self._windowingsystem == 'x11': - try: - kw['type'] = 'UTF8_STRING' - return self.tk.call(('clipboard', 'get') + self._options(kw)) - except TclError: - del kw['type'] return self.tk.call(('clipboard', 'get') + self._options(kw)) def clipboard_clear(self, **kw): @@ -634,16 +627,8 @@ A keyword parameter selection specifies the name of the selection and defaults to PRIMARY. A keyword parameter displayof specifies a widget on the display - to use. A keyword parameter type specifies the form of data to be - fetched, defaulting to STRING except on X11, where UTF8_STRING is tried - before STRING.""" + to use.""" if 'displayof' not in kw: kw['displayof'] = self._w - if 'type' not in kw and self._windowingsystem == 'x11': - try: - kw['type'] = 'UTF8_STRING' - return self.tk.call(('selection', 'get') + self._options(kw)) - except TclError: - del kw['type'] return self.tk.call(('selection', 'get') + self._options(kw)) def selection_handle(self, command, **kw): """Specify a function COMMAND to call if the X @@ -1058,15 +1043,6 @@ if displayof is None: return ('-displayof', self._w) return () - @property - def _windowingsystem(self): - """Internal function.""" - try: - return self._root()._windowingsystem_cached - except AttributeError: - ws = self._root()._windowingsystem_cached = \ - self.tk.call('tk', 'windowingsystem') - return ws def _options(self, cnf, kw = None): """Internal function.""" if kw: diff -r 0be296605165 -r faa88c50a3d2 Lib/token.py --- a/Lib/token.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/token.py Wed May 23 21:09:05 2012 +0200 @@ -70,7 +70,7 @@ tok_name = {value: name for name, value in globals().items() - if isinstance(value, int) and not name.startswith('_')} + if isinstance(value, int)} __all__.extend(tok_name.values()) def ISTERMINAL(x): diff -r 0be296605165 -r faa88c50a3d2 Lib/traceback.py --- a/Lib/traceback.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/traceback.py Wed May 23 21:09:05 2012 +0200 @@ -119,16 +119,15 @@ seen = set() seen.add(exc) its = [] - context = exc.__context__ cause = exc.__cause__ - if cause is not None and cause not in seen: + if cause is Ellipsis: + context = exc.__context__ + if context is not None and context not in seen: + its.append(_iter_chain(context, None, seen)) + its.append([(_context_message, None)]) + elif cause is not None and cause not in seen: its.append(_iter_chain(cause, False, seen)) its.append([(_cause_message, None)]) - elif (context is not None and - not exc.__suppress_context__ and - context not in seen): - its.append(_iter_chain(context, None, seen)) - its.append([(_context_message, None)]) its.append([(exc, custom_tb or exc.__traceback__)]) # itertools.chain is in an extension module and may be unavailable for it in its: diff -r 0be296605165 -r faa88c50a3d2 Lib/types.py --- a/Lib/types.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/types.py Wed May 23 21:09:05 2012 +0200 @@ -40,61 +40,3 @@ MemberDescriptorType = type(FunctionType.__globals__) del sys, _f, _g, _C, # Not for export - - -# Provide a PEP 3115 compliant mechanism for class creation -def new_class(name, bases=(), kwds=None, exec_body=None): - """Create a class object dynamically using the appropriate metaclass.""" - meta, ns, kwds = prepare_class(name, bases, kwds) - if exec_body is not None: - exec_body(ns) - return meta(name, bases, ns, 
**kwds) - -def prepare_class(name, bases=(), kwds=None): - """Call the __prepare__ method of the appropriate metaclass. - - Returns (metaclass, namespace, kwds) as a 3-tuple - - *metaclass* is the appropriate metaclass - *namespace* is the prepared class namespace - *kwds* is an updated copy of the passed in kwds argument with any - 'metaclass' entry removed. If no kwds argument is passed in, this will - be an empty dict. - """ - if kwds is None: - kwds = {} - else: - kwds = dict(kwds) # Don't alter the provided mapping - if 'metaclass' in kwds: - meta = kwds.pop('metaclass') - else: - if bases: - meta = type(bases[0]) - else: - meta = type - if isinstance(meta, type): - # when meta is a type, we first determine the most-derived metaclass - # instead of invoking the initial candidate directly - meta = _calculate_meta(meta, bases) - if hasattr(meta, '__prepare__'): - ns = meta.__prepare__(name, bases, **kwds) - else: - ns = {} - return meta, ns, kwds - -def _calculate_meta(meta, bases): - """Calculate the most derived metaclass.""" - winner = meta - for base in bases: - base_meta = type(base) - if issubclass(winner, base_meta): - continue - if issubclass(base_meta, winner): - winner = base_meta - continue - # else: - raise TypeError("metaclass conflict: " - "the metaclass of a derived class " - "must be a (non-strict) subclass " - "of the metaclasses of all its bases") - return winner diff -r 0be296605165 -r faa88c50a3d2 Lib/urllib/parse.py --- a/Lib/urllib/parse.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/urllib/parse.py Wed May 23 21:09:05 2012 +0200 @@ -44,9 +44,16 @@ 'imap', 'wais', 'file', 'mms', 'https', 'shttp', 'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '', 'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh'] +non_hierarchical = ['gopher', 'hdl', 'mailto', 'news', + 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips'] uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap', 'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips', 'mms', '', 'sftp'] +uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms', + 'gopher', 'rtsp', 'rtspu', 'sip', 'sips', ''] +uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news', + 'nntp', 'wais', 'https', 'shttp', 'snews', + 'file', 'prospero', ''] # Characters valid in scheme names scheme_chars = ('abcdefghijklmnopqrstuvwxyz' @@ -338,21 +345,21 @@ if c not in scheme_chars: break else: - # make sure "url" is not actually a port number (in which case - # "scheme" is really part of the path) - rest = url[i+1:] - if not rest or any(c not in '0123456789' for c in rest): - # not a port number - scheme, url = url[:i].lower(), rest + try: + # make sure "url" is not actually a port number (in which case + # "scheme" is really part of the path + _testportnum = int(url[i+1:]) + except ValueError: + scheme, url = url[:i].lower(), url[i+1:] if url[:2] == '//': netloc, url = _splitnetloc(url, 2) if (('[' in netloc and ']' not in netloc) or (']' in netloc and '[' not in netloc)): raise ValueError("Invalid IPv6 URL") - if allow_fragments and '#' in url: + if allow_fragments and scheme in uses_fragment and '#' in url: url, fragment = url.split('#', 1) - if '?' in url: + if scheme in uses_query and '?' 
in url: url, query = url.split('?', 1) v = SplitResult(scheme, netloc, url, query, fragment) _parse_cache[key] = v diff -r 0be296605165 -r faa88c50a3d2 Lib/urllib/request.py --- a/Lib/urllib/request.py Wed May 23 22:26:55 2012 +0200 +++ b/Lib/urllib/request.py Wed May 23 21:09:05 2012 +0200 @@ -135,19 +135,16 @@ _opener = None def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - *, cafile=None, capath=None, cadefault=False): + *, cafile=None, capath=None): global _opener - if cafile or capath or cadefault: + if cafile or capath: if not _have_ssl: raise ValueError('SSL support not available') context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.options |= ssl.OP_NO_SSLv2 - if cafile or capath or cadefault: + if cafile or capath: context.verify_mode = ssl.CERT_REQUIRED - if cafile or capath: - context.load_verify_locations(cafile, capath) - else: - context.set_default_verify_paths() + context.load_verify_locations(cafile, capath) check_hostname = True else: check_hostname = False @@ -898,7 +895,7 @@ # allow for double- and single-quoted realm values # (single quotes are a violation of the RFC, but appear in the wild) rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+' - 'realm=(["\']?)([^"\']*)\\2', re.I) + 'realm=(["\'])(.*?)\\2', re.I) # XXX could pre-emptively send auth info already accepted (RFC 2617, # end of section 2, and section 1.2 immediately after "credentials" @@ -937,9 +934,6 @@ mo = AbstractBasicAuthHandler.rx.search(authreq) if mo: scheme, quote, realm = mo.groups() - if quote not in ['"',"'"]: - warnings.warn("Basic Auth Realm was unquoted", - UserWarning, 2) if scheme.lower() == 'basic': response = self.retry_http_basic_auth(host, req, realm) if response and response.code != 401: diff -r 0be296605165 -r faa88c50a3d2 Misc/ACKS --- a/Misc/ACKS Wed May 23 22:26:55 2012 +0200 +++ b/Misc/ACKS Wed May 23 21:09:05 2012 +0200 @@ -161,7 +161,6 @@ Donn Cave Charles Cazabon Per Cederqvist -Matej Cepl Octavian Cerna Pascal Chambon John Chandler @@ -362,7 +361,6 @@ Dan Gass Andrew Gaul Stephen M. Gava -Xavier de Gaye Harry Henry Gebel Marius Gedminas Thomas Gellekum @@ -703,7 +701,6 @@ Doug Moen The Dragon De Monsyne Skip Montanaro -Peter Moody Paul Moore Derek Morr James A Morrison @@ -749,7 +746,6 @@ John O'Connor Kevin O'Connor Tim O'Malley -James Oakley Jon Oberheide Pascal Oberndoerfer Jeffrey Ollie @@ -779,7 +775,6 @@ Joe Peterson Randy Pausch Samuele Pedroni -Justin Peel Marcel van der Peijl Berker Peksag Steven Pemberton @@ -924,7 +919,6 @@ Michael Schneider Peter Schneider-Kamp Arvin Schnell -Robin Schreiber Chad J. Schroeder Sam Schulenburg Stefan Schwarzer @@ -1135,7 +1129,6 @@ Hirokazu Yamamoto Ka-Ping Yee Jason Yeo -EungJun Yi Bob Yodlowski Danny Yoo George Yoshida diff -r 0be296605165 -r faa88c50a3d2 Misc/NEWS --- a/Misc/NEWS Wed May 23 22:26:55 2012 +0200 +++ b/Misc/NEWS Wed May 23 21:09:05 2012 +0200 @@ -10,22 +10,6 @@ Core and Builtins ----------------- -- Issue #14494: Fix __future__.py and its documentation to note that - absolute imports are the default behavior in 3.0 instead of 2.7. - Patch by Sven Marnach. - -- Issue #9260: A finer-grained import lock. Most of the import sequence - now uses per-module locks rather than the global import lock, eliminating - well-known issues with threads and imports. - -- Issue #14624: UTF-16 decoding is now 3x to 4x faster on various inputs. - Patch by Serhiy Storchaka. - -- asdl_seq and asdl_int_seq are now Py_ssize_t sized. 
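For context on the realm-matching hunk in Lib/urllib/request.py above: a minimal standalone sketch, using the two patterns exactly as they appear in that hunk (the variable names here are illustrative only, not part of the patch), showing that the lenient pattern accepts an unquoted realm while the restored strict pattern does not::

    import re

    # Lenient pattern (the "-" line in the hunk): quotes around the realm are optional.
    lenient_rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+'
                            'realm=(["\']?)([^"\']*)\\2', re.I)
    # Strict pattern (the "+" line in the hunk): quotes around the realm are required.
    strict_rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+'
                           'realm=(["\'])(.*?)\\2', re.I)

    header = 'Basic realm=ACME Widget Store'      # unquoted realm, as seen in the wild
    print(lenient_rx.search(header) is not None)  # True: lenient pattern still matches
    print(strict_rx.search(header) is not None)   # False: strict pattern needs quotes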
- -- Issue #14133 (PEP 415): Implement suppression of __context__ display with an - attribute on BaseException. This replaces the original mechanism of PEP 409. - - Issue #14417: Mutating a dict during lookup now restarts the lookup instead of raising a RuntimeError (undoes issue #14205). @@ -42,74 +26,10 @@ Library ------- -- Issue #14862: Add missing names to os.__all__ - -- Issue #14875: Use float('inf') instead of float('1e66666') in the json module. - -- Issue #13585: Added contextlib.ExitStack - -- PEP 3144, Issue #14814: Added the ipaddress module - -- Issue #14426: Correct the Date format in Expires attribute of Set-Cookie - Header in Cookie.py. - -- Issue #14588: The types module now provide new_class() and prepare_class() - functions to support PEP 3115 compliant dynamic class creation. Patch by - Daniel Urban and Nick Coghlan. - -- Issue #13152: Allow to specify a custom tabsize for expanding tabs in - textwrap. Patch by John Feuerstein. - -- Issue #14721: Send the correct 'Content-length: 0' header when the body is an - empty string ''. Initial Patch contributed by Arve Knudsen. - -- Issue #14072: Fix parsing of 'tel' URIs in urlparse by making the check for - ports stricter. - -- Issue #9374: Generic parsing of query and fragment portions of url for any - scheme. Supported both by RFC3986 and RFC2396. - -- Issue #14798: Fix the functions in pyclbr to raise an ImportError - when the first part of a dotted name is not a package. Patch by - Xavier de Gaye. - -- Issue #12098: multiprocessing on Windows now starts child processes - using the same sys.flags as the current process. Initial patch by - Sergey Mezentsev. - -- Issue #13031: Small speed-up for tarfile when unzipping tarfiles. - Patch by Justin Peel. - -- Issue #14780: urllib.request.urlopen() now has a ``cadefault`` argument - to use the default certificate store. Initial patch by James Oakley. - -- Issue #14829: Fix bisect and range() indexing with large indices - (>= 2 ** 32) under 64-bit Windows. - -- Issue #14732: The _csv module now uses PEP 3121 module initialization. - Patch by Robin Schreiber. - -- Issue #14809: Add HTTP status codes introduced by RFC 6585 to http.server - and http.client. Patch by EungJun Yi. - -- Issue #14777: tkinter may return undecoded UTF-8 bytes as a string when - accessing the Tk clipboard. Modify clipboad_get() to first request type - UTF8_STRING when no specific type is requested in an X11 windowing - environment, falling back to the current default type STRING if that fails. - Original patch by Thomas Kluyver. - -- Issue #14773: Fix os.fwalk() failing on dangling symlinks. - -- Issue #12541: Be lenient with quotes around Realm field of HTTP Basic - Authentation in urllib2. - -- Issue #14807: move undocumented tarfile.filemode() to stat.filemode() and add - doc entry. Add tarfile.filemode alias with deprecation warning. - - Issue #13815: TarFile.extractfile() now returns io.BufferedReader objects. - Issue #14532: Add a secure_compare() helper to the hmac module, to mitigate - timing attacks. Patch by Jon Oberheide. + timing attacks. Patch by Jon Oberheide. - Add importlib.util.resolve_name(). @@ -174,22 +94,8 @@ Build ----- -- Issue #14472: Update .gitignore. Patch by Matej Cepl. - -- Upgrade Windows library versions: bzip 1.0.6, OpenSSL 1.0.1c. - -- Issue #14693: Under non-Windows platforms, hashlib's fallback modules are - always compiled, even if OpenSSL is present at build time. - - Issue #13210: Windows build now uses VS2010, ported from VS2008. 
-Documentation -------------- - -- Issue #14588: The language reference now accurately documents the Python 3 - class definition process. Patch by Nick Coghlan. - - What's New in Python 3.3.0 Alpha 3? =================================== diff -r 0be296605165 -r faa88c50a3d2 Modules/_bisectmodule.c --- a/Modules/_bisectmodule.c Wed May 23 22:26:55 2012 +0200 +++ b/Modules/_bisectmodule.c Wed May 23 21:09:05 2012 +0200 @@ -3,7 +3,6 @@ Converted to C by Dmitry Vasiliev (dima at hlabs.spb.ru). */ -#define PY_SSIZE_T_CLEAN #include "Python.h" static Py_ssize_t @@ -196,7 +195,8 @@ return NULL; } else { _Py_IDENTIFIER(insert); - result = _PyObject_CallMethodId(list, &PyId_insert, "nO", index, item); + + result = _PyObject_CallMethodId(list, &PyId_insert, "iO", index, item); if (result == NULL) return NULL; Py_DECREF(result); diff -r 0be296605165 -r faa88c50a3d2 Modules/_csv.c --- a/Modules/_csv.c Wed May 23 22:26:55 2012 +0200 +++ b/Modules/_csv.c Wed May 23 21:09:05 2012 +0200 @@ -16,39 +16,9 @@ #define IS_BASESTRING(o) \ PyUnicode_Check(o) -typedef struct { - PyObject *error_obj; /* CSV exception */ - PyObject *dialects; /* Dialect registry */ - long field_limit; /* max parsed field size */ -} _csvstate; - -#define _csvstate(o) ((_csvstate *)PyModule_GetState(o)) - -static int -_csv_clear(PyObject *m) -{ - Py_CLEAR(_csvstate(m)->error_obj); - Py_CLEAR(_csvstate(m)->dialects); - return 0; -} - -static int -_csv_traverse(PyObject *m, visitproc visit, void *arg) -{ - Py_VISIT(_csvstate(m)->error_obj); - Py_VISIT(_csvstate(m)->dialects); - return 0; -} - -static void -_csv_free(void *m) -{ - _csv_clear((PyObject *)m); -} - -static struct PyModuleDef _csvmodule; - -#define _csvstate_global ((_csvstate *)PyModule_GetState(PyState_FindModule(&_csvmodule))) +static PyObject *error_obj; /* CSV exception */ +static PyObject *dialects; /* Dialect registry */ +static long field_limit = 128 * 1024; /* max parsed field size */ typedef enum { START_RECORD, START_FIELD, ESCAPED_CHAR, IN_FIELD, @@ -133,10 +103,10 @@ { PyObject *dialect_obj; - dialect_obj = PyDict_GetItem(_csvstate_global->dialects, name_obj); + dialect_obj = PyDict_GetItem(dialects, name_obj); if (dialect_obj == NULL) { if (!PyErr_Occurred()) - PyErr_Format(_csvstate_global->error_obj, "unknown dialect"); + PyErr_Format(error_obj, "unknown dialect"); } else Py_INCREF(dialect_obj); @@ -574,9 +544,9 @@ static int parse_add_char(ReaderObj *self, Py_UCS4 c) { - if (self->field_len >= _csvstate_global->field_limit) { - PyErr_Format(_csvstate_global->error_obj, "field larger than field limit (%ld)", - _csvstate_global->field_limit); + if (self->field_len >= field_limit) { + PyErr_Format(error_obj, "field larger than field limit (%ld)", + field_limit); return -1; } if (self->field_len == self->field_size && !parse_grow_buff(self)) @@ -733,7 +703,7 @@ } else { /* illegal */ - PyErr_Format(_csvstate_global->error_obj, "'%c' expected after '%c'", + PyErr_Format(error_obj, "'%c' expected after '%c'", dialect->delimiter, dialect->quotechar); return -1; @@ -746,7 +716,7 @@ else if (c == '\0') self->state = START_RECORD; else { - PyErr_Format(_csvstate_global->error_obj, "new-line character seen in unquoted field - do you need to open the file in universal-newline mode?"); + PyErr_Format(error_obj, "new-line character seen in unquoted field - do you need to open the file in universal-newline mode?"); return -1; } break; @@ -785,12 +755,12 @@ if (lineobj == NULL) { /* End of input OR exception */ if (!PyErr_Occurred() && self->field_len != 0) - 
PyErr_Format(_csvstate_global->error_obj, + PyErr_Format(error_obj, "newline inside string"); return NULL; } if (!PyUnicode_Check(lineobj)) { - PyErr_Format(_csvstate_global->error_obj, + PyErr_Format(error_obj, "iterator should return strings, " "not %.200s " "(did you open the file in text mode?)", @@ -808,7 +778,7 @@ c = PyUnicode_READ(kind, data, pos); if (c == '\0') { Py_DECREF(lineobj); - PyErr_Format(_csvstate_global->error_obj, + PyErr_Format(error_obj, "line contains NULL byte"); goto err; } @@ -1024,7 +994,7 @@ } if (want_escape) { if (!dialect->escapechar) { - PyErr_Format(_csvstate_global->error_obj, + PyErr_Format(error_obj, "need to escape, but no escapechar set"); return -1; } @@ -1040,7 +1010,7 @@ */ if (i == 0 && quote_empty) { if (dialect->quoting == QUOTE_NONE) { - PyErr_Format(_csvstate_global->error_obj, + PyErr_Format(error_obj, "single empty field record must be quoted"); return -1; } @@ -1157,7 +1127,7 @@ PyObject *line, *result; if (!PySequence_Check(seq)) - return PyErr_Format(_csvstate_global->error_obj, "sequence expected"); + return PyErr_Format(error_obj, "sequence expected"); len = PySequence_Length(seq); if (len < 0) @@ -1383,7 +1353,7 @@ static PyObject * csv_list_dialects(PyObject *module, PyObject *args) { - return PyDict_Keys(_csvstate_global->dialects); + return PyDict_Keys(dialects); } static PyObject * @@ -1402,7 +1372,7 @@ dialect = _call_dialect(dialect_obj, kwargs); if (dialect == NULL) return NULL; - if (PyDict_SetItem(_csvstate_global->dialects, name_obj, dialect) < 0) { + if (PyDict_SetItem(dialects, name_obj, dialect) < 0) { Py_DECREF(dialect); return NULL; } @@ -1414,8 +1384,8 @@ static PyObject * csv_unregister_dialect(PyObject *module, PyObject *name_obj) { - if (PyDict_DelItem(_csvstate_global->dialects, name_obj) < 0) - return PyErr_Format(_csvstate_global->error_obj, "unknown dialect"); + if (PyDict_DelItem(dialects, name_obj) < 0) + return PyErr_Format(error_obj, "unknown dialect"); Py_INCREF(Py_None); return Py_None; } @@ -1430,7 +1400,7 @@ csv_field_size_limit(PyObject *module, PyObject *args) { PyObject *new_limit = NULL; - long old_limit = _csvstate_global->field_limit; + long old_limit = field_limit; if (!PyArg_UnpackTuple(args, "field_size_limit", 0, 1, &new_limit)) return NULL; @@ -1440,9 +1410,9 @@ "limit must be an integer"); return NULL; } - _csvstate_global->field_limit = PyLong_AsLong(new_limit); - if (_csvstate_global->field_limit == -1 && PyErr_Occurred()) { - _csvstate_global->field_limit = old_limit; + field_limit = PyLong_AsLong(new_limit); + if (field_limit == -1 && PyErr_Occurred()) { + field_limit = old_limit; return NULL; } } @@ -1581,16 +1551,17 @@ { NULL, NULL } }; + static struct PyModuleDef _csvmodule = { PyModuleDef_HEAD_INIT, "_csv", csv_module_doc, - sizeof(_csvstate), + -1, csv_methods, NULL, - _csv_traverse, - _csv_clear, - _csv_free + NULL, + NULL, + NULL }; PyMODINIT_FUNC @@ -1618,16 +1589,11 @@ MODULE_VERSION) == -1) return NULL; - /* Set the field limit */ - _csvstate(module)->field_limit = 128 * 1024; - /* Do I still need to add this var to the Module Dict? 
*/ - /* Add _dialects dictionary */ - _csvstate(module)->dialects = PyDict_New(); - if (_csvstate(module)->dialects == NULL) + dialects = PyDict_New(); + if (dialects == NULL) return NULL; - Py_INCREF(_csvstate(module)->dialects); - if (PyModule_AddObject(module, "_dialects", _csvstate(module)->dialects)) + if (PyModule_AddObject(module, "_dialects", dialects)) return NULL; /* Add quote styles into dictionary */ @@ -1643,10 +1609,9 @@ return NULL; /* Add the CSV exception object to the module. */ - _csvstate(module)->error_obj = PyErr_NewException("_csv.Error", NULL, NULL); - if (_csvstate(module)->error_obj == NULL) + error_obj = PyErr_NewException("_csv.Error", NULL, NULL); + if (error_obj == NULL) return NULL; - Py_INCREF(_csvstate(module)->error_obj); - PyModule_AddObject(module, "Error", _csvstate(module)->error_obj); + PyModule_AddObject(module, "Error", error_obj); return module; } diff -r 0be296605165 -r faa88c50a3d2 Modules/_decimal/libmpdec/mpdecimal.c --- a/Modules/_decimal/libmpdec/mpdecimal.c Wed May 23 22:26:55 2012 +0200 +++ b/Modules/_decimal/libmpdec/mpdecimal.c Wed May 23 21:09:05 2012 +0200 @@ -3878,97 +3878,53 @@ } #endif -/* Pad the result with trailing zeros if it has fewer digits than prec. */ -static void -_mpd_zeropad(mpd_t *result, const mpd_context_t *ctx, uint32_t *status) -{ - if (!mpd_isspecial(result) && !mpd_iszero(result) && - result->digits < ctx->prec) { - mpd_ssize_t shift = ctx->prec - result->digits; - mpd_qshiftl(result, result, shift, status); - result->exp -= shift; - } -} - -/* Check if the result is guaranteed to be one. */ -static int -_mpd_qexp_check_one(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, - uint32_t *status) -{ - MPD_NEW_CONST(lim,0,-(ctx->prec+1),1,1,1,9); - MPD_NEW_SHARED(aa, a); - - mpd_set_positive(&aa); - - /* abs(a) <= 9 * 10**(-prec-1) */ - if (_mpd_cmp(&aa, &lim) <= 0) { - _settriple(result, 0, 1, 0); - _mpd_zeropad(result, ctx, status); - *status = MPD_Rounded|MPD_Inexact; - return 1; - } - - return 0; -} - +#if defined(_MSC_VER) + /* conversion from 'double' to 'mpd_ssize_t', possible loss of data */ + #pragma warning(disable:4244) +#endif /* * Get the number of iterations for the Horner scheme in _mpd_qexp(). 
*/ static inline mpd_ssize_t -_mpd_get_exp_iterations(const mpd_t *r, mpd_ssize_t p) -{ - mpd_ssize_t log10pbyr; /* lower bound for log10(p / abs(r)) */ - mpd_ssize_t n; - - assert(p >= 10); - assert(!mpd_iszero(r)); - assert(-p < mpd_adjexp(r) && mpd_adjexp(r) <= -1); +_mpd_get_exp_iterations(const mpd_t *a, mpd_ssize_t prec) +{ + mpd_uint_t dummy; + mpd_uint_t msdigits; + double f; + + /* 9 is MPD_RDIGITS for 32 bit platforms */ + _mpd_get_msdigits(&dummy, &msdigits, a, 9); + f = ((double)msdigits + 1) / mpd_pow10[mpd_word_digits(msdigits)]; #ifdef CONFIG_64 - if (p > (mpd_ssize_t)(1ULL<<52)) { + #ifdef USE_80BIT_LONG_DOUBLE + return ceill((1.435*(long double)prec - 1.182) + / log10l((long double)prec/f)); + #else + /* prec > floor((1ULL<<53) / 1.435) */ + if (prec > 6276793905742851LL) { return MPD_SSIZE_MAX; } + return ceil((1.435*(double)prec - 1.182) / log10((double)prec/f)); + #endif +#else /* CONFIG_32 */ + return ceil((1.435*(double)prec - 1.182) / log10((double)prec/f)); + #if defined(_MSC_VER) + #pragma warning(default:4244) + #endif #endif - - /* - * Lower bound for log10(p / abs(r)): adjexp(p) - (adjexp(r) + 1) - * At this point (for CONFIG_64, CONFIG_32 is not problematic): - * 1) 10 <= p <= 2**52 - * 2) -p < adjexp(r) <= -1 - * 3) 1 <= log10pbyr <= 2**52 + 14 - */ - log10pbyr = (mpd_word_digits(p)-1) - (mpd_adjexp(r)+1); - - /* - * The numerator in the paper is 1.435 * p - 1.182, calculated - * exactly. We compensate for rounding errors by using 1.43503. - * ACL2 proofs: - * 1) exp-iter-approx-lower-bound: The term below evaluated - * in 53-bit floating point arithmetic is greater than or - * equal to the exact term used in the paper. - * 2) exp-iter-approx-upper-bound: The term below is less than - * or equal to 3/2 * p <= 3/2 * 2**52. - */ - n = (mpd_ssize_t)ceil((1.43503*(double)p - 1.182) / (double)log10pbyr); - return n >= 3 ? n : 3; } /* - * Internal function, specials have been dealt with. The result has a - * relative error of less than 0.5 * 10**(-ctx->prec). + * Internal function, specials have been dealt with. * * The algorithm is from Hull&Abrham, Variable Precision Exponential Function, * ACM Transactions on Mathematical Software, Vol. 12, No. 2, June 1986. * * Main differences: * - * - The number of iterations for the Horner scheme is calculated using - * 53-bit floating point arithmetic. - * - * - In the error analysis for ER (relative error accumulated in the - * evaluation of the truncated series) the reduced operand r may - * have any number of digits. - * ACL2 proof: exponent-relative-error + * - The number of iterations for the Horner scheme is calculated using the + * C log10() function. * * - The analysis for early abortion has been adapted for the mpd_t * ranges. @@ -3985,23 +3941,18 @@ assert(!mpd_isspecial(a)); - if (mpd_iszerocoeff(a)) { - _settriple(result, MPD_POS, 1, 0); - return; - } - /* - * We are calculating e^x = e^(r*10^t) = (e^r)^(10^t), where abs(r) < 1 and t >= 0. + * We are calculating e^x = e^(r*10^t) = (e^r)^(10^t), where r < 1 and t >= 0. * * If t > 0, we have: * - * (1) 0.1 <= r < 1, so e^0.1 <= e^r. If t > MAX_T, overflow occurs: + * (1) 0.1 <= r < 1, so e^r >= e^0.1. Overflow in the final power operation + * will occur when (e^0.1)^(10^t) > 10^(emax+1). If we consider MAX_EMAX, + * this will happen for t > 10 (32 bit) or (t > 19) (64 bit). * - * MAX-EMAX+1 < log10(e^(0.1*10*t)) <= log10(e^(r*10^t)) < adjexp(e^(r*10^t))+1 - * - * (2) -1 < r <= -0.1, so e^r <= e^-0.1. 
It t > MAX_T, underflow occurs: - * - * adjexp(e^(r*10^t)) <= log10(e^(r*10^t)) <= log10(e^(-0.1*10^t) < MIN-ETINY + * (2) -1 < r <= -0.1, so e^r > e^-1. Underflow in the final power operation + * will occur when (e^-1)^(10^t) < 10^(etiny-1). If we consider MIN_ETINY, + * this will also happen for t > 10 (32 bit) or (t > 19) (64 bit). */ #if defined(CONFIG_64) #define MPD_EXP_MAX_T 19 @@ -4023,41 +3974,29 @@ return; } - /* abs(a) <= 9 * 10**(-prec-1) */ - if (_mpd_qexp_check_one(result, a, ctx, status)) { - return; - } - mpd_maxcontext(&workctx); workctx.prec = ctx->prec + t + 2; - workctx.prec = (workctx.prec < 10) ? 10 : workctx.prec; + workctx.prec = (workctx.prec < 9) ? 9 : workctx.prec; workctx.round = MPD_ROUND_HALF_EVEN; + if ((n = _mpd_get_exp_iterations(a, workctx.prec)) == MPD_SSIZE_MAX) { + mpd_seterror(result, MPD_Invalid_operation, status); /* GCOV_UNLIKELY */ + goto finish; /* GCOV_UNLIKELY */ + } + if (!mpd_qcopy(result, a, status)) { - return; + goto finish; } result->exp -= t; - /* - * At this point: - * 1) 9 * 10**(-prec-1) < abs(a) - * 2) 9 * 10**(-prec-t-1) < abs(r) - * 3) log10(9) - prec - t - 1 < log10(abs(r)) < adjexp(abs(r)) + 1 - * 4) - prec - t - 2 < adjexp(abs(r)) <= -1 - */ - n = _mpd_get_exp_iterations(result, workctx.prec); - if (n == MPD_SSIZE_MAX) { - mpd_seterror(result, MPD_Invalid_operation, status); /* GCOV_UNLIKELY */ - return; /* GCOV_UNLIKELY */ - } - _settriple(&sum, MPD_POS, 1, 0); for (j = n-1; j >= 1; j--) { word.data[0] = j; mpd_setdigits(&word); mpd_qdiv(&tmp, result, &word, &workctx, &workctx.status); - mpd_qfma(&sum, &sum, &tmp, &one, &workctx, &workctx.status); + mpd_qmul(&sum, &sum, &tmp, &workctx, &workctx.status); + mpd_qadd(&sum, &sum, &one, &workctx, &workctx.status); } #ifdef CONFIG_64 @@ -4074,8 +4013,8 @@ } #endif - _mpd_zeropad(result, ctx, status); - + +finish: mpd_del(&tmp); mpd_del(&sum); *status |= (workctx.status&MPD_Errors); @@ -4130,18 +4069,8 @@ workctx.prec = prec; _mpd_qexp(result, a, &workctx, status); _ssettriple(&ulp, MPD_POS, 1, - result->exp + result->digits-workctx.prec); - - /* - * At this point: - * 1) abs(result - e**x) < 0.5 * 10**(-prec) * e**x - * 2) result - ulp < e**x < result + ulp - * 3) result - ulp < result < result + ulp - * - * If round(result-ulp)==round(result+ulp), then - * round(result)==round(e**x). Therefore the result - * is correctly rounded. 
- */ + result->exp + result->digits-workctx.prec-1); + workctx.prec = ctx->prec; mpd_qadd(&t1, result, &ulp, &workctx, &workctx.status); mpd_qsub(&t2, result, &ulp, &workctx, &workctx.status); @@ -7431,7 +7360,7 @@ if (x > 2711437152599294ULL) { return SIZE_MAX; } - return (size_t)((double)x / log10(base) + 3); + return (double)x / log10(base) + 3; #endif #else /* CONFIG_32 */ { diff -r 0be296605165 -r faa88c50a3d2 Modules/_elementtree.c --- a/Modules/_elementtree.c Wed May 23 22:26:55 2012 +0200 +++ b/Modules/_elementtree.c Wed May 23 21:09:05 2012 +0200 @@ -1640,15 +1640,16 @@ return res; } -static PyObject* -element_setattro(ElementObject* self, PyObject* nameobj, PyObject* value) +static int +element_setattr(ElementObject* self, const char* name, PyObject* value) { - char *name = ""; - if (PyUnicode_Check(nameobj)) - name = _PyUnicode_AsString(nameobj); - - if (name == NULL) - return NULL; + if (value == NULL) { + PyErr_SetString( + PyExc_AttributeError, + "can't delete element attributes" + ); + return -1; + } if (strcmp(name, "tag") == 0) { Py_DECREF(self->tag); @@ -1670,10 +1671,10 @@ Py_INCREF(self->extra->attrib); } else { PyErr_SetString(PyExc_AttributeError, name); - return NULL; + return -1; } - return NULL; + return 0; } static PySequenceMethods element_as_sequence = { @@ -1699,7 +1700,7 @@ (destructor)element_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ - 0, /* tp_setattr */ + (setattrfunc)element_setattr, /* tp_setattr */ 0, /* tp_reserved */ (reprfunc)element_repr, /* tp_repr */ 0, /* tp_as_number */ @@ -1709,7 +1710,7 @@ 0, /* tp_call */ 0, /* tp_str */ (getattrofunc)element_getattro, /* tp_getattro */ - (setattrofunc)element_setattro, /* tp_setattro */ + 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /* tp_flags */ diff -r 0be296605165 -r faa88c50a3d2 Modules/_lzmamodule.c --- a/Modules/_lzmamodule.c Wed May 23 22:26:55 2012 +0200 +++ b/Modules/_lzmamodule.c Wed May 23 21:09:05 2012 +0200 @@ -160,7 +160,7 @@ "Value too large for " #TYPE " type"); \ return 0; \ } \ - *(TYPE *)ptr = (TYPE)val; \ + *(TYPE *)ptr = val; \ return 1; \ } diff -r 0be296605165 -r faa88c50a3d2 Modules/_testbuffer.c --- a/Modules/_testbuffer.c Wed May 23 22:26:55 2012 +0200 +++ b/Modules/_testbuffer.c Wed May 23 21:09:05 2012 +0200 @@ -2337,12 +2337,6 @@ return ret; } -static PyObject * -get_sizeof_void_p(PyObject *self) -{ - return PyLong_FromSize_t(sizeof(void *)); -} - static char get_ascii_order(PyObject *order) { @@ -2732,7 +2726,6 @@ static struct PyMethodDef _testbuffer_functions[] = { {"slice_indices", slice_indices, METH_VARARGS, NULL}, {"get_pointer", get_pointer, METH_VARARGS, NULL}, - {"get_sizeof_void_p", (PyCFunction)get_sizeof_void_p, METH_NOARGS, NULL}, {"get_contiguous", get_contiguous, METH_VARARGS, NULL}, {"is_contiguous", is_contiguous, METH_VARARGS, NULL}, {"cmp_contig", cmp_contig, METH_VARARGS, NULL}, diff -r 0be296605165 -r faa88c50a3d2 Modules/itertoolsmodule.c --- a/Modules/itertoolsmodule.c Wed May 23 22:26:55 2012 +0200 +++ b/Modules/itertoolsmodule.c Wed May 23 21:09:05 2012 +0200 @@ -533,8 +533,7 @@ tdo->values[i] = PyList_GET_ITEM(values, i); Py_INCREF(tdo->values[i]); } - /* len <= LINKCELLS < INT_MAX */ - tdo->numread = Py_SAFE_DOWNCAST(len, Py_ssize_t, int); + tdo->numread = len; if (len == LINKCELLS) { if (next != Py_None) { diff -r 0be296605165 -r faa88c50a3d2 Objects/complexobject.c --- a/Objects/complexobject.c Wed May 23 22:26:55 2012 +0200 +++ b/Objects/complexobject.c Wed May 23 
21:09:05 2012 +0200 @@ -699,11 +699,22 @@ complex__format__(PyObject* self, PyObject* args) { PyObject *format_spec; + _PyUnicodeWriter writer; + int ret; if (!PyArg_ParseTuple(args, "U:__format__", &format_spec)) - return NULL; - return _PyComplex_FormatAdvanced(self, format_spec, 0, - PyUnicode_GET_LENGTH(format_spec)); + return NULL; + + _PyUnicodeWriter_Init(&writer, 0); + ret = _PyComplex_FormatAdvancedWriter( + self, + format_spec, 0, PyUnicode_GET_LENGTH(format_spec), + &writer); + if (ret == -1) { + _PyUnicodeWriter_Dealloc(&writer); + return NULL; + } + return _PyUnicodeWriter_Finish(&writer); } #if 0 diff -r 0be296605165 -r faa88c50a3d2 Objects/exceptions.c --- a/Objects/exceptions.c Wed May 23 22:26:55 2012 +0200 +++ b/Objects/exceptions.c Wed May 23 21:09:05 2012 +0200 @@ -42,7 +42,6 @@ /* the dict is created on the fly in PyObject_GenericSetAttr */ self->dict = NULL; self->traceback = self->cause = self->context = NULL; - self->suppress_context = 0; self->args = PyTuple_New(0); if (!self->args) { @@ -267,7 +266,24 @@ PyObject *res = PyException_GetCause(self); if (res) return res; /* new reference already returned above */ - Py_RETURN_NONE; + Py_INCREF(Py_Ellipsis); + return Py_Ellipsis; +} + +int +_PyException_SetCauseChecked(PyObject *self, PyObject *arg) { + if (arg == Py_Ellipsis) { + arg = NULL; + } else if (arg != Py_None && !PyExceptionInstance_Check(arg)) { + PyErr_SetString(PyExc_TypeError, "exception cause must be None, " + "Ellipsis or derive from BaseException"); + return -1; + } else { + /* PyException_SetCause steals a reference */ + Py_INCREF(arg); + } + PyException_SetCause(self, arg); + return 0; } static int @@ -275,18 +291,8 @@ if (arg == NULL) { PyErr_SetString(PyExc_TypeError, "__cause__ may not be deleted"); return -1; - } else if (arg == Py_None) { - arg = NULL; - } else if (!PyExceptionInstance_Check(arg)) { - PyErr_SetString(PyExc_TypeError, "exception cause must be None " - "or derive from BaseException"); - return -1; - } else { - /* PyException_SetCause steals this reference */ - Py_INCREF(arg); } - PyException_SetCause(self, arg); - return 0; + return _PyException_SetCauseChecked(self, arg); } @@ -327,7 +333,6 @@ PyException_SetCause(PyObject *self, PyObject *cause) { PyObject *old_cause = ((PyBaseExceptionObject *)self)->cause; ((PyBaseExceptionObject *)self)->cause = cause; - ((PyBaseExceptionObject *)self)->suppress_context = 1; Py_XDECREF(old_cause); } @@ -347,13 +352,6 @@ } -static struct PyMemberDef BaseException_members[] = { - {"__suppress_context__", T_BOOL, - offsetof(PyBaseExceptionObject, suppress_context)}, - {NULL} -}; - - static PyTypeObject _PyExc_BaseException = { PyVarObject_HEAD_INIT(NULL, 0) "BaseException", /*tp_name*/ @@ -384,7 +382,7 @@ 0, /* tp_iter */ 0, /* tp_iternext */ BaseException_methods, /* tp_methods */ - BaseException_members, /* tp_members */ + 0, /* tp_members */ BaseException_getset, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ diff -r 0be296605165 -r faa88c50a3d2 Objects/floatobject.c --- a/Objects/floatobject.c Wed May 23 22:26:55 2012 +0200 +++ b/Objects/floatobject.c Wed May 23 21:09:05 2012 +0200 @@ -273,7 +273,8 @@ NULL); if (!buf) return PyErr_NoMemory(); - result = PyUnicode_FromString(buf); + result = PyUnicode_FromKindAndData(PyUnicode_1BYTE_KIND, + buf, strlen(buf)); PyMem_Free(buf); return result; } @@ -1703,11 +1704,22 @@ float__format__(PyObject *self, PyObject *args) { PyObject *format_spec; + _PyUnicodeWriter writer; + int ret; if (!PyArg_ParseTuple(args, "U:__format__", &format_spec)) 
return NULL; - return _PyFloat_FormatAdvanced(self, format_spec, 0, - PyUnicode_GET_LENGTH(format_spec)); + + _PyUnicodeWriter_Init(&writer, 0); + ret = _PyFloat_FormatAdvancedWriter( + self, + format_spec, 0, PyUnicode_GET_LENGTH(format_spec), + &writer); + if (ret == -1) { + _PyUnicodeWriter_Dealloc(&writer); + return NULL; + } + return _PyUnicodeWriter_Finish(&writer); } PyDoc_STRVAR(float__format__doc, diff -r 0be296605165 -r faa88c50a3d2 Objects/longobject.c --- a/Objects/longobject.c Wed May 23 22:26:55 2012 +0200 +++ b/Objects/longobject.c Wed May 23 21:09:05 2012 +0200 @@ -1550,20 +1550,22 @@ string. (Return value is non-shared so that callers can modify the returned value if necessary.) */ -static PyObject * -long_to_decimal_string(PyObject *aa) +static int +long_to_decimal_string_internal(PyObject *aa, + PyObject **p_output, + _PyUnicodeWriter *writer) { PyLongObject *scratch, *a; PyObject *str; Py_ssize_t size, strlen, size_a, i, j; digit *pout, *pin, rem, tenpow; - unsigned char *p; int negative; + enum PyUnicode_Kind kind; a = (PyLongObject *)aa; if (a == NULL || !PyLong_Check(a)) { PyErr_BadInternalCall(); - return NULL; + return -1; } size_a = ABS(Py_SIZE(a)); negative = Py_SIZE(a) < 0; @@ -1580,13 +1582,13 @@ if (size_a > PY_SSIZE_T_MAX / PyLong_SHIFT) { PyErr_SetString(PyExc_OverflowError, "long is too large to format"); - return NULL; + return -1; } /* the expression size_a * PyLong_SHIFT is now safe from overflow */ size = 1 + size_a * PyLong_SHIFT / (3 * _PyLong_DECIMAL_SHIFT); scratch = _PyLong_New(size); if (scratch == NULL) - return NULL; + return -1; /* convert array of base _PyLong_BASE digits in pin to an array of base _PyLong_DECIMAL_BASE digits in pout, following Knuth (TAOCP, @@ -1609,7 +1611,7 @@ /* check for keyboard interrupt */ SIGCHECK({ Py_DECREF(scratch); - return NULL; + return -1; }); } /* pout should have at least one digit, so that the case when a = 0 @@ -1625,65 +1627,113 @@ tenpow *= 10; strlen++; } - str = PyUnicode_New(strlen, '9'); - if (str == NULL) { - Py_DECREF(scratch); + if (writer) { + if (_PyUnicodeWriter_Prepare(writer, strlen, '9') == -1) + return -1; + kind = writer->kind; + str = NULL; + } + else { + str = PyUnicode_New(strlen, '9'); + if (str == NULL) { + Py_DECREF(scratch); + return -1; + } + kind = PyUnicode_KIND(str); + } + +#define WRITE_DIGITS(TYPE) \ + do { \ + if (writer) \ + p = (TYPE*)PyUnicode_DATA(writer->buffer) + writer->pos + strlen; \ + else \ + p = (TYPE*)PyUnicode_DATA(str) + strlen; \ + \ + *p = '\0'; \ + /* pout[0] through pout[size-2] contribute exactly \ + _PyLong_DECIMAL_SHIFT digits each */ \ + for (i=0; i < size - 1; i++) { \ + rem = pout[i]; \ + for (j = 0; j < _PyLong_DECIMAL_SHIFT; j++) { \ + *--p = '0' + rem % 10; \ + rem /= 10; \ + } \ + } \ + /* pout[size-1]: always produce at least one decimal digit */ \ + rem = pout[i]; \ + do { \ + *--p = '0' + rem % 10; \ + rem /= 10; \ + } while (rem != 0); \ + \ + /* and sign */ \ + if (negative) \ + *--p = '-'; \ + \ + /* check we've counted correctly */ \ + if (writer) \ + assert(p == ((TYPE*)PyUnicode_DATA(writer->buffer) + writer->pos)); \ + else \ + assert(p == (TYPE*)PyUnicode_DATA(str)); \ + } while (0) + + /* fill the string right-to-left */ + if (kind == PyUnicode_1BYTE_KIND) { + Py_UCS1 *p; + WRITE_DIGITS(Py_UCS1); + } + else if (kind == PyUnicode_2BYTE_KIND) { + Py_UCS2 *p; + WRITE_DIGITS(Py_UCS2); + } + else { + assert (kind == PyUnicode_4BYTE_KIND); + Py_UCS4 *p; + WRITE_DIGITS(Py_UCS4); + } +#undef WRITE_DIGITS + + Py_DECREF(scratch); + if (writer) { 
+ writer->pos += strlen; + } + else { + assert(_PyUnicode_CheckConsistency(str, 1)); + *p_output = (PyObject *)str; + } + return 0; +} + +static PyObject * +long_to_decimal_string(PyObject *aa) +{ + PyObject *v; + if (long_to_decimal_string_internal(aa, &v, NULL) == -1) return NULL; - } - - /* fill the string right-to-left */ - assert(PyUnicode_KIND(str) == PyUnicode_1BYTE_KIND); - p = PyUnicode_1BYTE_DATA(str) + strlen; - *p = '\0'; - /* pout[0] through pout[size-2] contribute exactly - _PyLong_DECIMAL_SHIFT digits each */ - for (i=0; i < size - 1; i++) { - rem = pout[i]; - for (j = 0; j < _PyLong_DECIMAL_SHIFT; j++) { - *--p = '0' + rem % 10; - rem /= 10; - } - } - /* pout[size-1]: always produce at least one decimal digit */ - rem = pout[i]; - do { - *--p = '0' + rem % 10; - rem /= 10; - } while (rem != 0); - - /* and sign */ - if (negative) - *--p = '-'; - - /* check we've counted correctly */ - assert(p == PyUnicode_1BYTE_DATA(str)); - assert(_PyUnicode_CheckConsistency(str, 1)); - Py_DECREF(scratch); - return (PyObject *)str; + return v; } /* Convert a long int object to a string, using a given conversion base, - which should be one of 2, 8, 10 or 16. Return a string object. - If base is 2, 8 or 16, add the proper prefix '0b', '0o' or '0x'. */ - -PyObject * -_PyLong_Format(PyObject *aa, int base) + which should be one of 2, 8 or 16. Return a string object. + If base is 2, 8 or 16, add the proper prefix '0b', '0o' or '0x' + if alternate is nonzero. */ + +static int +long_format_binary(PyObject *aa, int base, int alternate, + PyObject **p_output, _PyUnicodeWriter *writer) { register PyLongObject *a = (PyLongObject *)aa; PyObject *v; Py_ssize_t sz; Py_ssize_t size_a; - Py_UCS1 *p; + enum PyUnicode_Kind kind; int negative; int bits; - assert(base == 2 || base == 8 || base == 10 || base == 16); - if (base == 10) - return long_to_decimal_string((PyObject *)a); - + assert(base == 2 || base == 8 || base == 16); if (a == NULL || !PyLong_Check(a)) { PyErr_BadInternalCall(); - return NULL; + return -1; } size_a = ABS(Py_SIZE(a)); negative = Py_SIZE(a) < 0; @@ -1706,7 +1756,7 @@ /* Compute exact length 'sz' of output string. */ if (size_a == 0) { - sz = 3; + sz = 1; } else { Py_ssize_t size_a_in_bits; @@ -1714,56 +1764,126 @@ if (size_a > (PY_SSIZE_T_MAX - 3) / PyLong_SHIFT) { PyErr_SetString(PyExc_OverflowError, "int is too large to format"); - return NULL; + return -1; } size_a_in_bits = (size_a - 1) * PyLong_SHIFT + bits_in_digit(a->ob_digit[size_a - 1]); - /* Allow 2 characters for prefix and 1 for a '-' sign. */ - sz = 2 + negative + (size_a_in_bits + (bits - 1)) / bits; - } - - v = PyUnicode_New(sz, 'x'); - if (v == NULL) { + /* Allow 1 character for a '-' sign. 
*/ + sz = negative + (size_a_in_bits + (bits - 1)) / bits; + } + if (alternate) { + /* 2 characters for prefix */ + sz += 2; + } + + if (writer) { + if (_PyUnicodeWriter_Prepare(writer, sz, 'x') == -1) + return -1; + kind = writer->kind; + v = NULL; + } + else { + v = PyUnicode_New(sz, 'x'); + if (v == NULL) + return -1; + kind = PyUnicode_KIND(v); + } + +#define WRITE_DIGITS(TYPE) \ + do { \ + if (writer) \ + p = (TYPE*)PyUnicode_DATA(writer->buffer) + writer->pos + sz; \ + else \ + p = (TYPE*)PyUnicode_DATA(v) + sz; \ + \ + if (size_a == 0) { \ + *--p = '0'; \ + } \ + else { \ + /* JRH: special case for power-of-2 bases */ \ + twodigits accum = 0; \ + int accumbits = 0; /* # of bits in accum */ \ + Py_ssize_t i; \ + for (i = 0; i < size_a; ++i) { \ + accum |= (twodigits)a->ob_digit[i] << accumbits; \ + accumbits += PyLong_SHIFT; \ + assert(accumbits >= bits); \ + do { \ + char cdigit; \ + cdigit = (char)(accum & (base - 1)); \ + cdigit += (cdigit < 10) ? '0' : 'a'-10; \ + *--p = cdigit; \ + accumbits -= bits; \ + accum >>= bits; \ + } while (i < size_a-1 ? accumbits >= bits : accum > 0); \ + } \ + } \ + \ + if (alternate) { \ + if (base == 16) \ + *--p = 'x'; \ + else if (base == 8) \ + *--p = 'o'; \ + else /* (base == 2) */ \ + *--p = 'b'; \ + *--p = '0'; \ + } \ + if (negative) \ + *--p = '-'; \ + if (writer) \ + assert(p == ((TYPE*)PyUnicode_DATA(writer->buffer) + writer->pos)); \ + else \ + assert(p == (TYPE*)PyUnicode_DATA(v)); \ + } while (0) + + if (kind == PyUnicode_1BYTE_KIND) { + Py_UCS1 *p; + WRITE_DIGITS(Py_UCS1); + } + else if (kind == PyUnicode_2BYTE_KIND) { + Py_UCS2 *p; + WRITE_DIGITS(Py_UCS2); + } + else { + assert (kind == PyUnicode_4BYTE_KIND); + Py_UCS4 *p; + WRITE_DIGITS(Py_UCS4); + } +#undef WRITE_DIGITS + + if (writer) { + writer->pos += sz; + } + else { + assert(_PyUnicode_CheckConsistency(v, 1)); + *p_output = v; + } + return 0; +} + +PyObject * +_PyLong_Format(PyObject *aa, int base) +{ + PyObject *str; + int err; + if (base == 10) + err = long_to_decimal_string_internal(aa, &str, NULL); + else + err = long_format_binary(aa, base, 1, &str, NULL); + if (err == -1) return NULL; - } - assert(PyUnicode_KIND(v) == PyUnicode_1BYTE_KIND); - - p = PyUnicode_1BYTE_DATA(v) + sz; - if (size_a == 0) { - *--p = '0'; - } - else { - /* JRH: special case for power-of-2 bases */ - twodigits accum = 0; - int accumbits = 0; /* # of bits in accum */ - Py_ssize_t i; - for (i = 0; i < size_a; ++i) { - accum |= (twodigits)a->ob_digit[i] << accumbits; - accumbits += PyLong_SHIFT; - assert(accumbits >= bits); - do { - char cdigit; - cdigit = (char)(accum & (base - 1)); - cdigit += (cdigit < 10) ? '0' : 'a'-10; - *--p = cdigit; - accumbits -= bits; - accum >>= bits; - } while (i < size_a-1 ? accumbits >= bits : accum > 0); - } - } - - if (base == 16) - *--p = 'x'; - else if (base == 8) - *--p = 'o'; - else /* (base == 2) */ - *--p = 'b'; - *--p = '0'; - if (negative) - *--p = '-'; - assert(p == PyUnicode_1BYTE_DATA(v)); - assert(_PyUnicode_CheckConsistency(v, 1)); - return v; + return str; +} + +int +_PyLong_FormatWriter(PyObject *aa, + int base, int alternate, + _PyUnicodeWriter *writer) +{ + if (base == 10) + return long_to_decimal_string_internal(aa, NULL, writer); + else + return long_format_binary(aa, base, alternate, NULL, writer); } /* Table of digit values for 8-bit string -> integer conversion. 
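
long_format_binary above fills its output right to left: the digits of the power-of-two base first, then the optional '0b'/'0o'/'0x' prefix, then the sign, exactly the order the WRITE_DIGITS macro encodes. A reduced sketch of that fill order for a plain unsigned long; the real function instead consumes 15/30-bit PyLong digits through an accumulator and writes Py_UCS1/2/4 units, and format_binary here is an illustrative helper, not a CPython API:

#include <stdio.h>
#include <stddef.h>

/* Fill buf right-to-left with 'value' in base 2, 8 or 16 and return a
   pointer to the first character of the finished string. */
static char *format_binary(unsigned long value, int base, int negative,
                           int alternate, char *buf, size_t size)
{
    int bits = (base == 16) ? 4 : (base == 8) ? 3 : 1;
    char *p = buf + size;

    *--p = '\0';
    do {                                   /* always emit at least one digit */
        char cdigit = (char)(value & (unsigned long)(base - 1));
        cdigit += (cdigit < 10) ? '0' : 'a' - 10;
        *--p = cdigit;
        value >>= bits;
    } while (value != 0);
    if (alternate) {                       /* prefix goes in after the digits */
        *--p = (base == 16) ? 'x' : (base == 8) ? 'o' : 'b';
        *--p = '0';
    }
    if (negative)
        *--p = '-';
    return p;
}

int main(void)
{
    char buf[8 * sizeof(unsigned long) + 4];
    printf("%s\n", format_binary(255, 16, 0, 1, buf, sizeof buf)); /* 0xff  */
    printf("%s\n", format_binary(8, 8, 1, 1, buf, sizeof buf));    /* -0o10 */
    return 0;
}
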
@@ -4232,11 +4352,22 @@ long__format__(PyObject *self, PyObject *args) { PyObject *format_spec; + _PyUnicodeWriter writer; + int ret; if (!PyArg_ParseTuple(args, "U:__format__", &format_spec)) return NULL; - return _PyLong_FormatAdvanced(self, format_spec, 0, - PyUnicode_GET_LENGTH(format_spec)); + + _PyUnicodeWriter_Init(&writer, 0); + ret = _PyLong_FormatAdvancedWriter( + self, + format_spec, 0, PyUnicode_GET_LENGTH(format_spec), + &writer); + if (ret == -1) { + _PyUnicodeWriter_Dealloc(&writer); + return NULL; + } + return _PyUnicodeWriter_Finish(&writer); } /* Return a pair (q, r) such that a = b * q + r, and diff -r 0be296605165 -r faa88c50a3d2 Objects/rangeobject.c --- a/Objects/rangeobject.c Wed May 23 22:26:55 2012 +0200 +++ b/Objects/rangeobject.c Wed May 23 21:09:05 2012 +0200 @@ -308,7 +308,7 @@ static PyObject * range_item(rangeobject *r, Py_ssize_t i) { - PyObject *res, *arg = PyLong_FromSsize_t(i); + PyObject *res, *arg = PyLong_FromLong(i); if (!arg) { return NULL; } diff -r 0be296605165 -r faa88c50a3d2 Objects/stringlib/codecs.h --- a/Objects/stringlib/codecs.h Wed May 23 22:26:55 2012 +0200 +++ b/Objects/stringlib/codecs.h Wed May 23 21:09:05 2012 +0200 @@ -215,6 +215,7 @@ goto Return; } +#undef LONG_PTR_MASK #undef ASCII_CHAR_MASK @@ -414,152 +415,4 @@ #undef MAX_SHORT_UNICHARS } -/* The pattern for constructing UCS2-repeated masks. */ -#if SIZEOF_LONG == 8 -# define UCS2_REPEAT_MASK 0x0001000100010001ul -#elif SIZEOF_LONG == 4 -# define UCS2_REPEAT_MASK 0x00010001ul -#else -# error C 'long' size should be either 4 or 8! -#endif - -/* The mask for fast checking. */ -#if STRINGLIB_SIZEOF_CHAR == 1 -/* The mask for fast checking of whether a C 'long' contains a - non-ASCII or non-Latin1 UTF16-encoded characters. */ -# define FAST_CHAR_MASK (UCS2_REPEAT_MASK * (0xFFFFu & ~STRINGLIB_MAX_CHAR)) -#else -/* The mask for fast checking of whether a C 'long' may contain - UTF16-encoded surrogate characters. This is an efficient heuristic, - assuming that non-surrogate characters with a code point >= 0x8000 are - rare in most input. -*/ -# define FAST_CHAR_MASK (UCS2_REPEAT_MASK * 0x8000u) -#endif -/* The mask for fast byte-swapping. */ -#define STRIPPED_MASK (UCS2_REPEAT_MASK * 0x00FFu) -/* Swap bytes. */ -#define SWAB(value) ((((value) >> 8) & STRIPPED_MASK) | \ - (((value) & STRIPPED_MASK) << 8)) - -Py_LOCAL_INLINE(Py_UCS4) -STRINGLIB(utf16_decode)(const unsigned char **inptr, const unsigned char *e, - STRINGLIB_CHAR *dest, Py_ssize_t *outpos, - int native_ordering) -{ - Py_UCS4 ch; - const unsigned char *aligned_end = - (const unsigned char *) ((size_t) e & ~LONG_PTR_MASK); - const unsigned char *q = *inptr; - STRINGLIB_CHAR *p = dest + *outpos; - /* Offsets from q for retrieving byte pairs in the right order. */ -#ifdef BYTEORDER_IS_LITTLE_ENDIAN - int ihi = !!native_ordering, ilo = !native_ordering; -#else - int ihi = !native_ordering, ilo = !!native_ordering; -#endif - --e; - - while (q < e) { - Py_UCS4 ch2; - /* First check for possible aligned read of a C 'long'. Unaligned - reads are more expensive, better to defer to another iteration. */ - if (!((size_t) q & LONG_PTR_MASK)) { - /* Fast path for runs of in-range non-surrogate chars. 
*/ - register const unsigned char *_q = q; - while (_q < aligned_end) { - unsigned long block = * (unsigned long *) _q; - if (native_ordering) { - /* Can use buffer directly */ - if (block & FAST_CHAR_MASK) - break; - } - else { - /* Need to byte-swap */ - if (block & SWAB(FAST_CHAR_MASK)) - break; -#if STRINGLIB_SIZEOF_CHAR == 1 - block >>= 8; -#else - block = SWAB(block); -#endif - } -#ifdef BYTEORDER_IS_LITTLE_ENDIAN -# if SIZEOF_LONG == 4 - p[0] = (STRINGLIB_CHAR)(block & 0xFFFFu); - p[1] = (STRINGLIB_CHAR)(block >> 16); -# elif SIZEOF_LONG == 8 - p[0] = (STRINGLIB_CHAR)(block & 0xFFFFu); - p[1] = (STRINGLIB_CHAR)((block >> 16) & 0xFFFFu); - p[2] = (STRINGLIB_CHAR)((block >> 32) & 0xFFFFu); - p[3] = (STRINGLIB_CHAR)(block >> 48); -# endif -#else -# if SIZEOF_LONG == 4 - p[0] = (STRINGLIB_CHAR)(block >> 16); - p[1] = (STRINGLIB_CHAR)(block & 0xFFFFu); -# elif SIZEOF_LONG == 8 - p[0] = (STRINGLIB_CHAR)(block >> 48); - p[1] = (STRINGLIB_CHAR)((block >> 32) & 0xFFFFu); - p[2] = (STRINGLIB_CHAR)((block >> 16) & 0xFFFFu); - p[3] = (STRINGLIB_CHAR)(block & 0xFFFFu); -# endif -#endif - _q += SIZEOF_LONG; - p += SIZEOF_LONG / 2; - } - q = _q; - if (q >= e) - break; - } - - ch = (q[ihi] << 8) | q[ilo]; - q += 2; - if (!Py_UNICODE_IS_SURROGATE(ch)) { -#if STRINGLIB_SIZEOF_CHAR < 2 - if (ch > STRINGLIB_MAX_CHAR) - /* Out-of-range */ - goto Return; -#endif - *p++ = (STRINGLIB_CHAR)ch; - continue; - } - - /* UTF-16 code pair: */ - if (q >= e) - goto UnexpectedEnd; - if (!Py_UNICODE_IS_HIGH_SURROGATE(ch)) - goto IllegalEncoding; - ch2 = (q[ihi] << 8) | q[ilo]; - q += 2; - if (!Py_UNICODE_IS_LOW_SURROGATE(ch2)) - goto IllegalSurrogate; - ch = Py_UNICODE_JOIN_SURROGATES(ch, ch2); -#if STRINGLIB_SIZEOF_CHAR < 4 - /* Out-of-range */ - goto Return; -#else - *p++ = (STRINGLIB_CHAR)ch; -#endif - } - ch = 0; -Return: - *inptr = q; - *outpos = p - dest; - return ch; -UnexpectedEnd: - ch = 1; - goto Return; -IllegalEncoding: - ch = 2; - goto Return; -IllegalSurrogate: - ch = 3; - goto Return; -} -#undef UCS2_REPEAT_MASK -#undef FAST_CHAR_MASK -#undef STRIPPED_MASK -#undef SWAB -#undef LONG_PTR_MASK #endif /* STRINGLIB_IS_UNICODE */ diff -r 0be296605165 -r faa88c50a3d2 Objects/stringlib/unicode_format.h --- a/Objects/stringlib/unicode_format.h Wed May 23 22:26:55 2012 +0200 +++ b/Objects/stringlib/unicode_format.h Wed May 23 21:09:05 2012 +0200 @@ -499,26 +499,27 @@ int ok = 0; PyObject *result = NULL; PyObject *format_spec_object = NULL; - PyObject *(*formatter)(PyObject *, PyObject *, Py_ssize_t, Py_ssize_t) = NULL; - Py_ssize_t len; + int (*formatter) (PyObject *, PyObject *, Py_ssize_t, Py_ssize_t, _PyUnicodeWriter*) = NULL; + int err; /* If we know the type exactly, skip the lookup of __format__ and just call the formatter directly. */ if (PyUnicode_CheckExact(fieldobj)) - formatter = _PyUnicode_FormatAdvanced; + formatter = _PyUnicode_FormatAdvancedWriter; else if (PyLong_CheckExact(fieldobj)) - formatter =_PyLong_FormatAdvanced; + formatter = _PyLong_FormatAdvancedWriter; else if (PyFloat_CheckExact(fieldobj)) - formatter = _PyFloat_FormatAdvanced; - - /* XXX: for 2.6, convert format_spec to the appropriate type - (unicode, str) */ + formatter = _PyFloat_FormatAdvancedWriter; + else if (PyComplex_CheckExact(fieldobj)) + formatter = _PyComplex_FormatAdvancedWriter; if (formatter) { /* we know exactly which formatter will be called when __format__ is looked up, so call it directly, instead. 
*/ - result = formatter(fieldobj, format_spec->str, - format_spec->start, format_spec->end); + err = formatter(fieldobj, format_spec->str, + format_spec->start, format_spec->end, + writer); + return (err == 0); } else { /* We need to create an object out of the pointers we have, because @@ -536,17 +537,11 @@ } if (result == NULL) goto done; - if (PyUnicode_READY(result) == -1) + + if (_PyUnicodeWriter_WriteStr(writer, result) == -1) goto done; + ok = 1; - len = PyUnicode_GET_LENGTH(result); - if (_PyUnicodeWriter_Prepare(writer, - len, PyUnicode_MAX_CHAR_VALUE(result)) == -1) - goto done; - copy_characters(writer->buffer, writer->pos, - result, 0, len); - writer->pos += len; - ok = 1; done: Py_XDECREF(format_spec_object); Py_XDECREF(result); @@ -897,16 +892,19 @@ err = _PyUnicodeWriter_Prepare(writer, sublen, maxchar); if (err == -1) return 0; - copy_characters(writer->buffer, writer->pos, - literal.str, literal.start, sublen); + _PyUnicode_FastCopyCharacters(writer->buffer, writer->pos, + literal.str, literal.start, sublen); writer->pos += sublen; } - if (field_present) + if (field_present) { + if (iter.str.start == iter.str.end) + writer->flags.overallocate = 0; if (!output_markup(&field_name, &format_spec, format_spec_needs_expanding, conversion, writer, args, kwargs, recursion_depth, auto_number)) return 0; + } } return result; } @@ -921,7 +919,7 @@ int recursion_depth, AutoNumber *auto_number) { _PyUnicodeWriter writer; - Py_ssize_t initlen; + Py_ssize_t minlen; /* check the recursion level */ if (recursion_depth <= 0) { @@ -930,9 +928,8 @@ return NULL; } - initlen = PyUnicode_GET_LENGTH(input->str) + 100; - if (_PyUnicodeWriter_Init(&writer, initlen, 127) == -1) - return NULL; + minlen = PyUnicode_GET_LENGTH(input->str) + 100; + _PyUnicodeWriter_Init(&writer, minlen); if (!do_markup(input, args, kwargs, &writer, recursion_depth, auto_number)) { diff -r 0be296605165 -r faa88c50a3d2 Objects/typeobject.c --- a/Objects/typeobject.c Wed May 23 22:26:55 2012 +0200 +++ b/Objects/typeobject.c Wed May 23 21:09:05 2012 +0200 @@ -2406,7 +2406,7 @@ /* need to make a copy of the docstring slot, which usually points to a static string literal */ if (slot->slot == Py_tp_doc) { - size_t len = strlen(slot->pfunc)+1; + ssize_t len = strlen(slot->pfunc)+1; char *tp_doc = PyObject_MALLOC(len); if (tp_doc == NULL) goto fail; diff -r 0be296605165 -r faa88c50a3d2 Objects/unicodeobject.c --- a/Objects/unicodeobject.c Wed May 23 22:26:55 2012 +0200 +++ b/Objects/unicodeobject.c Wed May 23 21:09:05 2012 +0200 @@ -225,10 +225,6 @@ /* forward */ static PyUnicodeObject *_PyUnicode_New(Py_ssize_t length); static PyObject* get_latin1_char(unsigned char ch); -static void copy_characters( - PyObject *to, Py_ssize_t to_start, - PyObject *from, Py_ssize_t from_start, - Py_ssize_t how_many); static int unicode_modifiable(PyObject *unicode); @@ -783,7 +779,7 @@ return NULL; copy_length = Py_MIN(length, PyUnicode_GET_LENGTH(unicode)); - copy_characters(copy, 0, unicode, 0, copy_length); + _PyUnicode_FastCopyCharacters(copy, 0, unicode, 0, copy_length); return copy; } else { @@ -1154,15 +1150,16 @@ assert(0 <= from_start); assert(0 <= to_start); assert(PyUnicode_Check(from)); - assert(PyUnicode_Check(to)); assert(PyUnicode_IS_READY(from)); - assert(PyUnicode_IS_READY(to)); assert(from_start + how_many <= PyUnicode_GET_LENGTH(from)); - assert(to_start + how_many <= PyUnicode_GET_LENGTH(to)); if (how_many == 0) return 0; + assert(PyUnicode_Check(to)); + assert(PyUnicode_IS_READY(to)); + assert(to_start + how_many <= 
PyUnicode_GET_LENGTH(to)); + from_kind = PyUnicode_KIND(from); from_data = PyUnicode_DATA(from); to_kind = PyUnicode_KIND(to); @@ -1267,10 +1264,10 @@ return 0; } -static void -copy_characters(PyObject *to, Py_ssize_t to_start, - PyObject *from, Py_ssize_t from_start, - Py_ssize_t how_many) +void +_PyUnicode_FastCopyCharacters( + PyObject *to, Py_ssize_t to_start, + PyObject *from, Py_ssize_t from_start, Py_ssize_t how_many) { (void)_copy_characters(to, to_start, from, from_start, how_many, 0); } @@ -2085,7 +2082,7 @@ return; } copy = PyUnicode_New(len, max_char); - copy_characters(copy, 0, unicode, 0, len); + _PyUnicode_FastCopyCharacters(copy, 0, unicode, 0, len); Py_DECREF(unicode); *p_unicode = copy; } @@ -2753,7 +2750,7 @@ (void) va_arg(vargs, char *); size = PyUnicode_GET_LENGTH(*callresult); assert(PyUnicode_KIND(*callresult) <= PyUnicode_KIND(string)); - copy_characters(string, i, *callresult, 0, size); + _PyUnicode_FastCopyCharacters(string, i, *callresult, 0, size); i += size; /* We're done with the unicode()/repr() => forget it */ Py_DECREF(*callresult); @@ -2767,7 +2764,7 @@ Py_ssize_t size; assert(PyUnicode_KIND(obj) <= PyUnicode_KIND(string)); size = PyUnicode_GET_LENGTH(obj); - copy_characters(string, i, obj, 0, size); + _PyUnicode_FastCopyCharacters(string, i, obj, 0, size); i += size; break; } @@ -2779,13 +2776,13 @@ if (obj) { size = PyUnicode_GET_LENGTH(obj); assert(PyUnicode_KIND(obj) <= PyUnicode_KIND(string)); - copy_characters(string, i, obj, 0, size); + _PyUnicode_FastCopyCharacters(string, i, obj, 0, size); i += size; } else { size = PyUnicode_GET_LENGTH(*callresult); assert(PyUnicode_KIND(*callresult) <= PyUnicode_KIND(string)); - copy_characters(string, i, *callresult, 0, size); + _PyUnicode_FastCopyCharacters(string, i, *callresult, 0, size); i += size; Py_DECREF(*callresult); } @@ -2800,7 +2797,7 @@ /* unused, since we already have the result */ (void) va_arg(vargs, PyObject *); assert(PyUnicode_KIND(*callresult) <= PyUnicode_KIND(string)); - copy_characters(string, i, *callresult, 0, size); + _PyUnicode_FastCopyCharacters(string, i, *callresult, 0, size); i += size; /* We're done with the unicode()/repr() => forget it */ Py_DECREF(*callresult); @@ -4171,7 +4168,7 @@ if (unicode_widen(output, *outpos, PyUnicode_MAX_CHAR_VALUE(repunicode)) < 0) goto onError; - copy_characters(*output, *outpos, repunicode, 0, replen); + _PyUnicode_FastCopyCharacters(*output, *outpos, repunicode, 0, replen); *outpos += replen; } else { @@ -5195,6 +5192,25 @@ return PyUnicode_DecodeUTF16Stateful(s, size, errors, byteorder, NULL); } +/* Two masks for fast checking of whether a C 'long' may contain + UTF16-encoded surrogate characters. This is an efficient heuristic, + assuming that non-surrogate characters with a code point >= 0x8000 are + rare in most input. + FAST_CHAR_MASK is used when the input is in native byte ordering, + SWAPPED_FAST_CHAR_MASK when the input is in byteswapped ordering. +*/ +#if (SIZEOF_LONG == 8) +# define FAST_CHAR_MASK 0x8000800080008000L +# define SWAPPED_FAST_CHAR_MASK 0x0080008000800080L +# define STRIPPED_MASK 0x00FF00FF00FF00FFL +#elif (SIZEOF_LONG == 4) +# define FAST_CHAR_MASK 0x80008000L +# define SWAPPED_FAST_CHAR_MASK 0x00800080L +# define STRIPPED_MASK 0x00FF00FFL +#else +# error C 'long' size should be either 4 or 8! 
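
The masks defined above drive the fast path in PyUnicode_DecodeUTF16Stateful: a whole C long worth of input is copied in one block unless some 16-bit unit is >= 0x8000 and so might be a surrogate, and byteswapped input is swapped per unit with STRIPPED_MASK. A reduced sketch of both operations on an explicit 32-bit block; the _32-suffixed names and helper functions exist only in this example, and, as the comment above notes, the test is a heuristic that also sends legitimate non-surrogate units >= 0x8000 to the slow path:

#include <stdint.h>
#include <stdio.h>

#define FAST_CHAR_MASK_32  UINT32_C(0x80008000)
#define STRIPPED_MASK_32   UINT32_C(0x00FF00FF)

/* 1 if one of the two UCS-2 units packed in 'block' is >= 0x8000, i.e. the
   block has to go through the slow per-unit path. */
static int needs_slow_path(uint32_t block)
{
    return (block & FAST_CHAR_MASK_32) != 0;
}

/* Swap the two bytes inside each 16-bit unit, as done for byteswapped input. */
static uint32_t swap_units(uint32_t block)
{
    return ((block >> 8) & STRIPPED_MASK_32) | ((block & STRIPPED_MASK_32) << 8);
}

int main(void)
{
    uint32_t plain = 0x00410042;    /* two BMP units: U+0041, U+0042 */
    uint32_t high  = 0xD8000041;    /* contains a high surrogate unit */
    printf("%d %d\n", needs_slow_path(plain), needs_slow_path(high)); /* 0 1 */
    printf("0x%08x\n", (unsigned)swap_units(plain));          /* 0x41004200 */
    return 0;
}
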
+#endif + PyObject * PyUnicode_DecodeUTF16Stateful(const char *s, Py_ssize_t size, @@ -5207,15 +5223,30 @@ Py_ssize_t endinpos; Py_ssize_t outpos; PyObject *unicode; - const unsigned char *q, *e; + const unsigned char *q, *e, *aligned_end; int bo = 0; /* assume native ordering by default */ - int native_ordering; + int native_ordering = 0; const char *errmsg = ""; + /* Offsets from q for retrieving byte pairs in the right order. */ +#ifdef BYTEORDER_IS_LITTLE_ENDIAN + int ihi = 1, ilo = 0; +#else + int ihi = 0, ilo = 1; +#endif PyObject *errorHandler = NULL; PyObject *exc = NULL; + /* Note: size will always be longer than the resulting Unicode + character count */ + unicode = PyUnicode_New(size, 127); + if (!unicode) + return NULL; + if (size == 0) + return unicode; + outpos = 0; + q = (unsigned char *)s; - e = q + size; + e = q + size - 1; if (byteorder) bo = *byteorder; @@ -5224,98 +5255,155 @@ byte order setting accordingly. In native mode, the leading BOM mark is skipped, in all other modes, it is copied to the output stream as-is (giving a ZWNBSP character). */ - if (bo == 0 && size >= 2) { - const Py_UCS4 bom = (q[1] << 8) | q[0]; - if (bom == 0xFEFF) { - q += 2; - bo = -1; - } - else if (bom == 0xFFFE) { - q += 2; - bo = 1; - } - if (byteorder) - *byteorder = bo; - } - - if (q == e) { - if (consumed) - *consumed = size; - Py_INCREF(unicode_empty); - return unicode_empty; - } - + if (bo == 0) { + if (size >= 2) { + const Py_UCS4 bom = (q[ihi] << 8) | q[ilo]; #ifdef BYTEORDER_IS_LITTLE_ENDIAN - native_ordering = bo <= 0; + if (bom == 0xFEFF) { + q += 2; + bo = -1; + } + else if (bom == 0xFFFE) { + q += 2; + bo = 1; + } #else - native_ordering = bo >= 0; -#endif - - /* Note: size will always be longer than the resulting Unicode - character count */ - unicode = PyUnicode_New((e - q + 1) / 2, 127); - if (!unicode) - return NULL; - - outpos = 0; - while (1) { - Py_UCS4 ch = 0; - if (e - q >= 2) { + if (bom == 0xFEFF) { + q += 2; + bo = 1; + } + else if (bom == 0xFFFE) { + q += 2; + bo = -1; + } +#endif + } + } + + if (bo == -1) { + /* force LE */ + ihi = 1; + ilo = 0; + } + else if (bo == 1) { + /* force BE */ + ihi = 0; + ilo = 1; + } +#ifdef BYTEORDER_IS_LITTLE_ENDIAN + native_ordering = ilo < ihi; +#else + native_ordering = ilo > ihi; +#endif + + aligned_end = (const unsigned char *) ((size_t) e & ~LONG_PTR_MASK); + while (q < e) { + Py_UCS4 ch; + /* First check for possible aligned read of a C 'long'. Unaligned + reads are more expensive, better to defer to another iteration. */ + if (!((size_t) q & LONG_PTR_MASK)) { + /* Fast path for runs of non-surrogate chars. */ + register const unsigned char *_q = q; int kind = PyUnicode_KIND(unicode); - if (kind == PyUnicode_1BYTE_KIND) { - if (PyUnicode_IS_ASCII(unicode)) - ch = asciilib_utf16_decode(&q, e, - PyUnicode_1BYTE_DATA(unicode), &outpos, - native_ordering); - else - ch = ucs1lib_utf16_decode(&q, e, - PyUnicode_1BYTE_DATA(unicode), &outpos, - native_ordering); - } else if (kind == PyUnicode_2BYTE_KIND) { - ch = ucs2lib_utf16_decode(&q, e, - PyUnicode_2BYTE_DATA(unicode), &outpos, - native_ordering); - } else { - assert(kind == PyUnicode_4BYTE_KIND); - ch = ucs4lib_utf16_decode(&q, e, - PyUnicode_4BYTE_DATA(unicode), &outpos, - native_ordering); - } - } - - switch (ch) - { - case 0: - /* remaining byte at the end? 
(size should be even) */ - if (q == e || consumed) - goto End; - errmsg = "truncated data"; - startinpos = ((const char *)q) - starts; - endinpos = ((const char *)e) - starts; - break; - /* The remaining input chars are ignored if the callback - chooses to skip the input */ - case 1: - errmsg = "unexpected end of data"; - startinpos = ((const char *)q) - 2 - starts; - endinpos = ((const char *)e) - starts; - break; - case 2: - errmsg = "illegal encoding"; - startinpos = ((const char *)q) - 2 - starts; - endinpos = startinpos + 2; - break; - case 3: - errmsg = "illegal UTF-16 surrogate"; - startinpos = ((const char *)q) - 4 - starts; - endinpos = startinpos + 2; - break; - default: + void *data = PyUnicode_DATA(unicode); + while (_q < aligned_end) { + unsigned long block = * (unsigned long *) _q; + Py_UCS4 maxch; + if (native_ordering) { + /* Can use buffer directly */ + if (block & FAST_CHAR_MASK) + break; + } + else { + /* Need to byte-swap */ + if (block & SWAPPED_FAST_CHAR_MASK) + break; + block = ((block >> 8) & STRIPPED_MASK) | + ((block & STRIPPED_MASK) << 8); + } + maxch = (Py_UCS2)(block & 0xFFFF); +#if SIZEOF_LONG == 8 + ch = (Py_UCS2)((block >> 16) & 0xFFFF); + maxch = MAX_MAXCHAR(maxch, ch); + ch = (Py_UCS2)((block >> 32) & 0xFFFF); + maxch = MAX_MAXCHAR(maxch, ch); + ch = (Py_UCS2)(block >> 48); + maxch = MAX_MAXCHAR(maxch, ch); +#else + ch = (Py_UCS2)(block >> 16); + maxch = MAX_MAXCHAR(maxch, ch); +#endif + if (maxch > PyUnicode_MAX_CHAR_VALUE(unicode)) { + if (unicode_widen(&unicode, outpos, maxch) < 0) + goto onError; + kind = PyUnicode_KIND(unicode); + data = PyUnicode_DATA(unicode); + } +#ifdef BYTEORDER_IS_LITTLE_ENDIAN + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)(block & 0xFFFF)); +#if SIZEOF_LONG == 8 + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)((block >> 16) & 0xFFFF)); + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)((block >> 32) & 0xFFFF)); + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)((block >> 48))); +#else + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)(block >> 16)); +#endif +#else +#if SIZEOF_LONG == 8 + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)((block >> 48))); + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)((block >> 32) & 0xFFFF)); + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)((block >> 16) & 0xFFFF)); +#else + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)(block >> 16)); +#endif + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)(block & 0xFFFF)); +#endif + _q += SIZEOF_LONG; + } + q = _q; + if (q >= e) + break; + } + ch = (q[ihi] << 8) | q[ilo]; + + q += 2; + + if (!Py_UNICODE_IS_SURROGATE(ch)) { if (unicode_putchar(&unicode, &outpos, ch) < 0) goto onError; continue; } + /* UTF-16 code pair: */ + if (q > e) { + errmsg = "unexpected end of data"; + startinpos = (((const char *)q) - 2) - starts; + endinpos = ((const char *)e) + 1 - starts; + goto utf16Error; + } + if (Py_UNICODE_IS_HIGH_SURROGATE(ch)) { + Py_UCS4 ch2 = (q[ihi] << 8) | q[ilo]; + q += 2; + if (Py_UNICODE_IS_LOW_SURROGATE(ch2)) { + if (unicode_putchar(&unicode, &outpos, + Py_UNICODE_JOIN_SURROGATES(ch, ch2)) < 0) + goto onError; + continue; + } + else { + errmsg = "illegal UTF-16 surrogate"; + startinpos = (((const char *)q)-4)-starts; + endinpos = startinpos+2; + goto utf16Error; + } + + } + errmsg = "illegal encoding"; + startinpos = (((const char *)q)-2)-starts; + endinpos = startinpos+2; + /* Fall through to report the error */ + + utf16Error: if (unicode_decode_call_errorhandler( errors, &errorHandler, @@ -5330,8 +5418,33 @@ &outpos)) goto onError; } - -End: + 
/* remaining byte at the end? (size should be even) */ + if (e == q) { + if (!consumed) { + errmsg = "truncated data"; + startinpos = ((const char *)q) - starts; + endinpos = ((const char *)e) + 1 - starts; + if (unicode_decode_call_errorhandler( + errors, + &errorHandler, + "utf16", errmsg, + &starts, + (const char **)&e, + &startinpos, + &endinpos, + &exc, + (const char **)&q, + &unicode, + &outpos)) + goto onError; + /* The remaining input chars are ignored if the callback + chooses to skip the input */ + } + } + + if (byteorder) + *byteorder = bo; + if (consumed) *consumed = (const char *)q-starts; @@ -5350,6 +5463,9 @@ return NULL; } +#undef FAST_CHAR_MASK +#undef SWAPPED_FAST_CHAR_MASK + PyObject * _PyUnicode_EncodeUTF16(PyObject *str, const char *errors, @@ -9216,12 +9332,14 @@ /* If the maxchar increased so that the kind changed, not all characters are representable anymore and we need to fix the string again. This only happens in very few cases. */ - copy_characters(v, 0, self, 0, PyUnicode_GET_LENGTH(self)); + _PyUnicode_FastCopyCharacters(v, 0, + self, 0, PyUnicode_GET_LENGTH(self)); maxchar_old = fixfct(v); assert(maxchar_old > 0 && maxchar_old <= maxchar_new); } else { - copy_characters(v, 0, u, 0, PyUnicode_GET_LENGTH(self)); + _PyUnicode_FastCopyCharacters(v, 0, + u, 0, PyUnicode_GET_LENGTH(self)); } Py_DECREF(u); assert(_PyUnicode_CheckConsistency(v, 1)); @@ -9603,7 +9721,7 @@ res_data += kind * seplen; } else { - copy_characters(res, res_offset, sep, 0, seplen); + _PyUnicode_FastCopyCharacters(res, res_offset, sep, 0, seplen); res_offset += seplen; } } @@ -9616,7 +9734,7 @@ res_data += kind * itemlen; } else { - copy_characters(res, res_offset, item, 0, itemlen); + _PyUnicode_FastCopyCharacters(res, res_offset, item, 0, itemlen); res_offset += itemlen; } } @@ -9734,7 +9852,7 @@ FILL(kind, data, fill, 0, left); if (right) FILL(kind, data, fill, left + _PyUnicode_LENGTH(self), right); - copy_characters(u, left, self, 0, _PyUnicode_LENGTH(self)); + _PyUnicode_FastCopyCharacters(u, left, self, 0, _PyUnicode_LENGTH(self)); assert(_PyUnicode_CheckConsistency(u, 1)); return u; } @@ -10058,7 +10176,7 @@ u = PyUnicode_New(slen, maxchar); if (!u) goto error; - copy_characters(u, 0, self, 0, slen); + _PyUnicode_FastCopyCharacters(u, 0, self, 0, slen); rkind = PyUnicode_KIND(u); PyUnicode_WRITE(rkind, PyUnicode_DATA(u), pos, u2); @@ -10626,8 +10744,8 @@ w = PyUnicode_New(new_len, maxchar); if (w == NULL) goto onError; - copy_characters(w, 0, u, 0, u_len); - copy_characters(w, u_len, v, 0, v_len); + _PyUnicode_FastCopyCharacters(w, 0, u, 0, u_len); + _PyUnicode_FastCopyCharacters(w, u_len, v, 0, v_len); Py_DECREF(u); Py_DECREF(v); assert(_PyUnicode_CheckConsistency(w, 1)); @@ -10702,7 +10820,7 @@ goto error; } /* copy 'right' into the newly allocated area of 'left' */ - copy_characters(*p_left, left_len, right, 0, right_len); + _PyUnicode_FastCopyCharacters(*p_left, left_len, right, 0, right_len); } else { maxchar = PyUnicode_MAX_CHAR_VALUE(left); @@ -10713,8 +10831,8 @@ res = PyUnicode_New(new_len, maxchar); if (res == NULL) goto error; - copy_characters(res, 0, left, 0, left_len); - copy_characters(res, left_len, right, 0, right_len); + _PyUnicode_FastCopyCharacters(res, 0, left, 0, left_len); + _PyUnicode_FastCopyCharacters(res, left_len, right, 0, right_len); Py_DECREF(left); *p_left = res; } @@ -12769,60 +12887,74 @@ return PyBool_FromLong(result); } -typedef struct { - PyObject *buffer; - void *data; - enum PyUnicode_Kind kind; - Py_UCS4 maxchar; - Py_ssize_t pos; -} 
_PyUnicodeWriter ; - Py_LOCAL_INLINE(void) _PyUnicodeWriter_Update(_PyUnicodeWriter *writer) { + writer->size = PyUnicode_GET_LENGTH(writer->buffer); writer->maxchar = PyUnicode_MAX_CHAR_VALUE(writer->buffer); writer->data = PyUnicode_DATA(writer->buffer); writer->kind = PyUnicode_KIND(writer->buffer); } -Py_LOCAL(int) -_PyUnicodeWriter_Init(_PyUnicodeWriter *writer, - Py_ssize_t length, Py_UCS4 maxchar) -{ - writer->pos = 0; - writer->buffer = PyUnicode_New(length, maxchar); - if (writer->buffer == NULL) - return -1; - _PyUnicodeWriter_Update(writer); - return 0; -} - -Py_LOCAL_INLINE(int) -_PyUnicodeWriter_Prepare(_PyUnicodeWriter *writer, - Py_ssize_t length, Py_UCS4 maxchar) +void +_PyUnicodeWriter_Init(_PyUnicodeWriter *writer, Py_ssize_t min_length) +{ + memset(writer, 0, sizeof(*writer)); +#ifdef Py_DEBUG + writer->kind = 5; /* invalid kind */ +#endif + writer->min_length = Py_MAX(min_length, 100); + writer->flags.overallocate = 1; +} + +int +_PyUnicodeWriter_PrepareInternal(_PyUnicodeWriter *writer, + Py_ssize_t length, Py_UCS4 maxchar) { Py_ssize_t newlen; PyObject *newbuffer; + assert(length > 0); + if (length > PY_SSIZE_T_MAX - writer->pos) { PyErr_NoMemory(); return -1; } newlen = writer->pos + length; - if (newlen > PyUnicode_GET_LENGTH(writer->buffer)) { - /* overallocate 25% to limit the number of resize */ - if (newlen <= (PY_SSIZE_T_MAX - newlen / 4)) - newlen += newlen / 4; - - if (maxchar > writer->maxchar) { + if (writer->buffer == NULL) { + if (writer->flags.overallocate) { + /* overallocate 25% to limit the number of resize */ + if (newlen <= (PY_SSIZE_T_MAX - newlen / 4)) + newlen += newlen / 4; + if (newlen < writer->min_length) + newlen = writer->min_length; + } + writer->buffer = PyUnicode_New(newlen, maxchar); + if (writer->buffer == NULL) + return -1; + _PyUnicodeWriter_Update(writer); + return 0; + } + + if (newlen > writer->size) { + if (writer->flags.overallocate) { + /* overallocate 25% to limit the number of resize */ + if (newlen <= (PY_SSIZE_T_MAX - newlen / 4)) + newlen += newlen / 4; + if (newlen < 100) + newlen = 100; + } + + if (maxchar > writer->maxchar || writer->flags.readonly) { /* resize + widen */ newbuffer = PyUnicode_New(newlen, maxchar); if (newbuffer == NULL) return -1; - PyUnicode_CopyCharacters(newbuffer, 0, - writer->buffer, 0, writer->pos); + _PyUnicode_FastCopyCharacters(newbuffer, 0, + writer->buffer, 0, writer->pos); Py_DECREF(writer->buffer); + writer->flags.readonly = 0; } else { newbuffer = resize_compact(writer->buffer, newlen); @@ -12840,18 +12972,63 @@ return 0; } -Py_LOCAL(PyObject *) +int +_PyUnicodeWriter_WriteStr(_PyUnicodeWriter *writer, PyObject *str) +{ + Py_UCS4 maxchar; + Py_ssize_t len; + + if (PyUnicode_READY(str) == -1) + return -1; + len = PyUnicode_GET_LENGTH(str); + if (len == 0) + return 0; + maxchar = PyUnicode_MAX_CHAR_VALUE(str); + if (maxchar > writer->maxchar || len > writer->size - writer->pos) { + if (writer->buffer == NULL && !writer->flags.overallocate) { + Py_INCREF(str); + writer->buffer = str; + _PyUnicodeWriter_Update(writer); + writer->flags.readonly = 1; + writer->size = 0; + writer->pos += len; + return 0; + } + if (_PyUnicodeWriter_PrepareInternal(writer, len, maxchar) == -1) + return -1; + } + _PyUnicode_FastCopyCharacters(writer->buffer, writer->pos, + str, 0, len); + writer->pos += len; + return 0; +} + +PyObject * _PyUnicodeWriter_Finish(_PyUnicodeWriter *writer) { - if (PyUnicode_Resize(&writer->buffer, writer->pos) < 0) { - Py_DECREF(writer->buffer); - return NULL; + if (writer->pos == 0) { 
+ Py_XDECREF(writer->buffer); + Py_INCREF(unicode_empty); + return unicode_empty; + } + if (writer->flags.readonly) { + assert(PyUnicode_GET_LENGTH(writer->buffer) == writer->pos); + return writer->buffer; + } + if (PyUnicode_GET_LENGTH(writer->buffer) != writer->pos) { + PyObject *newbuffer; + newbuffer = resize_compact(writer->buffer, writer->pos); + if (newbuffer == NULL) { + Py_DECREF(writer->buffer); + return NULL; + } + writer->buffer = newbuffer; } assert(_PyUnicode_CheckConsistency(writer->buffer, 1)); return writer->buffer; } -Py_LOCAL(void) +void _PyUnicodeWriter_Dealloc(_PyUnicodeWriter *writer) { Py_CLEAR(writer->buffer); @@ -12874,14 +13051,24 @@ static PyObject * unicode__format__(PyObject* self, PyObject* args) { - PyObject *format_spec, *out; + PyObject *format_spec; + _PyUnicodeWriter writer; + int ret; if (!PyArg_ParseTuple(args, "U:__format__", &format_spec)) return NULL; - out = _PyUnicode_FormatAdvanced(self, format_spec, 0, - PyUnicode_GET_LENGTH(format_spec)); - return out; + if (PyUnicode_READY(self) == -1) + return NULL; + _PyUnicodeWriter_Init(&writer, 0); + ret = _PyUnicode_FormatAdvancedWriter(self, format_spec, 0, + PyUnicode_GET_LENGTH(format_spec), + &writer); + if (ret == -1) { + _PyUnicodeWriter_Dealloc(&writer); + return NULL; + } + return _PyUnicodeWriter_Finish(&writer); } PyDoc_STRVAR(p_format__doc__, @@ -13111,16 +13298,17 @@ /* Returns a new reference to a PyUnicode object, or NULL on failure. */ -static PyObject * -formatfloat(PyObject *v, int flags, int prec, int type) +static int +formatfloat(PyObject *v, int flags, int prec, int type, + PyObject **p_output, _PyUnicodeWriter *writer) { char *p; - PyObject *result; double x; + Py_ssize_t len; x = PyFloat_AsDouble(v); if (x == -1.0 && PyErr_Occurred()) - return NULL; + return -1; if (prec < 0) prec = 6; @@ -13128,10 +13316,20 @@ p = PyOS_double_to_string(x, type, prec, (flags & F_ALT) ? Py_DTSF_ALT : 0, NULL); if (p == NULL) - return NULL; - result = unicode_fromascii((unsigned char*)p, strlen(p)); + return -1; + len = strlen(p); + if (writer) { + if (_PyUnicodeWriter_Prepare(writer, len, 127) == -1) + return -1; + memcpy(writer->data + writer->pos * writer->kind, + p, + len); + writer->pos += len; + } + else + *p_output = unicode_fromascii((unsigned char*)p, strlen(p)); PyMem_Free(p); - return result; + return 0; } /* formatlong() emulates the format codes d, u, o, x and X, and @@ -13336,8 +13534,7 @@ fmtcnt = PyUnicode_GET_LENGTH(uformat); fmtpos = 0; - if (_PyUnicodeWriter_Init(&writer, fmtcnt + 100, 127) < 0) - goto onError; + _PyUnicodeWriter_Init(&writer, fmtcnt + 100); if (PyTuple_Check(args)) { arglen = PyTuple_Size(args); @@ -13368,8 +13565,8 @@ if (_PyUnicodeWriter_Prepare(&writer, sublen, maxchar) == -1) goto onError; - copy_characters(writer.buffer, writer.pos, - uformat, nonfmtpos, sublen); + _PyUnicode_FastCopyCharacters(writer.buffer, writer.pos, + uformat, nonfmtpos, sublen); writer.pos += sublen; } else { @@ -13472,10 +13669,7 @@ c = PyUnicode_READ(fmtkind, fmt, fmtpos++); if (c < '0' || c > '9') break; - /* Since c is unsigned, the RHS would end up as unsigned, - mixing signed and unsigned comparison. Since c is between - '0' and '9', casting to int is safe. 
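
The width and prec parsing kept just below guards every accumulation step with a test of the form value > (MAX - digit) / 10 before computing value = value*10 + digit, which makes overflow impossible rather than detecting it afterwards. The same guard in a standalone form on size_t; parse_width is an invented helper for illustration, not the CPython code path:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Parse a run of decimal digits into *out; -1 if the value would overflow. */
static int parse_width(const char *spec, size_t *out)
{
    size_t width = 0;
    for (; *spec >= '0' && *spec <= '9'; spec++) {
        size_t d = (size_t)(*spec - '0');
        if (width > (SIZE_MAX - d) / 10)
            return -1;              /* the equivalent of "width too big" */
        width = width * 10 + d;     /* now provably <= SIZE_MAX */
    }
    *out = width;
    return 0;
}

int main(void)
{
    size_t w = 0;
    int rc = parse_width("1234", &w);
    printf("%d %zu\n", rc, w);                                  /* 0 1234 */
    rc = parse_width("9999999999999999999999999999999999999999", &w);
    printf("%d\n", rc);                                         /* -1 */
    return 0;
}
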
*/ - if (width > (PY_SSIZE_T_MAX - ((int)c - '0')) / 10) { + if (width > (PY_SSIZE_T_MAX - (c - '0')) / 10) { PyErr_SetString(PyExc_ValueError, "width too big"); goto onError; @@ -13510,7 +13704,7 @@ c = PyUnicode_READ(fmtkind, fmt, fmtpos++); if (c < '0' || c > '9') break; - if (prec > (INT_MAX - ((int)c - '0')) / 10) { + if (prec > (INT_MAX - (c - '0')) / 10) { PyErr_SetString(PyExc_ValueError, "prec too big"); goto onError; @@ -13530,6 +13724,8 @@ "incomplete format"); goto onError; } + if (fmtcnt == 0) + writer.flags.overallocate = 0; if (c == '%') { if (_PyUnicodeWriter_Prepare(&writer, 1, '%') == -1) @@ -13539,7 +13735,6 @@ continue; } - v = getnextarg(args, arglen, &argidx); if (v == NULL) goto onError; @@ -13552,6 +13747,13 @@ case 's': case 'r': case 'a': + if (PyLong_CheckExact(v) && width == -1 && prec == -1) { + /* Fast path */ + if (_PyLong_FormatWriter(v, 10, flags & F_ALT, &writer) == -1) + goto onError; + goto nextarg; + } + if (PyUnicode_CheckExact(v) && c == 's') { temp = v; Py_INCREF(temp); @@ -13572,6 +13774,32 @@ case 'o': case 'x': case 'X': + if (PyLong_CheckExact(v) + && width == -1 && prec == -1 + && !(flags & (F_SIGN | F_BLANK))) + { + /* Fast path */ + switch(c) + { + case 'd': + case 'i': + case 'u': + if (_PyLong_FormatWriter(v, 10, flags & F_ALT, &writer) == -1) + goto onError; + goto nextarg; + case 'x': + if (_PyLong_FormatWriter(v, 16, flags & F_ALT, &writer) == -1) + goto onError; + goto nextarg; + case 'o': + if (_PyLong_FormatWriter(v, 8, flags & F_ALT, &writer) == -1) + goto onError; + goto nextarg; + default: + break; + } + } + isnumok = 0; if (PyNumber_Check(v)) { PyObject *iobj=NULL; @@ -13611,10 +13839,20 @@ case 'F': case 'g': case 'G': + if (width == -1 && prec == -1 + && !(flags & (F_SIGN | F_BLANK))) + { + /* Fast path */ + if (formatfloat(v, flags, prec, c, NULL, &writer) == -1) + goto onError; + goto nextarg; + } + sign = 1; if (flags & F_ZERO) fill = '0'; - temp = formatfloat(v, flags, prec, c); + if (formatfloat(v, flags, prec, c, &temp, NULL) == -1) + temp = NULL; break; case 'c': @@ -13622,6 +13860,14 @@ Py_UCS4 ch = formatchar(v); if (ch == (Py_UCS4) -1) goto onError; + if (width == -1 && prec == -1) { + /* Fast path */ + if (_PyUnicodeWriter_Prepare(&writer, 1, ch) == -1) + goto onError; + PyUnicode_WRITE(writer.kind, writer.data, writer.pos, ch); + writer.pos += 1; + goto nextarg; + } temp = PyUnicode_FromOrdinal(ch); break; } @@ -13638,6 +13884,16 @@ if (temp == NULL) goto onError; assert (PyUnicode_Check(temp)); + + if (width == -1 && prec == -1 + && !(flags & (F_SIGN | F_BLANK))) + { + /* Fast path */ + if (_PyUnicodeWriter_WriteStr(&writer, temp) == -1) + goto onError; + goto nextarg; + } + if (PyUnicode_READY(temp) == -1) { Py_CLEAR(temp); goto onError; @@ -13676,15 +13932,15 @@ if (!(flags & F_LJUST)) { if (sign) { if ((width-1) > len) - bufmaxchar = Py_MAX(bufmaxchar, fill); + bufmaxchar = MAX_MAXCHAR(bufmaxchar, fill); } else { if (width > len) - bufmaxchar = Py_MAX(bufmaxchar, fill); + bufmaxchar = MAX_MAXCHAR(bufmaxchar, fill); } } maxchar = _PyUnicode_FindMaxChar(temp, 0, pindex+len); - bufmaxchar = Py_MAX(bufmaxchar, maxchar); + bufmaxchar = MAX_MAXCHAR(bufmaxchar, maxchar); buflen = width; if (sign && len == width) @@ -13737,8 +13993,8 @@ } } - copy_characters(writer.buffer, writer.pos, - temp, pindex, len); + _PyUnicode_FastCopyCharacters(writer.buffer, writer.pos, + temp, pindex, len); writer.pos += len; if (width > len) { sublen = width - len; @@ -13746,6 +14002,7 @@ writer.pos += sublen; } +nextarg: if (dict && (argidx 
< arglen) && c != '%') { PyErr_SetString(PyExc_TypeError, "not all arguments converted during string formatting"); diff -r 0be296605165 -r faa88c50a3d2 PC/VC6/bz2.dsp --- a/PC/VC6/bz2.dsp Wed May 23 22:26:55 2012 +0200 +++ b/PC/VC6/bz2.dsp Wed May 23 21:09:05 2012 +0200 @@ -44,7 +44,7 @@ # PROP Target_Dir "" F90=df.exe # ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "Py_BUILD_CORE_MODULE" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c -# ADD CPP /nologo /MD /W3 /GX /Zi /O2 /I "..\..\Include" /I ".." /I "..\..\..\bzip2-1.0.6" /D "Py_BUILD_CORE_MODULE" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /YX /FD /c +# ADD CPP /nologo /MD /W3 /GX /Zi /O2 /I "..\..\Include" /I ".." /I "..\..\..\bzip2-1.0.5" /D "Py_BUILD_CORE_MODULE" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /YX /FD /c # ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 # ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32 # ADD BASE RSC /l 0x409 /d "NDEBUG" @@ -54,7 +54,7 @@ # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386 -# ADD LINK32 ..\..\..\bzip2-1.0.6\libbz2.lib /nologo /base:"0x1D170000" /subsystem:windows /dll /debug /machine:I386 /nodefaultlib:"libc" /out:"./bz2.pyd" +# ADD LINK32 ..\..\..\bzip2-1.0.5\libbz2.lib /nologo /base:"0x1D170000" /subsystem:windows /dll /debug /machine:I386 /nodefaultlib:"libc" /out:"./bz2.pyd" # SUBTRACT LINK32 /pdb:none /nodefaultlib !ELSEIF "$(CFG)" == "bz2 - Win32 Debug" @@ -72,7 +72,7 @@ # PROP Target_Dir "" F90=df.exe # ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "Py_BUILD_CORE_MODULE" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c -# ADD CPP /nologo /MDd /W3 /Gm /GX /Zi /Od /I "..\..\Include" /I ".." /I "..\..\..\bzip2-1.0.6" /D "Py_BUILD_CORE_MODULE" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /YX /FD /c +# ADD CPP /nologo /MDd /W3 /Gm /GX /Zi /Od /I "..\..\Include" /I ".." /I "..\..\..\bzip2-1.0.5" /D "Py_BUILD_CORE_MODULE" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /YX /FD /c # ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 # ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32 # ADD BASE RSC /l 0x409 /d "_DEBUG" @@ -82,7 +82,7 @@ # ADD BSC32 /nologo LINK32=link.exe # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept -# ADD LINK32 ..\..\..\bzip2-1.0.6\libbz2.lib /nologo /base:"0x1D170000" /subsystem:windows /dll /debug /machine:I386 /nodefaultlib:"msvcrt" /nodefaultlib:"libc" /out:"./bz2_d.pyd" /pdbtype:sept +# ADD LINK32 ..\..\..\bzip2-1.0.5\libbz2.lib /nologo /base:"0x1D170000" /subsystem:windows /dll /debug /machine:I386 /nodefaultlib:"msvcrt" /nodefaultlib:"libc" /out:"./bz2_d.pyd" /pdbtype:sept # SUBTRACT LINK32 /pdb:none !ENDIF diff -r 0be296605165 -r faa88c50a3d2 PC/VC6/readme.txt --- a/PC/VC6/readme.txt Wed May 23 22:26:55 2012 +0200 +++ b/PC/VC6/readme.txt Wed May 23 21:09:05 2012 +0200 @@ -120,14 +120,14 @@ Download the source from the python.org copy into the dist directory: - svn export http://svn.python.org/projects/external/bzip2-1.0.6 + svn export http://svn.python.org/projects/external/bzip2-1.0.5 And requires building bz2 first. 
- cd dist\bzip2-1.0.6 + cd dist\bzip2-1.0.5 nmake -f makefile.msc - All of this managed to build bzip2-1.0.6\libbz2.lib, which the Python + All of this managed to build bzip2-1.0.5\libbz2.lib, which the Python project links in. @@ -153,9 +153,10 @@ Unpack into the "dist" directory, retaining the folder name from the archive - for example, the latest stable OpenSSL will install as - dist/openssl-1.0.1c + dist/openssl-1.0.0a - You need to use version 1.0.1c of OpenSSL. + You can (theoretically) use any version of OpenSSL you like - the + build process will automatically select the latest version. You can install the NASM assembler from http://www.nasm.us/ diff -r 0be296605165 -r faa88c50a3d2 PC/VS8.0/bz2.vcproj --- a/PC/VS8.0/bz2.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS8.0/bz2.vcproj Wed May 23 21:09:05 2012 +0200 @@ -532,7 +532,7 @@ @@ -559,39 +559,39 @@ Name="Source Files" > @@ -511,7 +511,7 @@ Name="Source Files" > diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/_decimal.vcproj --- a/PC/VS9.0/_decimal.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/_decimal.vcproj Wed May 23 21:09:05 2012 +0200 @@ -1,7 +1,7 @@ @@ -605,67 +605,67 @@ Name="Source Files" > @@ -591,19 +591,19 @@ Name="Source Files" > diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/_hashlib.vcproj --- a/PC/VS9.0/_hashlib.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/_hashlib.vcproj Wed May 23 21:09:05 2012 +0200 @@ -1,7 +1,7 @@ diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/_lzma.vcproj --- a/PC/VS9.0/_lzma.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/_lzma.vcproj Wed May 23 21:09:05 2012 +0200 @@ -1,7 +1,7 @@ diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/_msi.vcproj --- a/PC/VS9.0/_msi.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/_msi.vcproj Wed May 23 21:09:05 2012 +0200 @@ -1,7 +1,7 @@ diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/_multiprocessing.vcproj --- a/PC/VS9.0/_multiprocessing.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/_multiprocessing.vcproj Wed May 23 21:09:05 2012 +0200 @@ -1,7 +1,7 @@ @@ -527,11 +527,11 @@ Name="Source Files" > diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/_socket.vcproj --- a/PC/VS9.0/_socket.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/_socket.vcproj Wed May 23 21:09:05 2012 +0200 @@ -1,7 +1,7 @@ @@ -527,7 +527,7 @@ Name="Source Files" > diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/_sqlite3.vcproj --- a/PC/VS9.0/_sqlite3.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/_sqlite3.vcproj Wed May 23 21:09:05 2012 +0200 @@ -527,43 +527,43 @@ Name="Header Files" > @@ -571,39 +571,39 @@ Name="Source Files" > diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/_ssl.vcproj --- a/PC/VS9.0/_ssl.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/_ssl.vcproj Wed May 23 21:09:05 2012 +0200 @@ -1,7 +1,7 @@ diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/_testbuffer.vcproj --- a/PC/VS9.0/_testbuffer.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/_testbuffer.vcproj Wed May 23 21:09:05 2012 +0200 @@ -511,7 +511,7 @@ Name="Source Files" > diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/_testcapi.vcproj --- a/PC/VS9.0/_testcapi.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/_testcapi.vcproj Wed May 23 21:09:05 2012 +0200 @@ -1,7 +1,7 @@ diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/_tkinter.vcproj --- a/PC/VS9.0/_tkinter.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/_tkinter.vcproj Wed May 23 21:09:05 2012 +0200 @@ -1,7 +1,7 @@ diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/bdist_wininst.vcproj --- a/PC/VS9.0/bdist_wininst.vcproj Wed May 23 
22:26:55 2012 +0200 +++ b/PC/VS9.0/bdist_wininst.vcproj Wed May 23 21:09:05 2012 +0200 @@ -20,7 +20,7 @@ @@ -247,7 +247,7 @@ Filter="h;hpp;hxx;hm;inl" > @@ -256,11 +256,11 @@ Filter="ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe" > diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/kill_python.c --- a/PC/VS9.0/kill_python.c Wed May 23 22:26:55 2012 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,178 +0,0 @@ -/* - * Helper program for killing lingering python[_d].exe processes before - * building, thus attempting to avoid build failures due to files being - * locked. - */ - -#include -#include -#include -#include - -#pragma comment(lib, "psapi") - -#ifdef _DEBUG -#define PYTHON_EXE (L"python_d.exe") -#define PYTHON_EXE_LEN (12) -#define KILL_PYTHON_EXE (L"kill_python_d.exe") -#define KILL_PYTHON_EXE_LEN (17) -#else -#define PYTHON_EXE (L"python.exe") -#define PYTHON_EXE_LEN (10) -#define KILL_PYTHON_EXE (L"kill_python.exe") -#define KILL_PYTHON_EXE_LEN (15) -#endif - -int -main(int argc, char **argv) -{ - HANDLE hp, hsp, hsm; /* process, snapshot processes, snapshot modules */ - DWORD dac, our_pid; - size_t len; - wchar_t path[MAX_PATH+1]; - - MODULEENTRY32W me; - PROCESSENTRY32W pe; - - me.dwSize = sizeof(MODULEENTRY32W); - pe.dwSize = sizeof(PROCESSENTRY32W); - - memset(path, 0, MAX_PATH+1); - - our_pid = GetCurrentProcessId(); - - hsm = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, our_pid); - if (hsm == INVALID_HANDLE_VALUE) { - printf("CreateToolhelp32Snapshot[1] failed: %d\n", GetLastError()); - return 1; - } - - if (!Module32FirstW(hsm, &me)) { - printf("Module32FirstW[1] failed: %d\n", GetLastError()); - CloseHandle(hsm); - return 1; - } - - /* - * Enumerate over the modules for the current process in order to find - * kill_process[_d].exe, then take a note of the directory it lives in. - */ - do { - if (_wcsnicmp(me.szModule, KILL_PYTHON_EXE, KILL_PYTHON_EXE_LEN)) - continue; - - len = wcsnlen_s(me.szExePath, MAX_PATH) - KILL_PYTHON_EXE_LEN; - wcsncpy_s(path, MAX_PATH+1, me.szExePath, len); - - break; - - } while (Module32NextW(hsm, &me)); - - CloseHandle(hsm); - - if (path == NULL) { - printf("failed to discern directory of running process\n"); - return 1; - } - - /* - * Take a snapshot of system processes. Enumerate over the snapshot, - * looking for python processes. When we find one, verify it lives - * in the same directory we live in. If it does, kill it. If we're - * unable to kill it, treat this as a fatal error and return 1. - * - * The rationale behind this is that we're called at the start of the - * build process on the basis that we'll take care of killing any - * running instances, such that the build won't encounter permission - * denied errors during linking. If we can't kill one of the processes, - * we can't provide this assurance, and the build shouldn't start. - */ - - hsp = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0); - if (hsp == INVALID_HANDLE_VALUE) { - printf("CreateToolhelp32Snapshot[2] failed: %d\n", GetLastError()); - return 1; - } - - if (!Process32FirstW(hsp, &pe)) { - printf("Process32FirstW failed: %d\n", GetLastError()); - CloseHandle(hsp); - return 1; - } - - dac = PROCESS_QUERY_INFORMATION | PROCESS_VM_READ | PROCESS_TERMINATE; - do { - - /* - * XXX TODO: if we really wanted to be fancy, we could check the - * modules for all processes (not just the python[_d].exe ones) - * and see if any of our DLLs are loaded (i.e. python33[_d].dll), - * as that would also inhibit our ability to rebuild the solution. 
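The PC/VS9.0/kill_python.c being removed in this hunk exists so a build never starts while a python.exe or python_d.exe launched from the same directory is still running and keeping the target files locked: it snapshots the system processes, matches them by executable name and directory, and terminates them, treating a failure to kill as fatal. Roughly the same effect can be sketched with the third-party psutil package (psutil and the function name are assumptions for illustration; the real helper talks to the Win32 Toolhelp API directly):

    import os
    import sys

    import psutil  # third-party; the C helper uses CreateToolhelp32Snapshot

    def kill_stale_pythons(build_dir, debug=False):
        """Terminate python[_d].exe processes whose executable lives in
        build_dir, mirroring what kill_python.c does before a build."""
        target = "python_d.exe" if debug else "python.exe"
        build_dir = os.path.normcase(os.path.abspath(build_dir))
        for proc in psutil.process_iter(["pid", "name", "exe"]):
            name = (proc.info["name"] or "").lower()
            exe = proc.info["exe"]
            if name != target or not exe:
                continue
            if os.path.normcase(os.path.dirname(exe)) != build_dir:
                continue  # some other installation - leave it alone
            try:
                proc.terminate()
                proc.wait(timeout=5)
            except psutil.Error as err:
                # The C helper treats an unkillable process as fatal, since
                # linking would later fail with "permission denied".
                sys.exit("could not terminate pid %d: %s" % (proc.pid, err))

    if __name__ == "__main__":
        kill_stale_pythons(os.path.dirname(os.path.abspath(__file__)))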
- * Not worth loosing sleep over though; for now, a simple check - * for just the python executable should be sufficient. - */ - - if (_wcsnicmp(pe.szExeFile, PYTHON_EXE, PYTHON_EXE_LEN)) - /* This isn't a python process. */ - continue; - - /* It's a python process, so figure out which directory it's in... */ - hsm = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pe.th32ProcessID); - if (hsm == INVALID_HANDLE_VALUE) - /* - * If our module snapshot fails (which will happen if we don't own - * the process), just ignore it and continue. (It seems different - * versions of Windows return different values for GetLastError() - * in this situation; it's easier to just ignore it and move on vs. - * stopping the build for what could be a false positive.) - */ - continue; - - if (!Module32FirstW(hsm, &me)) { - printf("Module32FirstW[2] failed: %d\n", GetLastError()); - CloseHandle(hsp); - CloseHandle(hsm); - return 1; - } - - do { - if (_wcsnicmp(me.szModule, PYTHON_EXE, PYTHON_EXE_LEN)) - /* Wrong module, we're looking for python[_d].exe... */ - continue; - - if (_wcsnicmp(path, me.szExePath, len)) - /* Process doesn't live in our directory. */ - break; - - /* Python process residing in the right directory, kill it! */ - hp = OpenProcess(dac, FALSE, pe.th32ProcessID); - if (!hp) { - printf("OpenProcess failed: %d\n", GetLastError()); - CloseHandle(hsp); - CloseHandle(hsm); - return 1; - } - - if (!TerminateProcess(hp, 1)) { - printf("TerminateProcess failed: %d\n", GetLastError()); - CloseHandle(hsp); - CloseHandle(hsm); - CloseHandle(hp); - return 1; - } - - CloseHandle(hp); - break; - - } while (Module32NextW(hsm, &me)); - - CloseHandle(hsm); - - } while (Process32NextW(hsp, &pe)); - - CloseHandle(hsp); - - return 0; -} - -/* vi: set ts=8 sw=4 sts=4 expandtab */ diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/make_buildinfo.c --- a/PC/VS9.0/make_buildinfo.c Wed May 23 22:26:55 2012 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,195 +0,0 @@ -#include -#include -#include -#include -#include - -#define CMD_SIZE 500 - -/* This file creates the getbuildinfo.o object, by first - invoking subwcrev.exe (if found), and then invoking cl.exe. - As a side effect, it might generate PCBuild\getbuildinfo2.c - also. If this isn't a subversion checkout, or subwcrev isn't - found, it compiles ..\\..\\Modules\\getbuildinfo.c instead. - - Currently, subwcrev.exe is found from the registry entries - of TortoiseSVN. - - No attempt is made to place getbuildinfo.o into the proper - binary directory. This isn't necessary, as this tool is - invoked as a pre-link step for pythoncore, so that overwrites - any previous getbuildinfo.o. - - However, if a second argument is provided, this will be used - as a temporary directory where any getbuildinfo2.c and - getbuildinfo.o files are put. This is useful if multiple - configurations are being built in parallel, to avoid them - trampling each other's files. - -*/ - -int make_buildinfo2(const char *tmppath) -{ - struct _stat st; - HKEY hTortoise; - char command[CMD_SIZE+1]; - DWORD type, size; - if (_stat(".svn", &st) < 0) - return 0; - /* Allow suppression of subwcrev.exe invocation if a no_subwcrev file is present. 
*/ - if (_stat("no_subwcrev", &st) == 0) - return 0; - if (RegOpenKey(HKEY_LOCAL_MACHINE, "Software\\TortoiseSVN", &hTortoise) != ERROR_SUCCESS && - RegOpenKey(HKEY_CURRENT_USER, "Software\\TortoiseSVN", &hTortoise) != ERROR_SUCCESS) - /* Tortoise not installed */ - return 0; - command[0] = '"'; /* quote the path to the executable */ - size = sizeof(command) - 1; - if (RegQueryValueEx(hTortoise, "Directory", 0, &type, command+1, &size) != ERROR_SUCCESS || - type != REG_SZ) - /* Registry corrupted */ - return 0; - strcat_s(command, CMD_SIZE, "bin\\subwcrev.exe"); - if (_stat(command+1, &st) < 0) - /* subwcrev.exe not part of the release */ - return 0; - strcat_s(command, CMD_SIZE, "\" ..\\.. ..\\..\\Modules\\getbuildinfo.c \""); - strcat_s(command, CMD_SIZE, tmppath); /* quoted tmppath */ - strcat_s(command, CMD_SIZE, "getbuildinfo2.c\""); - - puts(command); fflush(stdout); - if (system(command) < 0) - return 0; - return 1; -} - -const char DELIMS[] = { " \n" }; - -int get_mercurial_info(char * hgbranch, char * hgtag, char * hgrev, int size) -{ - int result = 0; - char filename[CMD_SIZE]; - char cmdline[CMD_SIZE]; - - strcpy_s(filename, CMD_SIZE, "tmpXXXXXX"); - if (_mktemp_s(filename, CMD_SIZE) == 0) { - int rc; - - strcpy_s(cmdline, CMD_SIZE, "hg id -bit > "); - strcat_s(cmdline, CMD_SIZE, filename); - rc = system(cmdline); - if (rc == 0) { - FILE * fp; - - if (fopen_s(&fp, filename, "r") == 0) { - char * cp = fgets(cmdline, CMD_SIZE, fp); - - if (cp) { - char * context = NULL; - char * tp = strtok_s(cp, DELIMS, &context); - if (tp) { - strcpy_s(hgrev, size, tp); - tp = strtok_s(NULL, DELIMS, &context); - if (tp) { - strcpy_s(hgbranch, size, tp); - tp = strtok_s(NULL, DELIMS, &context); - if (tp) { - strcpy_s(hgtag, size, tp); - result = 1; - } - } - } - } - fclose(fp); - } - } - _unlink(filename); - } - return result; -} - -int main(int argc, char*argv[]) -{ - char command[CMD_SIZE] = "cl.exe -c -D_WIN32 -DUSE_DL_EXPORT -D_WINDOWS -DWIN32 -D_WINDLL "; - char tmppath[CMD_SIZE] = ""; - int do_unlink, result; - char *tmpdir = NULL; - if (argc <= 2 || argc > 3) { - fprintf(stderr, "make_buildinfo $(ConfigurationName) [tmpdir]\n"); - return EXIT_FAILURE; - } - if (strcmp(argv[1], "Release") == 0) { - strcat_s(command, CMD_SIZE, "-MD "); - } - else if (strcmp(argv[1], "Debug") == 0) { - strcat_s(command, CMD_SIZE, "-D_DEBUG -MDd "); - } - else if (strcmp(argv[1], "ReleaseItanium") == 0) { - strcat_s(command, CMD_SIZE, "-MD /USECL:MS_ITANIUM "); - } - else if (strcmp(argv[1], "ReleaseAMD64") == 0) { - strcat_s(command, CMD_SIZE, "-MD "); - strcat_s(command, CMD_SIZE, "-MD /USECL:MS_OPTERON "); - } - else { - fprintf(stderr, "unsupported configuration %s\n", argv[1]); - return EXIT_FAILURE; - } - if (argc > 2) { - tmpdir = argv[2]; - strcat_s(tmppath, _countof(tmppath), tmpdir); - /* Hack fix for bad command line: If the command is issued like this: - * $(SolutionDir)make_buildinfo.exe" Debug "$(IntDir)" - * we will get a trailing quote because IntDir ends with a backslash that then - * escapes the final ". To simplify the life for developers, catch that problem - * here by cutting it off. - * The proper command line, btw is: - * $(SolutionDir)make_buildinfo.exe" Debug "$(IntDir)\" - * Hooray for command line parsing on windows. 
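get_mercurial_info() in the PC/VS9.0/make_buildinfo.c being removed above shells out to "hg id -bit", captures the single output line through a temporary file, and takes the first three whitespace-separated tokens as revision, branch and tag before handing them to the compiler as HGVERSION/HGBRANCH/HGTAG defines. The same parsing is a few lines of Python; the subprocess-based sketch below is mine, only the hg command and the token order come from the C code:

    import subprocess

    def get_mercurial_info(repo_dir="."):
        """Return (branch, tag, revision) as reported by "hg id -bit",
        or None if hg is unavailable or the output looks wrong."""
        try:
            out = subprocess.check_output(["hg", "id", "-bit"], cwd=repo_dir)
        except (OSError, subprocess.CalledProcessError):
            return None
        # One line: <revision(+)> <branch> <tag...>, the same order in which
        # the C helper pulls the tokens out with strtok_s().
        parts = out.decode("ascii", "replace").split()
        if len(parts) < 3:
            return None
        hgrev, hgbranch, hgtag = parts[0], parts[1], parts[2]
        return hgbranch, hgtag, hgrev

    if __name__ == "__main__":
        info = get_mercurial_info()
        if info:
            print("HGBRANCH=%s HGTAG=%s HGVERSION=%s" % info)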
- */ - if (strlen(tmppath) > 0 && tmppath[strlen(tmppath)-1] == '"') - tmppath[strlen(tmppath)-1] = '\0'; - strcat_s(tmppath, _countof(tmppath), "\\"); - } - - if ((do_unlink = make_buildinfo2(tmppath))) { - strcat_s(command, CMD_SIZE, "\""); - strcat_s(command, CMD_SIZE, tmppath); - strcat_s(command, CMD_SIZE, "getbuildinfo2.c\" -DSUBWCREV "); - } - else { - char hgtag[CMD_SIZE]; - char hgbranch[CMD_SIZE]; - char hgrev[CMD_SIZE]; - - if (get_mercurial_info(hgbranch, hgtag, hgrev, CMD_SIZE)) { - strcat_s(command, CMD_SIZE, "-DHGBRANCH=\\\""); - strcat_s(command, CMD_SIZE, hgbranch); - strcat_s(command, CMD_SIZE, "\\\""); - - strcat_s(command, CMD_SIZE, " -DHGTAG=\\\""); - strcat_s(command, CMD_SIZE, hgtag); - strcat_s(command, CMD_SIZE, "\\\""); - - strcat_s(command, CMD_SIZE, " -DHGVERSION=\\\""); - strcat_s(command, CMD_SIZE, hgrev); - strcat_s(command, CMD_SIZE, "\\\" "); - } - strcat_s(command, CMD_SIZE, "..\\..\\Modules\\getbuildinfo.c"); - } - strcat_s(command, CMD_SIZE, " -Fo\""); - strcat_s(command, CMD_SIZE, tmppath); - strcat_s(command, CMD_SIZE, "getbuildinfo.o\" -I..\\..\\Include -I..\\..\\PC"); - puts(command); fflush(stdout); - result = system(command); - if (do_unlink) { - command[0] = '\0'; - strcat_s(command, CMD_SIZE, "\""); - strcat_s(command, CMD_SIZE, tmppath); - strcat_s(command, CMD_SIZE, "getbuildinfo2.c\""); - _unlink(command); - } - if (result < 0) - return EXIT_FAILURE; - return 0; -} diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/make_buildinfo.vcproj --- a/PC/VS9.0/make_buildinfo.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/make_buildinfo.vcproj Wed May 23 21:09:05 2012 +0200 @@ -1,7 +1,7 @@ @@ -314,7 +314,7 @@ Name="Source Files" > diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/pcbuild.sln --- a/PC/VS9.0/pcbuild.sln Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/pcbuild.sln Wed May 23 21:09:05 2012 +0200 @@ -29,7 +29,7 @@ EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{553EC33E-9816-4996-A660-5D6186A0B0B3}" ProjectSection(SolutionItems) = preProject - ..\..\Modules\getbuildinfo.c = ..\..\Modules\getbuildinfo.c + ..\Modules\getbuildinfo.c = ..\Modules\getbuildinfo.c readme.txt = readme.txt EndProjectSection EndProject diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/pyexpat.vcproj --- a/PC/VS9.0/pyexpat.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/pyexpat.vcproj Wed May 23 21:09:05 2012 +0200 @@ -1,7 +1,7 @@ @@ -531,19 +531,19 @@ Name="Source Files" > diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/pyproject.vsprops --- a/PC/VS9.0/pyproject.vsprops Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/pyproject.vsprops Wed May 23 21:09:05 2012 +0200 @@ -11,7 +11,7 @@ Optimization="2" InlineFunctionExpansion="1" EnableIntrinsicFunctions="true" - AdditionalIncludeDirectories="..\..\Include; ..\..\PC" + AdditionalIncludeDirectories="..\Include; ..\PC" PreprocessorDefinitions="_WIN32" StringPooling="true" ExceptionHandling="0" @@ -34,7 +34,7 @@ /> @@ -627,7 +627,7 @@ Name="Source Files" > diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/python3dll.vcproj --- a/PC/VS9.0/python3dll.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/python3dll.vcproj Wed May 23 21:09:05 2012 +0200 @@ -1,7 +1,7 @@ @@ -54,7 +54,7 @@ Name="VCResourceCompilerTool" PreprocessorDefinitions="NDEBUG" Culture="1033" - AdditionalIncludeDirectories="..\..\Include" + AdditionalIncludeDirectories="..\Include" /> @@ -129,7 +129,7 @@ Name="VCResourceCompilerTool" PreprocessorDefinitions="NDEBUG" Culture="1033" - 
AdditionalIncludeDirectories="..\..\Include" + AdditionalIncludeDirectories="..\Include" /> @@ -207,7 +207,7 @@ Name="VCResourceCompilerTool" PreprocessorDefinitions="_DEBUG" Culture="1033" - AdditionalIncludeDirectories="..\..\Include" + AdditionalIncludeDirectories="..\Include" /> @@ -285,7 +285,7 @@ Name="VCResourceCompilerTool" PreprocessorDefinitions="_DEBUG" Culture="1033" - AdditionalIncludeDirectories="..\..\Include" + AdditionalIncludeDirectories="..\Include" /> @@ -359,7 +359,7 @@ Name="VCResourceCompilerTool" PreprocessorDefinitions="NDEBUG" Culture="1033" - AdditionalIncludeDirectories="..\..\Include" + AdditionalIncludeDirectories="..\Include" /> @@ -434,7 +434,7 @@ Name="VCResourceCompilerTool" PreprocessorDefinitions="NDEBUG" Culture="1033" - AdditionalIncludeDirectories="..\..\Include" + AdditionalIncludeDirectories="..\Include" /> @@ -509,7 +509,7 @@ Name="VCResourceCompilerTool" PreprocessorDefinitions="NDEBUG" Culture="1033" - AdditionalIncludeDirectories="..\..\Include" + AdditionalIncludeDirectories="..\Include" /> @@ -584,7 +584,7 @@ Name="VCResourceCompilerTool" PreprocessorDefinitions="NDEBUG" Culture="1033" - AdditionalIncludeDirectories="..\..\Include" + AdditionalIncludeDirectories="..\Include" /> @@ -991,222 +991,222 @@ Name="Modules" > @@ -1214,31 +1214,31 @@ Name="zlib" > @@ -1374,71 +1374,71 @@ Name="cjkcodecs" > @@ -1447,175 +1447,175 @@ Name="Objects" > @@ -1623,59 +1623,59 @@ Name="Parser" > @@ -1683,31 +1683,31 @@ Name="PC" > @@ -1715,191 +1715,191 @@ Name="Python" > @@ -1907,7 +1907,7 @@ Name="Resource Files" > diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/pythonw.vcproj --- a/PC/VS9.0/pythonw.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/pythonw.vcproj Wed May 23 21:09:05 2012 +0200 @@ -1,7 +1,7 @@ @@ -608,7 +608,7 @@ Name="Source Files" > diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/select.vcproj --- a/PC/VS9.0/select.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/select.vcproj Wed May 23 21:09:05 2012 +0200 @@ -1,7 +1,7 @@ diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/sqlite3.vcproj --- a/PC/VS9.0/sqlite3.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/sqlite3.vcproj Wed May 23 21:09:05 2012 +0200 @@ -1,7 +1,7 @@ @@ -523,7 +523,7 @@ Name="Source Files" > diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/w9xpopen.vcproj --- a/PC/VS9.0/w9xpopen.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/w9xpopen.vcproj Wed May 23 21:09:05 2012 +0200 @@ -1,7 +1,7 @@ diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/winsound.vcproj --- a/PC/VS9.0/winsound.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/winsound.vcproj Wed May 23 21:09:05 2012 +0200 @@ -1,7 +1,7 @@ diff -r 0be296605165 -r faa88c50a3d2 PC/VS9.0/xxlimited.vcproj --- a/PC/VS9.0/xxlimited.vcproj Wed May 23 22:26:55 2012 +0200 +++ b/PC/VS9.0/xxlimited.vcproj Wed May 23 21:09:05 2012 +0200 @@ -1,7 +1,7 @@ diff -r 0be296605165 -r faa88c50a3d2 PCbuild/_bz2.vcxproj --- a/PCbuild/_bz2.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/_bz2.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -148,6 +148,12 @@ AllRules.ruleset + .pyd + .pyd + .pyd + .pyd + .pyd + .pyd diff -r 0be296605165 -r faa88c50a3d2 PCbuild/_bz2.vcxproj.filters --- a/PCbuild/_bz2.vcxproj.filters Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/_bz2.vcxproj.filters Wed May 23 21:09:05 2012 +0200 @@ -4,10 +4,10 @@ {f53a859d-dad2-4d5b-ae41-f28d8b571f5a} - + {7e0bed05-ae33-43b7-8797-656455bbb7f3} - + {ed574b89-6983-4cdf-9f98-fe7048d9e89c} @@ -16,33 +16,33 @@ Source Files - bzip2 1.0.6 Source Files + bzip2 1.0.5 Source Files - bzip2 
1.0.6 Source Files + bzip2 1.0.5 Source Files - bzip2 1.0.6 Source Files + bzip2 1.0.5 Source Files - bzip2 1.0.6 Source Files + bzip2 1.0.5 Source Files - bzip2 1.0.6 Source Files + bzip2 1.0.5 Source Files - bzip2 1.0.6 Source Files + bzip2 1.0.5 Source Files - bzip2 1.0.6 Source Files + bzip2 1.0.5 Source Files - bzip2 1.0.6 Header Files + bzip2 1.0.5 Header Files - bzip2 1.0.6 Header Files + bzip2 1.0.5 Header Files - + \ No newline at end of file diff -r 0be296605165 -r faa88c50a3d2 PCbuild/_ctypes.vcxproj --- a/PCbuild/_ctypes.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/_ctypes.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -148,6 +148,18 @@ AllRules.ruleset + .pyd + .pyd + .pyd + $(SolutionDir)$(PlatformName)-pgo\ + $(SolutionDir)$(PlatformName)-temp-pgi\$(ProjectName)\ + $(SolutionDir)$(PlatformName)-pgo\ + $(SolutionDir)$(PlatformName)-temp-pgi\$(ProjectName)\ + .pyd + $(OutDirPGI)\ + $(SolutionDir)$(PlatformName)-temp-pgi\$(ProjectName)\ + .pyd + .pyd @@ -166,6 +178,7 @@ 0x1D1A0000 + $(OutDir)python33_d.lib;%(AdditionalDependencies) @@ -189,6 +202,7 @@ /EXPORT:DllGetClassObject,PRIVATE /EXPORT:DllCanUnloadNow,PRIVATE %(AdditionalOptions) NotSet 0x1D1A0000 + $(OutDir)python33.lib;%(AdditionalDependencies) @@ -213,6 +227,7 @@ NotSet 0x1D1A0000 MachineX64 + $(OutDir)python33.lib;%(AdditionalDependencies) @@ -237,6 +252,7 @@ NotSet 0x1D1A0000 MachineX64 + $(SolutionDir)\$(PlatformShortName)\python33.lib;%(AdditionalDependencies) @@ -283,11 +299,6 @@ $(IntDir)win64.obj;%(Outputs) - - - {cf7ac3d1-e2df-41d2-bea6-1e2556cdea26} - - diff -r 0be296605165 -r faa88c50a3d2 PCbuild/_ctypes_test.vcxproj --- a/PCbuild/_ctypes_test.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/_ctypes_test.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -148,6 +148,12 @@ AllRules.ruleset + .pyd + .pyd + .pyd + .pyd + .pyd + .pyd @@ -184,6 +190,7 @@ {cf7ac3d1-e2df-41d2-bea6-1e2556cdea26} + false diff -r 0be296605165 -r faa88c50a3d2 PCbuild/_decimal.vcxproj --- a/PCbuild/_decimal.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/_decimal.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -113,8 +113,6 @@ - - @@ -148,6 +146,20 @@ AllRules.ruleset + .pyd + .pyd + $(SolutionDir)\amd64\ + $(SolutionDir)$(PlatformName)-temp-$(Configuration)\$(ProjectName)\ + .pyd + $(SolutionDir)$(PlatformName)-pgo\ + $(SolutionDir)$(PlatformName)-temp-pgi\$(ProjectName)\ + $(SolutionDir)$(PlatformName)-pgo\ + $(SolutionDir)$(PlatformName)-temp-pgi\$(ProjectName)\ + .pyd + $(OutDirPGI)\ + $(SolutionDir)$(PlatformName)-temp-pgi\$(ProjectName)\ + .pyd + .pyd @@ -301,11 +313,6 @@ $(IntDir)vcdiv64.obj;%(Outputs) - - - {cf7ac3d1-e2df-41d2-bea6-1e2556cdea26} - - diff -r 0be296605165 -r faa88c50a3d2 PCbuild/_elementtree.vcxproj --- a/PCbuild/_elementtree.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/_elementtree.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -148,6 +148,12 @@ AllRules.ruleset + .pyd + .pyd + .pyd + .pyd + .pyd + .pyd diff -r 0be296605165 -r faa88c50a3d2 PCbuild/_hashlib.vcxproj --- a/PCbuild/_hashlib.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/_hashlib.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -148,6 +148,11 @@ AllRules.ruleset + .pyd + .pyd + .pyd + .pyd + .pyd diff -r 0be296605165 -r faa88c50a3d2 PCbuild/_lzma.vcxproj --- a/PCbuild/_lzma.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/_lzma.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -148,6 +148,12 @@ AllRules.ruleset + .pyd + .pyd + .pyd + .pyd + .pyd + .pyd diff -r 0be296605165 -r faa88c50a3d2 PCbuild/_msi.vcxproj --- a/PCbuild/_msi.vcxproj Wed May 23 22:26:55 
2012 +0200 +++ b/PCbuild/_msi.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -148,6 +148,12 @@ AllRules.ruleset + .pyd + .pyd + .pyd + .pyd + .pyd + .pyd diff -r 0be296605165 -r faa88c50a3d2 PCbuild/_multiprocessing.vcxproj --- a/PCbuild/_multiprocessing.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/_multiprocessing.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -148,6 +148,12 @@ AllRules.ruleset + .pyd + .pyd + .pyd + .pyd + .pyd + .pyd diff -r 0be296605165 -r faa88c50a3d2 PCbuild/_socket.vcxproj --- a/PCbuild/_socket.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/_socket.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -148,6 +148,12 @@ AllRules.ruleset + .pyd + .pyd + .pyd + .pyd + .pyd + .pyd diff -r 0be296605165 -r faa88c50a3d2 PCbuild/_sqlite3.vcxproj --- a/PCbuild/_sqlite3.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/_sqlite3.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -148,6 +148,12 @@ AllRules.ruleset + .pyd + .pyd + .pyd + .pyd + .pyd + .pyd diff -r 0be296605165 -r faa88c50a3d2 PCbuild/_ssl.vcxproj --- a/PCbuild/_ssl.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/_ssl.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -148,6 +148,11 @@ AllRules.ruleset + .pyd + .pyd + .pyd + .pyd + .pyd diff -r 0be296605165 -r faa88c50a3d2 PCbuild/_testbuffer.vcxproj --- a/PCbuild/_testbuffer.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/_testbuffer.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -148,6 +148,19 @@ AllRules.ruleset + .pyd + .pyd + .pyd + $(SolutionDir)$(PlatformName)-pgo\ + $(SolutionDir)$(PlatformName)-temp-pgi\$(ProjectName)\ + $(SolutionDir)$(PlatformName)-pgo\ + $(SolutionDir)$(PlatformName)-temp-pgi\$(ProjectName)\ + .pyd + $(OutDirPGI)\ + $(ProjectName) + $(SolutionDir)$(PlatformName)-temp-pgi\$(ProjectName)\ + .pyd + .pyd @@ -160,6 +173,7 @@ 0x1e1F0000 + $(OutDir)python33_d.lib;%(AdditionalDependencies) @@ -173,6 +187,7 @@ 0x1e1F0000 + $(OutDir)python33.lib;%(AdditionalDependencies) @@ -187,6 +202,7 @@ 0x1e1F0000 MachineX64 + $(OutDir)python33.lib;%(AdditionalDependencies) @@ -201,16 +217,12 @@ 0x1e1F0000 MachineX64 + $(SolutionDir)\$(PlatformShortName)\python33.lib;%(AdditionalDependencies) - - - {cf7ac3d1-e2df-41d2-bea6-1e2556cdea26} - - diff -r 0be296605165 -r faa88c50a3d2 PCbuild/_testcapi.vcxproj --- a/PCbuild/_testcapi.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/_testcapi.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -148,6 +148,12 @@ AllRules.ruleset + .pyd + .pyd + .pyd + .pyd + .pyd + .pyd diff -r 0be296605165 -r faa88c50a3d2 PCbuild/_tkinter.vcxproj --- a/PCbuild/_tkinter.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/_tkinter.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -148,6 +148,11 @@ AllRules.ruleset + .pyd + .pyd + .pyd + .pyd + .pyd diff -r 0be296605165 -r faa88c50a3d2 PCbuild/build.bat --- a/PCbuild/build.bat Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/build.bat Wed May 23 21:09:05 2012 +0200 @@ -5,15 +5,14 @@ setlocal set platf=Win32 set conf=Release -set target=build -set dir=%~dp0 +set build= :CheckOpts if "%1"=="-c" (set conf=%2) & shift & shift & goto CheckOpts if "%1"=="-p" (set platf=%2) & shift & shift & goto CheckOpts -if "%1"=="-r" (set target=rebuild) & shift & goto CheckOpts +if "%1"=="-r" (set build=/rebuild) & shift & goto CheckOpts if "%1"=="-d" (set conf=Debug) & shift & goto CheckOpts -set cmd=msbuild /p:useenv=true %dir%pcbuild.sln /t:%target% /p:Configuration=%conf% /p:Platform=%platf% +set cmd=vcbuild /useenv pcbuild.sln %build% "%conf%|%platf%" echo %cmd% %cmd% diff -r 0be296605165 -r faa88c50a3d2 PCbuild/build_ssl.py --- 
a/PCbuild/build_ssl.py Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/build_ssl.py Wed May 23 21:09:05 2012 +0200 @@ -63,13 +63,37 @@ print(" Please install ActivePerl and ensure it appears on your path") return None -# Fetch SSL directory from VC properties -def get_ssl_dir(): - propfile = (os.path.join(os.path.dirname(__file__), 'pyproject.props')) - with open(propfile) as f: - m = re.search('openssl-([^<]+)<', f.read()) - return "..\..\openssl-"+m.group(1) - +# Locate the best SSL directory given a few roots to look into. +def find_best_ssl_dir(sources): + candidates = [] + for s in sources: + try: + # note: do not abspath s; the build will fail if any + # higher up directory name has spaces in it. + fnames = os.listdir(s) + except os.error: + fnames = [] + for fname in fnames: + fqn = os.path.join(s, fname) + if os.path.isdir(fqn) and fname.startswith("openssl-"): + candidates.append(fqn) + # Now we have all the candidates, locate the best. + best_parts = [] + best_name = None + for c in candidates: + parts = re.split("[.-]", os.path.basename(c))[1:] + # eg - openssl-0.9.7-beta1 - ignore all "beta" or any other qualifiers + if len(parts) >= 4: + continue + if parts > best_parts: + best_parts = parts + best_name = c + if best_name is not None: + print("Found an SSL directory at '%s'" % (best_name,)) + else: + print("Could not find an SSL directory in '%s'" % (sources,)) + sys.stdout.flush() + return best_name def create_makefile64(makefile, m32): """Create and fix makefile for 64bit @@ -178,7 +202,7 @@ print("No Perl installation was found. Existing Makefiles are used.") sys.stdout.flush() # Look for SSL 2 levels up from pcbuild - ie, same place zlib etc all live. - ssl_dir = get_ssl_dir() + ssl_dir = find_best_ssl_dir(("..\\..",)) if ssl_dir is None: sys.exit(1) @@ -228,9 +252,9 @@ # Now run make. if arch == "amd64": - rc = os.system("nasm -f win64 -DNEAR -Ox -g ms\\uptable.asm") + rc = os.system("ml64 -c -Foms\\uptable.obj ms\\uptable.asm") if rc: - print("nasm assembler has failed.") + print("ml64 assembler has failed.") sys.exit(rc) copy(r"crypto\buildinf_%s.h" % arch, r"crypto\buildinf.h") diff -r 0be296605165 -r faa88c50a3d2 PCbuild/debug.props --- a/PCbuild/debug.props Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/debug.props Wed May 23 21:09:05 2012 +0200 @@ -1,7 +1,6 @@  - _d $(OutDir)kill_python_d.exe @@ -12,11 +11,11 @@ _DEBUG;%(PreprocessorDefinitions) + + $(OutDir)$(TargetName)$(TargetExt) + - - $(PyDebugExt) - $(KillPythonExe) diff -r 0be296605165 -r faa88c50a3d2 PCbuild/env.bat --- a/PCbuild/env.bat Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/env.bat Wed May 23 21:09:05 2012 +0200 @@ -1,9 +1,5 @@ @echo off -set VS10=%ProgramFiles(x86)%\Microsoft Visual Studio 10.0 -IF EXIST "%VS10%" GOTO ok -set VS10=%ProgramFiles%\Microsoft Visual Studio 10.0 -:ok - +set VS9=%ProgramFiles%\Microsoft Visual Studio 9.0 echo Build environments: x86, ia64, amd64, x86_amd64, x86_ia64 echo. 
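find_best_ssl_dir(), added to PCbuild/build_ssl.py in the hunk above, scans the given roots for openssl-* directories, splits each name on "." and "-", drops any candidate with four or more components (pre-releases such as openssl-0.9.7-beta1), and keeps the lexicographically largest component list; the comparison is on lists of strings, not parsed numbers. A standalone sketch of the same selection rule; the sample names in the doctest are illustrative, not taken from the changeset:

    import os
    import re

    def pick_best_openssl(names):
        """Apply the find_best_ssl_dir() rule to a list of directory
        names and return the winner, or None.

        >>> pick_best_openssl(["openssl-0.9.8x", "openssl-1.0.0a",
        ...                    "openssl-0.9.7-beta1"])
        'openssl-1.0.0a'
        """
        best_parts, best_name = [], None
        for name in names:
            if not name.startswith("openssl-"):
                continue
            parts = re.split("[.-]", name)[1:]   # e.g. ['1', '0', '0a']
            if len(parts) >= 4:                  # beta or otherwise qualified
                continue
            if parts > best_parts:               # string-wise, as in the script
                best_parts, best_name = parts, name
        return best_name

    if __name__ == "__main__":
        import doctest
        doctest.testmod()
        # The build script looks two levels up from PCbuild; "." here is
        # just a stand-in root for a quick manual test.
        print(pick_best_openssl(os.listdir(".")))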
-call "%VS10%\VC\vcvarsall.bat" %1 +call "%VS9%\VC\vcvarsall.bat" %1 diff -r 0be296605165 -r faa88c50a3d2 PCbuild/kill_python.vcxproj --- a/PCbuild/kill_python.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/kill_python.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -82,6 +82,12 @@ AllRules.ruleset + .exe + .exe + .exe + .exe + $(SolutionDir)$(PlatformName)-pgo\ + $(SolutionDir)$(PlatformName)-temp-pgi\$(ProjectName)\ diff -r 0be296605165 -r faa88c50a3d2 PCbuild/make_buildinfo.vcxproj --- a/PCbuild/make_buildinfo.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/make_buildinfo.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -30,6 +30,7 @@ AllRules.ruleset + .exe @@ -40,6 +41,7 @@ $(OutDir)make_buildinfo.exe + $(TargetDir)$(TargetName).pdb Console diff -r 0be296605165 -r faa88c50a3d2 PCbuild/make_versioninfo.vcxproj --- a/PCbuild/make_versioninfo.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/make_versioninfo.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -54,15 +54,9 @@ - - - - - - @@ -79,6 +73,8 @@ AllRules.ruleset + .exe + .exe @@ -101,6 +97,7 @@ $(SolutionDir)make_versioninfo.exe + $(TargetDir)$(TargetName).pdb Console 0x1d000000 @@ -154,6 +151,7 @@ $(SolutionDir)make_versioninfo_d.exe + $(TargetDir)$(TargetName).pdb Console 0x1d000000 diff -r 0be296605165 -r faa88c50a3d2 PCbuild/pcbuild.sln --- a/PCbuild/pcbuild.sln Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/pcbuild.sln Wed May 23 21:09:05 2012 +0200 @@ -21,8 +21,14 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "winsound", "winsound.vcxproj", "{28B5D777-DDF2-4B6B-B34F-31D938813856}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "_decimal", "_decimal.vcxproj", "{0E9791DB-593A-465F-98BC-681011311617}" + ProjectSection(ProjectDependencies) = postProject + {CF7AC3D1-E2DF-41D2-BEA6-1E2556CDEA26} = {CF7AC3D1-E2DF-41D2-BEA6-1E2556CDEA26} + EndProjectSection EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "_ctypes", "_ctypes.vcxproj", "{0E9791DB-593A-465F-98BC-681011311618}" + ProjectSection(ProjectDependencies) = postProject + {CF7AC3D1-E2DF-41D2-BEA6-1E2556CDEA26} = {CF7AC3D1-E2DF-41D2-BEA6-1E2556CDEA26} + EndProjectSection EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "_ctypes_test", "_ctypes_test.vcxproj", "{9EC7190A-249F-4180-A900-548FDCF3055F}" EndProject @@ -65,8 +71,14 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "python3dll", "python3dll.vcxproj", "{885D4898-D08D-4091-9C40-C700CFE3FC5A}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "xxlimited", "xxlimited.vcxproj", "{F749B822-B489-4CA5-A3AD-CE078F5F338A}" + ProjectSection(ProjectDependencies) = postProject + {CF7AC3D1-E2DF-41D2-BEA6-1E2556CDEA26} = {CF7AC3D1-E2DF-41D2-BEA6-1E2556CDEA26} + EndProjectSection EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "_testbuffer", "_testbuffer.vcxproj", "{A2697BD3-28C1-4AEC-9106-8B748639FD16}" + ProjectSection(ProjectDependencies) = postProject + {CF7AC3D1-E2DF-41D2-BEA6-1E2556CDEA26} = {CF7AC3D1-E2DF-41D2-BEA6-1E2556CDEA26} + EndProjectSection EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution @@ -535,8 +547,10 @@ {885D4898-D08D-4091-9C40-C700CFE3FC5A}.Release|Win32.Build.0 = Release|Win32 {885D4898-D08D-4091-9C40-C700CFE3FC5A}.Release|x64.ActiveCfg = Release|x64 {885D4898-D08D-4091-9C40-C700CFE3FC5A}.Release|x64.Build.0 = Release|x64 - {F749B822-B489-4CA5-A3AD-CE078F5F338A}.Debug|Win32.ActiveCfg = Release|Win32 - {F749B822-B489-4CA5-A3AD-CE078F5F338A}.Debug|x64.ActiveCfg = Release|x64 + 
{F749B822-B489-4CA5-A3AD-CE078F5F338A}.Debug|Win32.ActiveCfg = Debug|Win32 + {F749B822-B489-4CA5-A3AD-CE078F5F338A}.Debug|Win32.Build.0 = Debug|Win32 + {F749B822-B489-4CA5-A3AD-CE078F5F338A}.Debug|x64.ActiveCfg = Debug|x64 + {F749B822-B489-4CA5-A3AD-CE078F5F338A}.Debug|x64.Build.0 = Debug|x64 {F749B822-B489-4CA5-A3AD-CE078F5F338A}.PGInstrument|Win32.ActiveCfg = PGInstrument|Win32 {F749B822-B489-4CA5-A3AD-CE078F5F338A}.PGInstrument|Win32.Build.0 = PGInstrument|Win32 {F749B822-B489-4CA5-A3AD-CE078F5F338A}.PGInstrument|x64.ActiveCfg = PGInstrument|x64 diff -r 0be296605165 -r faa88c50a3d2 PCbuild/pyd.props --- a/PCbuild/pyd.props Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/pyd.props Wed May 23 21:09:05 2012 +0200 @@ -1,14 +1,13 @@  + - <_ProjectFileVersion>10.0.30319.1 false false - .pyd @@ -16,6 +15,9 @@ MultiThreadedDLL + $(OutDir)$(ProjectName).pyd + $(OutDir)$(ProjectName).pdb + $(OutDir)$(TargetName).lib diff -r 0be296605165 -r faa88c50a3d2 PCbuild/pyd_d.props --- a/PCbuild/pyd_d.props Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/pyd_d.props Wed May 23 21:09:05 2012 +0200 @@ -1,10 +1,12 @@  + - - + + $(SolutionDir)python_d.exe + <_ProjectFileVersion>10.0.30319.1 false @@ -21,11 +23,19 @@ Py_BUILD_CORE_MODULE;%(PreprocessorDefinitions) MultiThreadedDebugDLL - + + $(OutDir)$(ProjectName)_d.pyd + $(OutDir)$(ProjectName)_d.pdb + $(OutDir)$(TargetName).lib + - + + + $(PythonExe) + + \ No newline at end of file diff -r 0be296605165 -r faa88c50a3d2 PCbuild/pyexpat.vcxproj --- a/PCbuild/pyexpat.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/pyexpat.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -148,6 +148,12 @@ AllRules.ruleset + .pyd + .pyd + .pyd + .pyd + .pyd + .pyd diff -r 0be296605165 -r faa88c50a3d2 PCbuild/pyproject.props --- a/PCbuild/pyproject.props Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/pyproject.props Wed May 23 21:09:05 2012 +0200 @@ -1,14 +1,13 @@  - python33$(PyDebugExt) - $(SolutionDir)python$(PyDebugExt).exe - $(OutDir)kill_python$(PyDebugExt).exe + python33 + $(SolutionDir)\python.exe ..\.. 
- $(externalsDir)\sqlite-3.7.12 - $(externalsDir)\bzip2-1.0.6 + $(externalsDir)\sqlite-3.7.4 + $(externalsDir)\bzip2-1.0.5 $(externalsDir)\xz-5.0.3 - $(externalsDir)\openssl-1.0.1c + $(externalsDir)\openssl-1.0.0a $(externalsDir)\tcltk $(externalsDir)\tcltk64 $(tcltkDir)\lib\tcl85.lib;$(tcltkDir)\lib\tk85.lib @@ -18,9 +17,10 @@ <_ProjectFileVersion>10.0.30319.1 - $(SolutionDir) + $(SolutionDir)\ $(SolutionDir)$(PlatformName)-temp-$(Configuration)\$(ProjectName)\ false + .dll @@ -59,9 +59,6 @@ $(PythonExe) - - $(KillPythonExe) - $(externalsDir) diff -r 0be296605165 -r faa88c50a3d2 PCbuild/python.vcxproj --- a/PCbuild/python.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/python.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -156,6 +156,14 @@ AllRules.ruleset + .exe + .exe + .exe + .exe + .exe + .exe + .exe + .exe diff -r 0be296605165 -r faa88c50a3d2 PCbuild/pythoncore.vcxproj --- a/PCbuild/pythoncore.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/pythoncore.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -77,51 +77,51 @@ + - + - + - + - - + - + - + - + @@ -150,14 +150,15 @@ AllRules.ruleset - $(PyDllName) + $(PyDllName)_d + $(PyDllName) + $(PyDllName)_d + $(PyDllName) + .dll + $(PyDllName) + $(PyDllName) $(PyDllName) - $(PyDllName) - $(PyDllName) - $(PyDllName) $(PyDllName) - $(PyDllName) - $(PyDllName) @@ -179,7 +180,9 @@ $(IntDir)getbuildinfo.o;%(AdditionalDependencies) $(OutDir)$(PyDllName).dll libc;%(IgnoreSpecificDefaultLibraries) + $(OutDir)$(PyDllName).pdb 0x1e000000 + $(OutDir)$(PyDllName).lib @@ -205,7 +208,9 @@ $(IntDir)getbuildinfo.o;%(AdditionalDependencies) $(OutDir)$(PyDllName).dll libc;%(IgnoreSpecificDefaultLibraries) + $(OutDir)$(PyDllName).pdb 0x1e000000 + $(OutDir)$(PyDllName).lib @@ -229,8 +234,11 @@ $(IntDir)getbuildinfo.o;%(AdditionalDependencies) + $(OutDir)$(PyDllName)_d.dll libc;%(IgnoreSpecificDefaultLibraries) + $(OutDir)$(PyDllName)_d.pdb 0x1e000000 + $(OutDir)$(PyDllName)_d.lib @@ -259,7 +267,9 @@ $(IntDir)getbuildinfo.o;%(AdditionalDependencies) $(OutDir)$(PyDllName)_d.dll libc;%(IgnoreSpecificDefaultLibraries) + $(OutDir)$(PyDllName)_d.pdb 0x1e000000 + $(OutDir)$(PyDllName)_d.lib @@ -282,7 +292,9 @@ $(IntDir)getbuildinfo.o;%(AdditionalDependencies) $(OutDir)$(PyDllName).dll libc;%(IgnoreSpecificDefaultLibraries) + $(OutDir)$(PyDllName).pdb 0x1e000000 + $(OutDirPGI)$(PyDllName).lib @@ -308,7 +320,9 @@ $(IntDir)getbuildinfo.o;%(AdditionalDependencies) $(OutDir)$(PyDllName).dll libc;%(IgnoreSpecificDefaultLibraries) + $(OutDir)$(PyDllName).pdb 0x1e000000 + $(OutDirPGI)$(PyDllName).lib MachineX64 @@ -332,7 +346,9 @@ $(IntDir)getbuildinfo.o;%(AdditionalDependencies) $(OutDir)$(PyDllName).dll libc;%(IgnoreSpecificDefaultLibraries) + $(OutDir)$(PyDllName).pdb 0x1e000000 + $(OutDirPGI)$(PyDllName).lib @@ -358,7 +374,9 @@ $(IntDir)getbuildinfo.o;%(AdditionalDependencies) $(OutDir)$(PyDllName).dll libc;%(IgnoreSpecificDefaultLibraries) + $(OutDir)$(PyDllName).pdb 0x1e000000 + $(OutDirPGI)$(PyDllName).lib MachineX64 @@ -389,6 +407,7 @@ + @@ -424,6 +443,7 @@ + @@ -444,6 +464,7 @@ + @@ -451,6 +472,7 @@ + @@ -542,6 +564,12 @@ + + _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) + _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) + _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) + _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) + diff -r 0be296605165 -r faa88c50a3d2 PCbuild/pythoncore.vcxproj.filters --- a/PCbuild/pythoncore.vcxproj.filters Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/pythoncore.vcxproj.filters Wed May 23 21:09:05 2012 +0200 @@ -111,6 +111,9 @@ 
Include + + Include + Include @@ -216,6 +219,9 @@ Include + + Include + Include @@ -276,6 +282,9 @@ Include + + Include + Include @@ -297,6 +306,9 @@ Modules + + Modules + Modules @@ -566,6 +578,9 @@ Modules\zlib + + Modules\zlib + Modules\zlib diff -r 0be296605165 -r faa88c50a3d2 PCbuild/pythonw.vcxproj --- a/PCbuild/pythonw.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/pythonw.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -149,6 +149,14 @@ AllRules.ruleset + .exe + .exe + .exe + .pyd + .exe + .exe + .exe + .exe diff -r 0be296605165 -r faa88c50a3d2 PCbuild/readme.txt --- a/PCbuild/readme.txt Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/readme.txt Wed May 23 21:09:05 2012 +0200 @@ -1,4 +1,4 @@ -Building Python using VC++ 10.0 +Building Python using VC++ 9.0 ------------------------------ This directory is used to build Python for Win32 and x64 platforms, e.g. @@ -62,11 +62,17 @@ C RUNTIME --------- -Visual Studio 2010 uses version 10 of the C runtime (MSVCRT9). The executables -no longer use the "Side by Side" assemblies used in previous versions of the -compiler. This simplifies distribution of applications. -The run time libraries are avalible under the VC/Redist folder of your visual studio -distribution. For more info, see the Readme in the VC/Redist folder. +Visual Studio 2008 uses version 9 of the C runtime (MSVCRT9). The executables +are linked to a CRT "side by side" assembly which must be present on the target +machine. This is avalible under the VC/Redist folder of your visual studio +distribution. On XP and later operating systems that support +side-by-side assemblies it is not enough to have the msvcrt90.dll present, +it has to be there as a whole assembly, that is, a folder with the .dll +and a .manifest. Also, a check is made for the correct version. +Therefore, one should distribute this assembly with the dlls, and keep +it in the same directory. For compatibility with older systems, one should +also set the PATH to this directory so that the dll can be found. +For more info, see the Readme in the VC/Redist folder. SUBPROJECTS ----------- @@ -115,21 +121,21 @@ Download the source from the python.org copy into the dist directory: - svn export http://svn.python.org/projects/external/bzip2-1.0.6 + svn export http://svn.python.org/projects/external/bzip2-1.0.5 ** NOTE: if you use the Tools\buildbot\external(-amd64).bat approach for obtaining external sources then you don't need to manually get the source above via subversion. ** A custom pre-link step in the bz2 project settings should manage to - build bzip2-1.0.6\libbz2.lib by magic before bz2.pyd (or bz2_d.pyd) is + build bzip2-1.0.5\libbz2.lib by magic before bz2.pyd (or bz2_d.pyd) is linked in PCbuild\. However, the bz2 project is not smart enough to remove anything under - bzip2-1.0.6\ when you do a clean, so if you want to rebuild bzip2.lib - you need to clean up bzip2-1.0.6\ by hand. + bzip2-1.0.5\ when you do a clean, so if you want to rebuild bzip2.lib + you need to clean up bzip2-1.0.5\ by hand. All of this managed to build libbz2.lib in - bzip2-1.0.6\$platform-$configuration\, which the Python project links in. + bzip2-1.0.5\$platform-$configuration\, which the Python project links in. _lzma Python wrapper for the liblzma compression library. 
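The PCbuild/readme.txt text added above spells out the VC9 C runtime rule: it is not enough to drop msvcr90.dll next to the binaries, the whole side-by-side assembly (the DLL plus its .manifest in one folder) has to be distributed alongside them, and that folder should also be on PATH for older systems. A quick distribution check in that spirit; the Microsoft.VC90.CRT folder name and the msvcr90.dll file name are assumptions based on the usual VC9 redist layout, not spelled out in the hunk:

    import glob
    import os

    def find_crt_assembly(exe_dir):
        """Look next to the built executables for a VC90 CRT side-by-side
        assembly folder that contains both the DLL and a manifest."""
        for folder in glob.glob(os.path.join(exe_dir, "*VC90.CRT*")):
            if not os.path.isdir(folder):
                continue
            has_dll = os.path.isfile(os.path.join(folder, "msvcr90.dll"))
            has_manifest = bool(glob.glob(os.path.join(folder, "*.manifest")))
            if has_dll and has_manifest:
                return folder
        return None

    if __name__ == "__main__":
        found = find_crt_assembly(".")
        print(found or "no private CRT assembly found next to the binaries")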
@@ -142,7 +148,7 @@ Get the source code through - svn export http://svn.python.org/projects/external/openssl-1.0.1c + svn export http://svn.python.org/projects/external/openssl-1.0.0a ** NOTE: if you use the Tools\buildbot\external(-amd64).bat approach for obtaining external sources then you don't need to manually get the source diff -r 0be296605165 -r faa88c50a3d2 PCbuild/release.props --- a/PCbuild/release.props Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/release.props Wed May 23 21:09:05 2012 +0200 @@ -1,7 +1,7 @@  - + $(OutDir)kill_python.exe <_ProjectFileVersion>10.0.30319.1 @@ -12,8 +12,8 @@ - - $(PyDebugExt) + + $(KillPythonExe) \ No newline at end of file diff -r 0be296605165 -r faa88c50a3d2 PCbuild/select.vcxproj --- a/PCbuild/select.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/select.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -148,6 +148,12 @@ AllRules.ruleset + .pyd + .pyd + .pyd + .pyd + .pyd + .pyd diff -r 0be296605165 -r faa88c50a3d2 PCbuild/sqlite3.vcxproj --- a/PCbuild/sqlite3.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/sqlite3.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -150,6 +150,9 @@ AllRules.ruleset + .dll + .dll + .dll diff -r 0be296605165 -r faa88c50a3d2 PCbuild/unicodedata.vcxproj --- a/PCbuild/unicodedata.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/unicodedata.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -148,6 +148,12 @@ AllRules.ruleset + .pyd + .pyd + .pyd + .pyd + .pyd + .pyd diff -r 0be296605165 -r faa88c50a3d2 PCbuild/vs9to10.py --- a/PCbuild/vs9to10.py Wed May 23 22:26:55 2012 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,56 +0,0 @@ -#Run this file after automatic convertsion of the VisualStudio 2008 solution by VisualStudio 2010. -#This can be done whenever the 2008 solution changes. -#It will make the necessary cleanup and updates to the vcxproj files -#the .props files need to be maintained by hand if the .vsprops files change - -from __future__ import with_statement -import sys -import os -import os.path - -def vs9to10(src, dest): - for name in os.listdir(src): - path, ext = os.path.splitext(name) - if ext.lower() not in ('.vcxproj',): - continue - - filename = os.path.normpath(os.path.join(src, name)) - destname = os.path.normpath(os.path.join(dest, name)) - print("%s -> %s" % (filename, destname)) - - lines = [] - lastline = b"" - importgroup = False - with open(filename, 'rb') as fin: - for line in fin: - #remove redundant linker output info - if b"" in line: - continue - if b"" in line: - continue - if b"" in line and b"" in line: - continue - - #add new property sheet to the pythoncore - if importgroup and "pythoncore" in name.lower(): - if b"" in line: - if b"debug.props" in lastline: - lines.append(b' \r\n') - elif b"pythoncore" not in lastline: - lines.append(b' \r\n') - if b"" in line: - importgroup = False - lines.append(line) - lastline = line - with open(destname, 'wb') as fout: - for line in lines: - fout.write(line) - -if __name__ == "__main__": - src = "." 
if len(sys.argv) < 2 else sys.argv[1] - name = os.path.basename(os.path.abspath(src)) - dest = os.path.abspath(os.path.join(src, "..", name + "Upd")) - os.makedirs(dest) - vs9to10(src, dest) diff -r 0be296605165 -r faa88c50a3d2 PCbuild/w9xpopen.vcxproj --- a/PCbuild/w9xpopen.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/w9xpopen.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -84,51 +84,27 @@ - - - - - - - - - - - - - - - - - - - - - - - - diff -r 0be296605165 -r faa88c50a3d2 PCbuild/winsound.vcxproj --- a/PCbuild/winsound.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/winsound.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -148,6 +148,12 @@ AllRules.ruleset + .pyd + .pyd + .pyd + .pyd + .pyd + .pyd diff -r 0be296605165 -r faa88c50a3d2 PCbuild/x64.props --- a/PCbuild/x64.props Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/x64.props Wed May 23 21:09:05 2012 +0200 @@ -6,7 +6,7 @@ <_ProjectFileVersion>10.0.30319.1 <_PropertySheetDisplayName>amd64 - $(SolutionDir)amd64\ + $(SolutionDir)\amd64\ $(SolutionDir)$(PlatformName)-temp-$(Configuration)\$(ProjectName)\ diff -r 0be296605165 -r faa88c50a3d2 PCbuild/xxlimited.vcxproj --- a/PCbuild/xxlimited.vcxproj Wed May 23 22:26:55 2012 +0200 +++ b/PCbuild/xxlimited.vcxproj Wed May 23 21:09:05 2012 +0200 @@ -117,6 +117,12 @@ AllRules.ruleset + .pyd + .pyd + $(SolutionDir)$(PlatformName)-pgo\ + $(SolutionDir)$(PlatformName)-temp-pgi\$(ProjectName)\ + .pyd + .pyd @@ -126,6 +132,7 @@ wsock32.lib;%(AdditionalDependencies) libc;%(IgnoreSpecificDefaultLibraries) 0x1D110000 + $(OutDir)$(ProjectName).pyd @@ -133,7 +140,7 @@ X64 - wsock32.lib;%(AdditionalDependencies) + wsock32.lib;$(SolutionDir)\$(PlatformShortName)\python33.lib;%(AdditionalDependencies) libc;%(IgnoreSpecificDefaultLibraries) 0x1D110000 @@ -146,6 +153,7 @@ wsock32.lib;%(AdditionalDependencies) libc;%(IgnoreSpecificDefaultLibraries) 0x1D110000 + $(OutDirPGI)$(ProjectName).pyd @@ -153,10 +161,11 @@ X64 - wsock32.lib;%(AdditionalDependencies) + wsock32.lib;$(OutDir)python33.lib;%(AdditionalDependencies) libc;%(IgnoreSpecificDefaultLibraries) 0x1D110000 MachineX64 + $(OutDirPGI)$(ProjectName).pyd @@ -183,11 +192,6 @@ - - - {cf7ac3d1-e2df-41d2-bea6-1e2556cdea26} - - diff -r 0be296605165 -r faa88c50a3d2 Parser/asdl_c.py --- a/Parser/asdl_c.py Wed May 23 22:26:55 2012 +0200 +++ b/Parser/asdl_c.py Wed May 23 21:09:05 2012 +0200 @@ -784,7 +784,7 @@ static PyObject* ast2obj_list(asdl_seq *seq, PyObject* (*func)(void*)) { - Py_ssize_t i, n = asdl_seq_LEN(seq); + int i, n = asdl_seq_LEN(seq); PyObject *result = PyList_New(n); PyObject *value; if (!result) @@ -1106,7 +1106,7 @@ # While the sequence elements are stored as void*, # ast2obj_cmpop expects an enum self.emit("{", depth) - self.emit("Py_ssize_t i, n = asdl_seq_LEN(%s);" % value, depth+1) + self.emit("int i, n = asdl_seq_LEN(%s);" % value, depth+1) self.emit("value = PyList_New(n);", depth+1) self.emit("if (!value) goto failed;", depth+1) self.emit("for(i = 0; i < n; i++)", depth+1) diff -r 0be296605165 -r faa88c50a3d2 Python/Python-ast.c --- a/Python/Python-ast.c Wed May 23 22:26:55 2012 +0200 +++ b/Python/Python-ast.c Wed May 23 21:09:05 2012 +0200 @@ -636,7 +636,7 @@ static PyObject* ast2obj_list(asdl_seq *seq, PyObject* (*func)(void*)) { - Py_ssize_t i, n = asdl_seq_LEN(seq); + int i, n = asdl_seq_LEN(seq); PyObject *result = PyList_New(n); PyObject *value; if (!result) @@ -2857,7 +2857,7 @@ goto failed; Py_DECREF(value); { - Py_ssize_t i, n = asdl_seq_LEN(o->v.Compare.ops); + int i, n = asdl_seq_LEN(o->v.Compare.ops); value = PyList_New(n); 
if (!value) goto failed; for(i = 0; i < n; i++) diff -r 0be296605165 -r faa88c50a3d2 Python/asdl.c --- a/Python/asdl.c Wed May 23 22:26:55 2012 +0200 +++ b/Python/asdl.c Wed May 23 21:09:05 2012 +0200 @@ -2,7 +2,7 @@ #include "asdl.h" asdl_seq * -asdl_seq_new(Py_ssize_t size, PyArena *arena) +asdl_seq_new(int size, PyArena *arena) { asdl_seq *seq = NULL; size_t n = (size ? (sizeof(void *) * (size - 1)) : 0); @@ -33,7 +33,7 @@ } asdl_int_seq * -asdl_int_seq_new(Py_ssize_t size, PyArena *arena) +asdl_int_seq_new(int size, PyArena *arena) { asdl_int_seq *seq = NULL; size_t n = (size ? (sizeof(void *) * (size - 1)) : 0); diff -r 0be296605165 -r faa88c50a3d2 Python/ceval.c --- a/Python/ceval.c Wed May 23 22:26:55 2012 +0200 +++ b/Python/ceval.c Wed May 23 21:09:05 2012 +0200 @@ -3572,26 +3572,23 @@ if (cause) { PyObject *fixed_cause; + int result; if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto raise_error; - Py_DECREF(cause); + Py_CLEAR(cause); + } else { + /* Let "exc.__cause__ = cause" handle all further checks */ + fixed_cause = cause; + cause = NULL; /* Steal the reference */ } - else if (PyExceptionInstance_Check(cause)) { - fixed_cause = cause; - } - else if (cause == Py_None) { - Py_DECREF(cause); - fixed_cause = NULL; - } - else { - PyErr_SetString(PyExc_TypeError, - "exception causes must derive from " - "BaseException"); + /* We retain ownership of the reference to fixed_cause */ + result = _PyException_SetCauseChecked(value, fixed_cause); + Py_DECREF(fixed_cause); + if (result < 0) { goto raise_error; } - PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); diff -r 0be296605165 -r faa88c50a3d2 Python/formatter_unicode.c --- a/Python/formatter_unicode.c Wed May 23 22:26:55 2012 +0200 +++ b/Python/formatter_unicode.c Wed May 23 21:09:05 2012 +0200 @@ -316,21 +316,32 @@ /* Do the padding, and return a pointer to where the caller-supplied content goes. */ static Py_ssize_t -fill_padding(PyObject *s, Py_ssize_t start, Py_ssize_t nchars, +fill_padding(_PyUnicodeWriter *writer, + Py_ssize_t nchars, Py_UCS4 fill_char, Py_ssize_t n_lpadding, Py_ssize_t n_rpadding) { + Py_ssize_t pos, r; + /* Pad on left. */ - if (n_lpadding) - PyUnicode_Fill(s, start, start + n_lpadding, fill_char); + if (n_lpadding) { + pos = writer->pos; + r = PyUnicode_Fill(writer->buffer, pos, pos + n_lpadding, fill_char); + if (r == -1) + return -1; + } /* Pad on right. */ - if (n_rpadding) - PyUnicode_Fill(s, start + nchars + n_lpadding, - start + nchars + n_lpadding + n_rpadding, fill_char); + if (n_rpadding) { + pos = writer->pos + nchars + n_lpadding; + r = PyUnicode_Fill(writer->buffer, pos, pos + n_rpadding, fill_char); + if (r == -1) + return -1; + } /* Pointer to the user content. */ - return start + n_lpadding; + writer->pos += n_lpadding; + return 0; } /************************************************************************/ @@ -541,7 +552,7 @@ as determined in calc_number_widths(). Return -1 on error, or 0 on success. */ static int -fill_number(PyObject *out, Py_ssize_t pos, const NumberFieldWidths *spec, +fill_number(_PyUnicodeWriter *writer, const NumberFieldWidths *spec, PyObject *digits, Py_ssize_t d_start, Py_ssize_t d_end, PyObject *prefix, Py_ssize_t p_start, Py_UCS4 fill_char, @@ -549,36 +560,36 @@ { /* Used to keep track of digits, decimal, and remainder. 
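The Python/ceval.c hunk above changes how RAISE_VARARGS handles an explicit cause: a class given after "from" is still instantiated by the interpreter, but every other value now goes through _PyException_SetCauseChecked(), so it gets the same validation as assigning exc.__cause__ directly instead of the inline TypeError/None special-casing. At the Python level the two spellings it distinguishes look like this (a plain illustration, not code from the changeset):

    # Both raises go through the RAISE_VARARGS path shown above: a class
    # cause is instantiated by the interpreter, an instance is handed to
    # the __cause__ machinery as-is.

    def reraise_with_instance(text):
        try:
            return int(text)
        except ValueError as exc:
            raise RuntimeError("bad value %r" % text) from exc

    def reraise_with_class(text):
        try:
            return int(text)
        except ValueError:
            raise RuntimeError("bad value %r" % text) from KeyError

    for func in (reraise_with_instance, reraise_with_class):
        try:
            func("not-a-number")
        except RuntimeError as err:
            # __cause__ holds the explicit cause; __context__ still records
            # the ValueError that was being handled when the raise ran.
            print(func.__name__, type(err.__cause__).__name__,
                  type(err.__context__).__name__)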
*/ Py_ssize_t d_pos = d_start; - unsigned int kind = PyUnicode_KIND(out); - void *data = PyUnicode_DATA(out); + const enum PyUnicode_Kind kind = writer->kind; + const void *data = writer->data; Py_ssize_t r; if (spec->n_lpadding) { - PyUnicode_Fill(out, pos, pos + spec->n_lpadding, fill_char); - pos += spec->n_lpadding; + PyUnicode_Fill(writer->buffer, writer->pos, spec->n_lpadding, fill_char); + writer->pos += spec->n_lpadding; } if (spec->n_sign == 1) { - PyUnicode_WRITE(kind, data, pos++, spec->sign); + PyUnicode_WRITE(kind, data, writer->pos, spec->sign); + writer->pos++; } if (spec->n_prefix) { - if (PyUnicode_CopyCharacters(out, pos, - prefix, p_start, - spec->n_prefix) < 0) - return -1; + _PyUnicode_FastCopyCharacters(writer->buffer, writer->pos, + prefix, p_start, + spec->n_prefix); if (toupper) { Py_ssize_t t; for (t = 0; t < spec->n_prefix; t++) { - Py_UCS4 c = PyUnicode_READ(kind, data, pos + t); + Py_UCS4 c = PyUnicode_READ(kind, data, writer->pos + t); c = Py_TOUPPER(c); assert (c <= 127); - PyUnicode_WRITE(kind, data, pos + t, c); + PyUnicode_WRITE(kind, data, writer->pos + t, c); } } - pos += spec->n_prefix; + writer->pos += spec->n_prefix; } if (spec->n_spadding) { - PyUnicode_Fill(out, pos, pos + spec->n_spadding, fill_char); - pos += spec->n_spadding; + PyUnicode_Fill(writer->buffer, writer->pos, spec->n_spadding, fill_char); + writer->pos += spec->n_spadding; } /* Only for type 'c' special case, it has no digits. */ @@ -594,7 +605,7 @@ return -1; } r = _PyUnicode_InsertThousandsGrouping( - out, pos, + writer->buffer, writer->pos, spec->n_grouped_digits, pdigits + kind * d_pos, spec->n_digits, spec->n_min_width, @@ -609,34 +620,32 @@ if (toupper) { Py_ssize_t t; for (t = 0; t < spec->n_grouped_digits; t++) { - Py_UCS4 c = PyUnicode_READ(kind, data, pos + t); + Py_UCS4 c = PyUnicode_READ(kind, data, writer->pos + t); c = Py_TOUPPER(c); if (c > 127) { PyErr_SetString(PyExc_SystemError, "non-ascii grouped digit"); return -1; } - PyUnicode_WRITE(kind, data, pos + t, c); + PyUnicode_WRITE(kind, data, writer->pos + t, c); } } - pos += spec->n_grouped_digits; + writer->pos += spec->n_grouped_digits; if (spec->n_decimal) { - if (PyUnicode_CopyCharacters(out, pos, locale->decimal_point, 0, spec->n_decimal) < 0) - return -1; - pos += spec->n_decimal; + _PyUnicode_FastCopyCharacters(writer->buffer, writer->pos, locale->decimal_point, 0, spec->n_decimal); + writer->pos += spec->n_decimal; d_pos += 1; } if (spec->n_remainder) { - if (PyUnicode_CopyCharacters(out, pos, digits, d_pos, spec->n_remainder) < 0) - return -1; - pos += spec->n_remainder; + _PyUnicode_FastCopyCharacters(writer->buffer, writer->pos, digits, d_pos, spec->n_remainder); + writer->pos += spec->n_remainder; d_pos += spec->n_remainder; } if (spec->n_rpadding) { - PyUnicode_Fill(out, pos, pos + spec->n_rpadding, fill_char); - pos += spec->n_rpadding; + PyUnicode_Fill(writer->buffer, writer->pos, writer->pos + spec->n_rpadding, fill_char); + writer->pos += spec->n_rpadding; } return 0; } @@ -707,17 +716,20 @@ /*********** string formatting ******************************************/ /************************************************************************/ -static PyObject * -format_string_internal(PyObject *value, const InternalFormatSpec *format) +static int +format_string_internal(PyObject *value, const InternalFormatSpec *format, + _PyUnicodeWriter *writer) { Py_ssize_t lpad; Py_ssize_t rpad; Py_ssize_t total; - Py_ssize_t pos; - Py_ssize_t len = PyUnicode_GET_LENGTH(value); - PyObject *result = NULL; + Py_ssize_t 
len; + int result = -1; Py_UCS4 maxchar; + assert(PyUnicode_IS_READY(value)); + len = PyUnicode_GET_LENGTH(value); + /* sign is not allowed on strings */ if (format->sign != '\0') { PyErr_SetString(PyExc_ValueError, @@ -741,6 +753,11 @@ goto done; } + if (format->width == -1 && format->precision == -1) { + /* Fast path */ + return _PyUnicodeWriter_WriteStr(writer, value); + } + /* if precision is specified, output no more that format.precision characters */ if (format->precision >= 0 && len >= format->precision) { @@ -754,21 +771,23 @@ maxchar = Py_MAX(maxchar, format->fill_char); /* allocate the resulting string */ - result = PyUnicode_New(total, maxchar); - if (result == NULL) + if (_PyUnicodeWriter_Prepare(writer, total, maxchar) == -1) goto done; /* Write into that space. First the padding. */ - pos = fill_padding(result, 0, len, - format->fill_char=='\0'?' ':format->fill_char, - lpad, rpad); + result = fill_padding(writer, len, + format->fill_char=='\0'?' ':format->fill_char, + lpad, rpad); + if (result == -1) + goto done; /* Then the source string. */ - if (PyUnicode_CopyCharacters(result, pos, value, 0, len) < 0) - Py_CLEAR(result); + _PyUnicode_FastCopyCharacters(writer->buffer, writer->pos, + value, 0, len); + writer->pos += (len + rpad); + result = 0; done: - assert(!result || _PyUnicode_CheckConsistency(result, 1)); return result; } @@ -780,11 +799,11 @@ typedef PyObject* (*IntOrLongToString)(PyObject *value, int base); -static PyObject * -format_int_or_long_internal(PyObject *value, const InternalFormatSpec *format, - IntOrLongToString tostring) +static int +format_long_internal(PyObject *value, const InternalFormatSpec *format, + _PyUnicodeWriter *writer) { - PyObject *result = NULL; + int result = -1; Py_UCS4 maxchar = 127; PyObject *tmp = NULL; Py_ssize_t inumeric_chars; @@ -798,7 +817,6 @@ Py_ssize_t prefix = 0; NumberFieldWidths spec; long x; - int err; /* Locale settings, either from the actual locale or from a hard-code pseudo-locale */ @@ -872,13 +890,23 @@ break; } + if (format->sign != '+' && format->sign != ' ' + && format->width == -1 + && format->type != 'X' && format->type != 'n' + && !format->thousands_separators + && PyLong_CheckExact(value)) + { + /* Fast path */ + return _PyLong_FormatWriter(value, base, format->alternate, writer); + } + /* The number of prefix chars is the same as the leading chars to skip */ if (format->alternate) n_prefix = leading_chars_to_skip; /* Do the hard part, converting to a string in a given base */ - tmp = tostring(value, base); + tmp = _PyLong_Format(value, base); if (tmp == NULL || PyUnicode_READY(tmp) == -1) goto done; @@ -914,23 +942,19 @@ &locale, format, &maxchar); /* Allocate the memory. */ - result = PyUnicode_New(n_total, maxchar); - if (!result) + if (_PyUnicodeWriter_Prepare(writer, n_total, maxchar) == -1) goto done; /* Populate the memory. */ - err = fill_number(result, 0, &spec, - tmp, inumeric_chars, inumeric_chars + n_digits, - tmp, prefix, - format->fill_char == '\0' ? ' ' : format->fill_char, - &locale, format->type == 'X'); - if (err) - Py_CLEAR(result); + result = fill_number(writer, &spec, + tmp, inumeric_chars, inumeric_chars + n_digits, + tmp, prefix, + format->fill_char == '\0' ? 
' ' : format->fill_char, + &locale, format->type == 'X'); done: Py_XDECREF(tmp); free_locale_info(&locale); - assert(!result || _PyUnicode_CheckConsistency(result, 1)); return result; } @@ -945,9 +969,10 @@ } /* much of this is taken from unicodeobject.c */ -static PyObject * +static int format_float_internal(PyObject *value, - const InternalFormatSpec *format) + const InternalFormatSpec *format, + _PyUnicodeWriter *writer) { char *buf = NULL; /* buffer returned from PyOS_double_to_string */ Py_ssize_t n_digits; @@ -962,12 +987,11 @@ Py_ssize_t index; NumberFieldWidths spec; int flags = 0; - PyObject *result = NULL; + int result = -1; Py_UCS4 maxchar = 127; Py_UCS4 sign_char = '\0'; int float_type; /* Used to see if we have a nan, inf, or regular float. */ PyObject *unicode_tmp = NULL; - int err; /* Locale settings, either from the actual locale or from a hard-code pseudo-locale */ @@ -1025,12 +1049,24 @@ /* Since there is no unicode version of PyOS_double_to_string, just use the 8 bit version and then convert to unicode. */ unicode_tmp = strtounicode(buf, n_digits); + PyMem_Free(buf); if (unicode_tmp == NULL) goto done; - index = 0; + + if (format->sign != '+' && format->sign != ' ' + && format->width == -1 + && format->type != 'n' + && !format->thousands_separators) + { + /* Fast path */ + result = _PyUnicodeWriter_WriteStr(writer, unicode_tmp); + Py_DECREF(unicode_tmp); + return result; + } /* Is a sign character present in the output? If so, remember it and skip it */ + index = 0; if (PyUnicode_READ_CHAR(unicode_tmp, index) == '-') { sign_char = '-'; ++index; @@ -1055,24 +1091,19 @@ &locale, format, &maxchar); /* Allocate the memory. */ - result = PyUnicode_New(n_total, maxchar); - if (result == NULL) + if (_PyUnicodeWriter_Prepare(writer, n_total, maxchar) == -1) goto done; /* Populate the memory. */ - err = fill_number(result, 0, &spec, - unicode_tmp, index, index + n_digits, - NULL, 0, - format->fill_char == '\0' ? ' ' : format->fill_char, - &locale, 0); - if (err) - Py_CLEAR(result); + result = fill_number(writer, &spec, + unicode_tmp, index, index + n_digits, + NULL, 0, + format->fill_char == '\0' ? ' ' : format->fill_char, + &locale, 0); done: - PyMem_Free(buf); Py_DECREF(unicode_tmp); free_locale_info(&locale); - assert(!result || _PyUnicode_CheckConsistency(result, 1)); return result; } @@ -1080,9 +1111,10 @@ /*********** complex formatting *****************************************/ /************************************************************************/ -static PyObject * +static int format_complex_internal(PyObject *value, - const InternalFormatSpec *format) + const InternalFormatSpec *format, + _PyUnicodeWriter *writer) { double re; double im; @@ -1106,11 +1138,10 @@ NumberFieldWidths re_spec; NumberFieldWidths im_spec; int flags = 0; - PyObject *result = NULL; + int result = -1; Py_UCS4 maxchar = 127; - int rkind; + enum PyUnicode_Kind rkind; void *rdata; - Py_ssize_t index; Py_UCS4 re_sign_char = '\0'; Py_UCS4 im_sign_char = '\0'; int re_float_type; /* Used to see if we have a nan, inf, or regular float. 
*/ @@ -1122,7 +1153,6 @@ Py_ssize_t total; PyObject *re_unicode_tmp = NULL; PyObject *im_unicode_tmp = NULL; - int err; /* Locale settings, either from the actual locale or from a hard-code pseudo-locale */ @@ -1261,47 +1291,49 @@ if (lpad || rpad) maxchar = Py_MAX(maxchar, format->fill_char); - result = PyUnicode_New(total, maxchar); - if (result == NULL) + if (_PyUnicodeWriter_Prepare(writer, total, maxchar) == -1) goto done; - rkind = PyUnicode_KIND(result); - rdata = PyUnicode_DATA(result); + rkind = writer->kind; + rdata = writer->data; /* Populate the memory. First, the padding. */ - index = fill_padding(result, 0, - n_re_total + n_im_total + 1 + add_parens * 2, - format->fill_char=='\0' ? ' ' : format->fill_char, - lpad, rpad); + result = fill_padding(writer, + n_re_total + n_im_total + 1 + add_parens * 2, + format->fill_char=='\0' ? ' ' : format->fill_char, + lpad, rpad); + if (result == -1) + goto done; - if (add_parens) - PyUnicode_WRITE(rkind, rdata, index++, '('); + if (add_parens) { + PyUnicode_WRITE(rkind, rdata, writer->pos, '('); + writer->pos++; + } if (!skip_re) { - err = fill_number(result, index, &re_spec, - re_unicode_tmp, i_re, i_re + n_re_digits, - NULL, 0, - 0, - &locale, 0); - if (err) { - Py_CLEAR(result); + result = fill_number(writer, &re_spec, + re_unicode_tmp, i_re, i_re + n_re_digits, + NULL, 0, + 0, + &locale, 0); + if (result == -1) goto done; - } - index += n_re_total; } - err = fill_number(result, index, &im_spec, - im_unicode_tmp, i_im, i_im + n_im_digits, - NULL, 0, - 0, - &locale, 0); - if (err) { - Py_CLEAR(result); + result = fill_number(writer, &im_spec, + im_unicode_tmp, i_im, i_im + n_im_digits, + NULL, 0, + 0, + &locale, 0); + if (result == -1) goto done; + PyUnicode_WRITE(rkind, rdata, writer->pos, 'j'); + writer->pos++; + + if (add_parens) { + PyUnicode_WRITE(rkind, rdata, writer->pos, ')'); + writer->pos++; } - index += n_im_total; - PyUnicode_WRITE(rkind, rdata, index++, 'j'); - if (add_parens) - PyUnicode_WRITE(rkind, rdata, index++, ')'); + writer->pos += rpad; done: PyMem_Free(re_buf); @@ -1309,61 +1341,79 @@ Py_XDECREF(re_unicode_tmp); Py_XDECREF(im_unicode_tmp); free_locale_info(&locale); - assert(!result || _PyUnicode_CheckConsistency(result, 1)); return result; } /************************************************************************/ /*********** built in formatters ****************************************/ /************************************************************************/ -PyObject * -_PyUnicode_FormatAdvanced(PyObject *obj, - PyObject *format_spec, - Py_ssize_t start, Py_ssize_t end) +int +format_obj(PyObject *obj, _PyUnicodeWriter *writer) +{ + PyObject *str; + int err; + + str = PyObject_Str(obj); + if (str == NULL) + return -1; + err = _PyUnicodeWriter_WriteStr(writer, str); + Py_DECREF(str); + return err; +} + +int +_PyUnicode_FormatAdvancedWriter(PyObject *obj, + PyObject *format_spec, + Py_ssize_t start, Py_ssize_t end, + _PyUnicodeWriter *writer) { InternalFormatSpec format; - PyObject *result; + + assert(PyUnicode_Check(obj)); /* check for the special case of zero length format spec, make it equivalent to str(obj) */ - if (start == end) - return PyObject_Str(obj); + if (start == end) { + if (PyUnicode_CheckExact(obj)) + return _PyUnicodeWriter_WriteStr(writer, obj); + else + return format_obj(obj, writer); + } /* parse the format_spec */ if (!parse_internal_render_format_spec(format_spec, start, end, &format, 's', '<')) - return NULL; + return -1; /* type conversion? 
*/ switch (format.type) { case 's': /* no type conversion needed, already a string. do the formatting */ - result = format_string_internal(obj, &format); - if (result != NULL) - assert(_PyUnicode_CheckConsistency(result, 1)); - break; + return format_string_internal(obj, &format, writer); default: /* unknown */ unknown_presentation_type(format.type, obj->ob_type->tp_name); - result = NULL; + return -1; } - return result; } -static PyObject* -format_int_or_long(PyObject* obj, PyObject* format_spec, - Py_ssize_t start, Py_ssize_t end, - IntOrLongToString tostring) +int +_PyLong_FormatAdvancedWriter(PyObject *obj, + PyObject *format_spec, + Py_ssize_t start, Py_ssize_t end, + _PyUnicodeWriter *writer) { - PyObject *result = NULL; - PyObject *tmp = NULL; + PyObject *tmp = NULL, *str = NULL; InternalFormatSpec format; + int result = -1; /* check for the special case of zero length format spec, make it equivalent to str(obj) */ if (start == end) { - result = PyObject_Str(obj); - goto done; + if (PyLong_CheckExact(obj)) + return _PyLong_FormatWriter(obj, 10, 0, writer); + else + return format_obj(obj, writer); } /* parse the format_spec */ @@ -1382,7 +1432,7 @@ case 'n': /* no type conversion needed, already an int (or long). do the formatting */ - result = format_int_or_long_internal(obj, &format, tostring); + result = format_long_internal(obj, &format, writer); break; case 'e': @@ -1396,7 +1446,7 @@ tmp = PyNumber_Float(obj); if (tmp == NULL) goto done; - result = format_float_internal(tmp, &format); + result = format_float_internal(tmp, &format, writer); break; default: @@ -1407,41 +1457,27 @@ done: Py_XDECREF(tmp); + Py_XDECREF(str); return result; } -/* Need to define long_format as a function that will convert a long - to a string. In 3.0, _PyLong_Format has the correct signature. */ -#define long_format _PyLong_Format - -PyObject * -_PyLong_FormatAdvanced(PyObject *obj, - PyObject *format_spec, - Py_ssize_t start, Py_ssize_t end) +int +_PyFloat_FormatAdvancedWriter(PyObject *obj, + PyObject *format_spec, + Py_ssize_t start, Py_ssize_t end, + _PyUnicodeWriter *writer) { - return format_int_or_long(obj, format_spec, start, end, - long_format); -} - -PyObject * -_PyFloat_FormatAdvanced(PyObject *obj, - PyObject *format_spec, - Py_ssize_t start, Py_ssize_t end) -{ - PyObject *result = NULL; InternalFormatSpec format; /* check for the special case of zero length format spec, make it equivalent to str(obj) */ - if (start == end) { - result = PyObject_Str(obj); - goto done; - } + if (start == end) + return format_obj(obj, writer); /* parse the format_spec */ if (!parse_internal_render_format_spec(format_spec, start, end, &format, '\0', '>')) - goto done; + return -1; /* type conversion? */ switch (format.type) { @@ -1455,38 +1491,32 @@ case 'n': case '%': /* no conversion, already a float. 
do the formatting */ - result = format_float_internal(obj, &format); - break; + return format_float_internal(obj, &format, writer); default: /* unknown */ unknown_presentation_type(format.type, obj->ob_type->tp_name); - goto done; + return -1; } - -done: - return result; } -PyObject * -_PyComplex_FormatAdvanced(PyObject *obj, - PyObject *format_spec, - Py_ssize_t start, Py_ssize_t end) +int +_PyComplex_FormatAdvancedWriter(PyObject *obj, + PyObject *format_spec, + Py_ssize_t start, Py_ssize_t end, + _PyUnicodeWriter *writer) { - PyObject *result = NULL; InternalFormatSpec format; /* check for the special case of zero length format spec, make it equivalent to str(obj) */ - if (start == end) { - result = PyObject_Str(obj); - goto done; - } + if (start == end) + return format_obj(obj, writer); /* parse the format_spec */ if (!parse_internal_render_format_spec(format_spec, start, end, &format, '\0', '>')) - goto done; + return -1; /* type conversion? */ switch (format.type) { @@ -1499,15 +1529,11 @@ case 'G': case 'n': /* no conversion, already a complex. do the formatting */ - result = format_complex_internal(obj, &format); - break; + return format_complex_internal(obj, &format, writer); default: /* unknown */ unknown_presentation_type(format.type, obj->ob_type->tp_name); - goto done; + return -1; } - -done: - return result; } diff -r 0be296605165 -r faa88c50a3d2 Python/freeze_importlib.py --- a/Python/freeze_importlib.py Wed May 23 22:26:55 2012 +0200 +++ b/Python/freeze_importlib.py Wed May 23 21:09:05 2012 +0200 @@ -25,8 +25,6 @@ with open(output_path, 'w', encoding='utf-8') as output_file: output_file.write('\n'.join(lines)) output_file.write('/* Mercurial binary marker: \x00 */') - # Avoid a compiler warning for lack of EOL - output_file.write('\n') if __name__ == '__main__': diff -r 0be296605165 -r faa88c50a3d2 Python/getargs.c --- a/Python/getargs.c Wed May 23 22:26:55 2012 +0200 +++ b/Python/getargs.c Wed May 23 21:09:05 2012 +0200 @@ -1167,8 +1167,11 @@ case 'U': { /* PyUnicode object */ PyObject **p = va_arg(*p_va, PyObject **); - if (PyUnicode_Check(arg)) + if (PyUnicode_Check(arg)) { + if (PyUnicode_READY(arg) == -1) + RETURN_ERR_OCCURRED; *p = arg; + } else return converterr("str", arg, msgbuf, bufsize); break; diff -r 0be296605165 -r faa88c50a3d2 Python/import.c --- a/Python/import.c Wed May 23 22:26:55 2012 +0200 +++ b/Python/import.c Wed May 23 21:09:05 2012 +0200 @@ -1370,7 +1370,47 @@ PyObject * PyImport_ImportModuleNoBlock(const char *name) { - return PyImport_ImportModule(name); + PyObject *nameobj, *modules, *result; +#ifdef WITH_THREAD + long me; +#endif + + /* Try to get the module from sys.modules[name] */ + modules = PyImport_GetModuleDict(); + if (modules == NULL) + return NULL; + + nameobj = PyUnicode_FromString(name); + if (nameobj == NULL) + return NULL; + result = PyDict_GetItem(modules, nameobj); + if (result != NULL) { + Py_DECREF(nameobj); + Py_INCREF(result); + return result; + } + PyErr_Clear(); +#ifdef WITH_THREAD + /* check the import lock + * me might be -1 but I ignore the error here, the lock function + * takes care of the problem */ + me = PyThread_get_thread_ident(); + if (import_lock_thread == -1 || import_lock_thread == me) { + /* no thread or me is holding the lock */ + result = PyImport_Import(nameobj); + } + else { + PyErr_Format(PyExc_ImportError, + "Failed to import %R because the import lock" + "is held by another thread.", + nameobj); + result = NULL; + } +#else + result = PyImport_Import(nameobj); +#endif + Py_DECREF(nameobj); + return 
result; } @@ -1380,13 +1420,11 @@ int level) { _Py_IDENTIFIER(__import__); - _Py_IDENTIFIER(__initializing__); _Py_IDENTIFIER(__package__); _Py_IDENTIFIER(__path__); _Py_IDENTIFIER(__name__); _Py_IDENTIFIER(_find_and_load); _Py_IDENTIFIER(_handle_fromlist); - _Py_IDENTIFIER(_lock_unlock_module); _Py_static_string(single_dot, "."); PyObject *abs_name = NULL; PyObject *builtins_import = NULL; @@ -1569,48 +1607,16 @@ goto error_with_unlock; } else if (mod != NULL) { - PyObject *value; - int initializing = 0; - Py_INCREF(mod); - /* Only call _bootstrap._lock_unlock_module() if __initializing__ is true. */ - value = _PyObject_GetAttrId(mod, &PyId___initializing__); - if (value == NULL) - PyErr_Clear(); - else { - initializing = PyObject_IsTrue(value); - Py_DECREF(value); - if (initializing == -1) - PyErr_Clear(); - } - if (initializing > 0) { - /* _bootstrap._lock_unlock_module() releases the import lock */ - value = _PyObject_CallMethodObjIdArgs(interp->importlib, - &PyId__lock_unlock_module, abs_name, - NULL); - if (value == NULL) - goto error; - Py_DECREF(value); - } - else { -#ifdef WITH_THREAD - if (_PyImport_ReleaseLock() < 0) { - PyErr_SetString(PyExc_RuntimeError, "not holding the import lock"); - goto error; - } -#endif - } } else { - /* _bootstrap._find_and_load() releases the import lock */ mod = _PyObject_CallMethodObjIdArgs(interp->importlib, &PyId__find_and_load, abs_name, builtins_import, NULL); if (mod == NULL) { - goto error; + goto error_with_unlock; } } - /* From now on we don't hold the import lock anymore. */ if (PyObject_Not(fromlist)) { if (level == 0 || PyUnicode_GET_LENGTH(name) > 0) { @@ -1619,12 +1625,12 @@ PyObject *borrowed_dot = _PyUnicode_FromId(&single_dot); if (borrowed_dot == NULL) { - goto error; + goto error_with_unlock; } partition = PyUnicode_Partition(name, borrowed_dot); if (partition == NULL) { - goto error; + goto error_with_unlock; } if (PyUnicode_GET_LENGTH(PyTuple_GET_ITEM(partition, 1)) == 0) { @@ -1632,7 +1638,7 @@ Py_DECREF(partition); final_mod = mod; Py_INCREF(mod); - goto error; + goto exit_with_unlock; } front = PyTuple_GET_ITEM(partition, 0); @@ -1651,7 +1657,7 @@ abs_name_len - cut_off); Py_DECREF(front); if (to_return == NULL) { - goto error; + goto error_with_unlock; } final_mod = PyDict_GetItem(interp->modules, to_return); @@ -1677,8 +1683,8 @@ fromlist, builtins_import, NULL); } - goto error; + exit_with_unlock: error_with_unlock: #ifdef WITH_THREAD if (_PyImport_ReleaseLock() < 0) { diff -r 0be296605165 -r faa88c50a3d2 Python/importlib.h Binary file Python/importlib.h has changed diff -r 0be296605165 -r faa88c50a3d2 Python/pythonrun.c --- a/Python/pythonrun.c Wed May 23 22:26:55 2012 +0200 +++ b/Python/pythonrun.c Wed May 23 21:09:05 2012 +0200 @@ -1761,7 +1761,11 @@ else if (PyExceptionInstance_Check(value)) { cause = PyException_GetCause(value); context = PyException_GetContext(value); - if (cause) { + if (cause && cause == Py_None) { + /* print neither cause nor context */ + ; + } + else if (cause) { res = PySet_Contains(seen, cause); if (res == -1) PyErr_Clear(); @@ -1772,8 +1776,7 @@ cause_message, f); } } - else if (context && - !((PyBaseExceptionObject *)value)->suppress_context) { + else if (context) { res = PySet_Contains(seen, context); if (res == -1) PyErr_Clear(); diff -r 0be296605165 -r faa88c50a3d2 README --- a/README Wed May 23 22:26:55 2012 +0200 +++ b/README Wed May 23 21:09:05 2012 +0200 @@ -15,8 +15,6 @@ On Unix, Linux, BSD, OSX, and Cygwin: -New text - ./configure make make test diff -r 0be296605165 -r faa88c50a3d2 
Tools/buildbot/clean.bat --- a/Tools/buildbot/clean.bat Wed May 23 22:26:55 2012 +0200 +++ b/Tools/buildbot/clean.bat Wed May 23 21:09:05 2012 +0200 @@ -1,5 +1,7 @@ @rem Used by the buildbot "clean" step. call "%VS100COMNTOOLS%vsvars32.bat" +@echo Deleting .pyc/.pyo files ... +del /s Lib\*.pyc Lib\*.pyo @echo Deleting test leftovers ... rmdir /s /q build cd PCbuild diff -r 0be296605165 -r faa88c50a3d2 Tools/buildbot/external-common.bat --- a/Tools/buildbot/external-common.bat Wed May 23 22:26:55 2012 +0200 +++ b/Tools/buildbot/external-common.bat Wed May 23 21:09:05 2012 +0200 @@ -4,7 +4,7 @@ cd .. @rem XXX: If you need to force the buildbots to start from a fresh environment, uncomment @rem the following, check it in, then check it out, comment it out, then check it back in. -@rem if exist bzip2-1.0.6 rd /s/q bzip2-1.0.6 +@rem if exist bzip2-1.0.5 rd /s/q bzip2-1.0.5 @rem if exist tcltk rd /s/q tcltk @rem if exist tcltk64 rd /s/q tcltk64 @rem if exist tcl8.4.12 rd /s/q tcl8.4.12 @@ -14,20 +14,20 @@ @rem if exist tk8.4.16 rd /s/q tk8.4.16 @rem if exist tk-8.4.18.1 rd /s/q tk-8.4.18.1 @rem if exist db-4.4.20 rd /s/q db-4.4.20 -@rem if exist openssl-1.0.1c rd /s/q openssl-1.0.1c -@rem if exist sqlite-3.7.12 rd /s/q sqlite-3.7.12 +@rem if exist openssl-1.0.0a rd /s/q openssl-1.0.0a +@rem if exist sqlite-3.7.4 rd /s/q sqlite-3.7.4 @rem bzip -if not exist bzip2-1.0.6 ( - rd /s/q bzip2-1.0.5 - svn export http://svn.python.org/projects/external/bzip2-1.0.6 +if not exist bzip2-1.0.5 ( + rd /s/q bzip2-1.0.3 + svn export http://svn.python.org/projects/external/bzip2-1.0.5 ) +@rem Sleepycat db +if not exist db-4.4.20 svn export http://svn.python.org/projects/external/db-4.4.20-vs9 db-4.4.20 + @rem OpenSSL -if not exist openssl-1.0.1c ( - rd /s/q openssl-1.0.0j - svn export http://svn.python.org/projects/external/openssl-1.0.1c -) +if not exist openssl-1.0.0a svn export http://svn.python.org/projects/external/openssl-1.0.0a @rem tcl/tk if not exist tcl-8.5.11.0 ( @@ -37,9 +37,9 @@ if not exist tk-8.5.11.0 svn export http://svn.python.org/projects/external/tk-8.5.11.0 @rem sqlite3 -if not exist sqlite-3.7.12 ( - rd /s/q sqlite-source-3.7.4 - svn export http://svn.python.org/projects/external/sqlite-3.7.12 +if not exist sqlite-3.7.4 ( + rd /s/q sqlite-source-3.6.21 + svn export http://svn.python.org/projects/external/sqlite-3.7.4 ) @rem lzma diff -r 0be296605165 -r faa88c50a3d2 Tools/scripts/run_tests.py --- a/Tools/scripts/run_tests.py Wed May 23 22:26:55 2012 +0200 +++ b/Tools/scripts/run_tests.py Wed May 23 21:09:05 2012 +0200 @@ -10,10 +10,6 @@ import os import sys import test.support -try: - import threading -except ImportError: - threading = None def is_multiprocess_flag(arg): @@ -38,7 +34,7 @@ ]) if sys.platform == 'win32': args.append('-n') # Silence alerts under Windows - if threading and not any(is_multiprocess_flag(arg) for arg in regrtest_args): + if not any(is_multiprocess_flag(arg) for arg in regrtest_args): args.extend(['-j', '0']) # Use all CPU cores if not any(is_resource_use_flag(arg) for arg in regrtest_args): args.extend(['-u', 'all,-largefile,-audio,-gui']) diff -r 0be296605165 -r faa88c50a3d2 setup.py --- a/setup.py Wed May 23 22:26:55 2012 +0200 +++ b/setup.py Wed May 23 21:09:05 2012 +0200 @@ -749,17 +749,20 @@ openssl_ver) missing.append('_hashlib') - # We always compile these even when OpenSSL is available (issue #14693). - # It's harmless and the object code is tiny (40-50 KB per module, - # only loaded when actually used). 
- exts.append( Extension('_sha256', ['sha256module.c'], - depends=['hashlib.h']) ) - exts.append( Extension('_sha512', ['sha512module.c'], - depends=['hashlib.h']) ) - exts.append( Extension('_md5', ['md5module.c'], - depends=['hashlib.h']) ) - exts.append( Extension('_sha1', ['sha1module.c'], - depends=['hashlib.h']) ) + min_sha2_openssl_ver = 0x00908000 + if COMPILED_WITH_PYDEBUG or openssl_ver < min_sha2_openssl_ver: + # OpenSSL doesn't do these until 0.9.8 so we'll bring our own hash + exts.append( Extension('_sha256', ['sha256module.c'], + depends=['hashlib.h']) ) + exts.append( Extension('_sha512', ['sha512module.c'], + depends=['hashlib.h']) ) + + if COMPILED_WITH_PYDEBUG or not have_usable_openssl: + # no openssl at all, use our own md5 and sha1 + exts.append( Extension('_md5', ['md5module.c'], + depends=['hashlib.h']) ) + exts.append( Extension('_sha1', ['sha1module.c'], + depends=['hashlib.h']) ) # Modules that provide persistent dictionary-like semantics. You will # probably want to arrange for at least one of them to be available on
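The string/int/float/complex formatting hunks above move the internal formatters from returning a fresh PyObject to writing into a shared _PyUnicodeWriter, and add fast paths such as treating a zero-length format spec as plain str(obj). From the public C API that special case is observable through PyObject_Format(); the helper below is a minimal illustrative sketch (the function name is made up), not part of the patch::

    #include <Python.h>

    /* format(obj, "") and str(obj) produce equal strings; the empty-spec
     * fast path in the formatters above short-circuits straight to str(). */
    static int
    empty_spec_equals_str(PyObject *obj)
    {
        PyObject *spec, *formatted, *as_str;
        int same;

        spec = PyUnicode_FromString("");
        if (spec == NULL)
            return -1;
        formatted = PyObject_Format(obj, spec);
        Py_DECREF(spec);
        if (formatted == NULL)
            return -1;
        as_str = PyObject_Str(obj);
        if (as_str == NULL) {
            Py_DECREF(formatted);
            return -1;
        }
        /* PyUnicode_Compare() returns 0 for equal strings; any other
         * result (including an error) is treated as "different" here. */
        same = (PyUnicode_Compare(formatted, as_str) == 0);
        Py_DECREF(formatted);
        Py_DECREF(as_str);
        return same;
    }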
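With the getargs.c change, the 'U' converter calls PyUnicode_READY() on its argument before storing it, so a C function that receives a str through PyArg_ParseTuple() can use the PEP 393 accessor macros directly. A small sketch of such a function (the name str_width is made up)::

    #include <Python.h>

    /* str_width(s) -> number of code points in s.  The 'U' converter
     * guarantees a "ready" str object, so PyUnicode_GET_LENGTH() can be
     * used without a separate PyUnicode_READY() call. */
    static PyObject *
    str_width(PyObject *self, PyObject *args)
    {
        PyObject *text;

        if (!PyArg_ParseTuple(args, "U", &text))
            return NULL;
        return PyLong_FromSsize_t(PyUnicode_GET_LENGTH(text));
    }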
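The first import.c hunk restores the non-blocking behaviour of PyImport_ImportModuleNoBlock(): the module is fetched from sys.modules when possible, and if another thread holds the import lock the call raises ImportError instead of blocking. Callers therefore need a strategy for that failure; a minimal sketch follows (the fallback shown, simply clearing the error, is an assumption and not prescribed by the patch)::

    #include <Python.h>

    /* Returns a new reference to the "time" module, or NULL.  If NULL is
     * returned with no exception set, the module could not be imported
     * without blocking and the caller may retry later. */
    static PyObject *
    import_time_nonblocking(void)
    {
        PyObject *mod = PyImport_ImportModuleNoBlock("time");
        if (mod == NULL && PyErr_ExceptionMatches(PyExc_ImportError)) {
            /* Either the import lock is held by another thread or the
             * import genuinely failed; this sketch just gives up quietly. */
            PyErr_Clear();
            return NULL;
        }
        return mod;
    }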
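The __import__ hunk replaces several bare "goto error" jumps with "goto error_with_unlock" so that the global import lock is released on every exit path once it has been acquired. The same release-on-every-exit discipline in miniature, using an ordinary pthread mutex purely for illustration::

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Every exit taken after the lock is acquired funnels through the
     * *_with_unlock label, mirroring the control flow in the hunk above. */
    static int
    do_locked_work(int fail)
    {
        int result = -1;
        char *buf;

        pthread_mutex_lock(&lock);

        buf = malloc(64);
        if (buf == NULL)
            goto error_with_unlock;
        if (fail)
            goto error_with_unlock;

        result = 0;                 /* success */
    error_with_unlock:
        free(buf);                  /* free(NULL) is a no-op */
        pthread_mutex_unlock(&lock);
        return result;
    }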
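The pythonrun.c hunk changes the default traceback display so that an exception whose __cause__ is None has neither its cause nor its implicit context printed. At the C level that state is produced with PyException_SetCause(), which steals a reference to its argument; a fragment of a hypothetical extension function showing the pattern::

    #include <Python.h>

    /* Raise ValueError and mark it as "raise ... from None": with the
     * change above the display routines will print neither __cause__
     * nor __context__ for it. */
    static PyObject *
    raise_without_context(PyObject *self, PyObject *unused)
    {
        PyObject *type, *value, *tb;

        PyErr_SetString(PyExc_ValueError, "bad value");
        PyErr_Fetch(&type, &value, &tb);
        PyErr_NormalizeException(&type, &value, &tb);

        /* PyException_SetCause() steals its reference, so incref the
         * None singleton before handing it over. */
        Py_INCREF(Py_None);
        PyException_SetCause(value, Py_None);

        PyErr_Restore(type, value, tb);
        return NULL;
    }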
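The setup.py hunk keys the build of the fallback hash modules on the packed OpenSSL version number: _sha256/_sha512 are compiled only for debug builds or when the detected OpenSSL is older than 0x00908000 (0.9.8), and _md5/_sha1 only for debug builds or when no usable OpenSSL is available at all. For reference, such a constant decomposes according to OpenSSL's 0xMNNFFPPS layout; a tiny stand-alone decoder (illustrative only)::

    #include <stdio.h>

    /* Decode a packed OpenSSL version number (0xMNNFFPPS layout of
     * OPENSSL_VERSION_NUMBER): 0x00908000 -> 0.9.8, the threshold used
     * in the setup.py hunk above. */
    int main(void)
    {
        unsigned long ver = 0x00908000UL;
        unsigned int major = (unsigned int)((ver >> 28) & 0xf);
        unsigned int minor = (unsigned int)((ver >> 20) & 0xff);
        unsigned int fix   = (unsigned int)((ver >> 12) & 0xff);

        printf("%u.%u.%u\n", major, minor, fix);   /* prints 0.9.8 */
        return 0;
    }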