diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/__init__.py --- a/Lib/multiprocessing/__init__.py Wed Aug 07 05:54:28 2013 -0700 +++ b/Lib/multiprocessing/__init__.py Wed Aug 07 22:43:26 2013 +0100 @@ -21,6 +21,8 @@ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Barrier', 'Queue', 'SimpleQueue', 'JoinableQueue', 'Pool', 'Value', 'Array', 'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING', + 'set_executable', 'set_start_method', 'get_start_method', + 'get_all_start_methods' ] # @@ -30,8 +32,14 @@ import os import sys -from multiprocessing.process import Process, current_process, active_children -from multiprocessing.util import SUBDEBUG, SUBWARNING +from .process import Process, current_process, active_children + +# +# XXX These should not really be documented or public. +# + +SUBDEBUG = 5 +SUBWARNING = 25 # # Alias for main module -- will be reset by bootstrapping child processes @@ -69,7 +77,7 @@ The managers methods such as `Lock()`, `Condition()` and `Queue()` can be used to create shared objects. ''' - from multiprocessing.managers import SyncManager + from .managers import SyncManager m = SyncManager() m.start() return m @@ -78,7 +86,7 @@ ''' Returns two connection object connected by a pipe ''' - from multiprocessing.connection import Pipe + from .connection import Pipe return Pipe(duplex) def cpu_count(): @@ -97,21 +105,21 @@ If so then run code specified by commandline and exit. ''' if sys.platform == 'win32' and getattr(sys, 'frozen', False): - from multiprocessing.forking import freeze_support + from .spawn import freeze_support freeze_support() def get_logger(): ''' Return package logger -- if it does not already exist then it is created ''' - from multiprocessing.util import get_logger + from .util import get_logger return get_logger() def log_to_stderr(level=None): ''' Turn on logging and add a handler which prints to stderr ''' - from multiprocessing.util import log_to_stderr + from .util import log_to_stderr return log_to_stderr(level) def allow_connection_pickling(): @@ -120,7 +128,7 @@ ''' # This is undocumented. In previous versions of multiprocessing # its only effect was to make socket objects inheritable on Windows. - import multiprocessing.connection + from . 
import connection # # Definitions depending on native semaphores @@ -130,120 +138,137 @@ ''' Returns a non-recursive lock object ''' - from multiprocessing.synchronize import Lock + from .synchronize import Lock return Lock() def RLock(): ''' Returns a recursive lock object ''' - from multiprocessing.synchronize import RLock + from .synchronize import RLock return RLock() def Condition(lock=None): ''' Returns a condition object ''' - from multiprocessing.synchronize import Condition + from .synchronize import Condition return Condition(lock) def Semaphore(value=1): ''' Returns a semaphore object ''' - from multiprocessing.synchronize import Semaphore + from .synchronize import Semaphore return Semaphore(value) def BoundedSemaphore(value=1): ''' Returns a bounded semaphore object ''' - from multiprocessing.synchronize import BoundedSemaphore + from .synchronize import BoundedSemaphore return BoundedSemaphore(value) def Event(): ''' Returns an event object ''' - from multiprocessing.synchronize import Event + from .synchronize import Event return Event() def Barrier(parties, action=None, timeout=None): ''' Returns a barrier object ''' - from multiprocessing.synchronize import Barrier + from .synchronize import Barrier return Barrier(parties, action, timeout) def Queue(maxsize=0): ''' Returns a queue object ''' - from multiprocessing.queues import Queue + from .queues import Queue return Queue(maxsize) def JoinableQueue(maxsize=0): ''' Returns a queue object ''' - from multiprocessing.queues import JoinableQueue + from .queues import JoinableQueue return JoinableQueue(maxsize) def SimpleQueue(): ''' Returns a queue object ''' - from multiprocessing.queues import SimpleQueue + from .queues import SimpleQueue return SimpleQueue() def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None): ''' Returns a process pool object ''' - from multiprocessing.pool import Pool + from .pool import Pool return Pool(processes, initializer, initargs, maxtasksperchild) def RawValue(typecode_or_type, *args): ''' Returns a shared object ''' - from multiprocessing.sharedctypes import RawValue + from .sharedctypes import RawValue return RawValue(typecode_or_type, *args) def RawArray(typecode_or_type, size_or_initializer): ''' Returns a shared array ''' - from multiprocessing.sharedctypes import RawArray + from .sharedctypes import RawArray return RawArray(typecode_or_type, size_or_initializer) def Value(typecode_or_type, *args, lock=True): ''' Returns a synchronized shared object ''' - from multiprocessing.sharedctypes import Value + from .sharedctypes import Value return Value(typecode_or_type, *args, lock=lock) def Array(typecode_or_type, size_or_initializer, *, lock=True): ''' Returns a synchronized shared array ''' - from multiprocessing.sharedctypes import Array + from .sharedctypes import Array return Array(typecode_or_type, size_or_initializer, lock=lock) # # # -if sys.platform == 'win32': +def set_executable(executable): + ''' + Sets the path to a python.exe or pythonw.exe binary used to run + child processes on Windows instead of sys.executable. + Useful for people embedding Python. + ''' + from .spawn import set_executable + set_executable(executable) - def set_executable(executable): - ''' - Sets the path to a python.exe or pythonw.exe binary used to run - child processes on Windows instead of sys.executable. - Useful for people embedding Python. 
- ''' - from multiprocessing.forking import set_executable - set_executable(executable) +def set_start_method(method): + ''' + Set method for starting processes: 'fork', 'spawn' or 'forkserver'. + ''' + from .popen import set_start_method + set_start_method(method) - __all__ += ['set_executable'] +def get_start_method(): + ''' + Get method for starting processes: 'fork', 'spawn' or 'forkserver'. + ''' + from .popen import get_start_method + return get_start_method() + +def get_all_start_methods(): + ''' + Get list of available start methods, default first. + ''' + from .popen import get_all_start_methods + return get_all_start_methods() diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/connection.py --- a/Lib/multiprocessing/connection.py Wed Aug 07 05:54:28 2013 -0700 +++ b/Lib/multiprocessing/connection.py Wed Aug 07 22:43:26 2013 +0100 @@ -21,9 +21,13 @@ import itertools import _multiprocessing -from multiprocessing import current_process, AuthenticationError, BufferTooShort -from multiprocessing.util import get_temp_dir, Finalize, sub_debug, debug -from multiprocessing.forking import ForkingPickler + +from . import reduction +from . import util + +from . import AuthenticationError, BufferTooShort +from .reduction import ForkingPickler + try: import _winapi from _winapi import WAIT_OBJECT_0, WAIT_TIMEOUT, INFINITE @@ -71,7 +75,7 @@ if family == 'AF_INET': return ('localhost', 0) elif family == 'AF_UNIX': - return tempfile.mktemp(prefix='listener-', dir=get_temp_dir()) + return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir()) elif family == 'AF_PIPE': return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' % (os.getpid(), next(_mmap_counter))) @@ -577,7 +581,7 @@ self._last_accepted = None if family == 'AF_UNIX': - self._unlink = Finalize( + self._unlink = util.Finalize( self, os.unlink, args=(address,), exitpriority=0 ) else: @@ -625,8 +629,8 @@ self._handle_queue = [self._new_handle(first=True)] self._last_accepted = None - sub_debug('listener created with address=%r', self._address) - self.close = Finalize( + util.sub_debug('listener created with address=%r', self._address) + self.close = util.Finalize( self, PipeListener._finalize_pipe_listener, args=(self._handle_queue, self._address), exitpriority=0 ) @@ -668,7 +672,7 @@ @staticmethod def _finalize_pipe_listener(queue, address): - sub_debug('closing listener with address=%r', address) + util.sub_debug('closing listener with address=%r', address) for handle in queue: _winapi.CloseHandle(handle) @@ -919,15 +923,32 @@ # if sys.platform == 'win32': - from . import reduction - ForkingPickler.register(socket.socket, reduction.reduce_socket) - ForkingPickler.register(Connection, reduction.reduce_connection) - ForkingPickler.register(PipeConnection, reduction.reduce_pipe_connection) + def reduce_connection(conn): + handle = conn.fileno() + with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s: + from . 
import resource_sharer + ds = resource_sharer.DupSocket(s) + return rebuild_connection, (ds, conn.readable, conn.writable) + def rebuild_connection(ds, readable, writable): + sock = ds.detach() + return Connection(sock.detach(), readable, writable) + reduction.register(Connection, reduce_connection) + + def reduce_pipe_connection(conn): + access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) | + (_winapi.FILE_GENERIC_WRITE if conn.writable else 0)) + dh = reduction.DupHandle(conn.fileno(), access) + return rebuild_pipe_connection, (dh, conn.readable, conn.writable) + def rebuild_pipe_connection(dh, readable, writable): + handle = dh.detach() + return PipeConnection(handle, readable, writable) + reduction.register(PipeConnection, reduce_pipe_connection) + else: - try: - from . import reduction - except ImportError: - pass - else: - ForkingPickler.register(socket.socket, reduction.reduce_socket) - ForkingPickler.register(Connection, reduction.reduce_connection) + def reduce_connection(conn): + df = reduction.DupFd(conn.fileno()) + return rebuild_connection, (df, conn.readable, conn.writable) + def rebuild_connection(df, readable, writable): + fd = df.detach() + return Connection(fd, readable, writable) + reduction.register(Connection, reduce_connection) diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/dummy/__init__.py --- a/Lib/multiprocessing/dummy/__init__.py Wed Aug 07 05:54:28 2013 -0700 +++ b/Lib/multiprocessing/dummy/__init__.py Wed Aug 07 22:43:26 2013 +0100 @@ -22,7 +22,7 @@ import weakref import array -from multiprocessing.dummy.connection import Pipe +from .connection import Pipe from threading import Lock, RLock, Semaphore, BoundedSemaphore from threading import Event, Condition, Barrier from queue import Queue @@ -113,7 +113,7 @@ pass def Pool(processes=None, initializer=None, initargs=()): - from multiprocessing.pool import ThreadPool + from ..pool import ThreadPool return ThreadPool(processes, initializer, initargs) JoinableQueue = Queue diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/forking.py --- a/Lib/multiprocessing/forking.py Wed Aug 07 05:54:28 2013 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,477 +0,0 @@ -# -# Module for starting a process object using os.fork() or CreateProcess() -# -# multiprocessing/forking.py -# -# Copyright (c) 2006-2008, R Oudkerk -# Licensed to PSF under a Contributor Agreement. 
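The reduce/rebuild pairs registered above for Connection and PipeConnection all follow the standard pickle reduce protocol: the reducer returns a callable plus the argument tuple that recreates the object on the receiving side. A minimal sketch of the same registration pattern using the module-level register() added by the new reduction.py; the Point class and its two helpers are illustrative only, not part of this changeset:

```python
from multiprocessing import reduction

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

def rebuild_point(x, y):
    # Runs on the unpickling side to recreate the object.
    return Point(x, y)

def reduce_point(p):
    # Same (callable, args) shape as reduce_connection() above.
    return rebuild_point, (p.x, p.y)

reduction.register(Point, reduce_point)

data = reduction.ForkingPickler.dumps(Point(1, 2))
p2 = reduction.ForkingPickler.loads(data)
assert (p2.x, p2.y) == (1, 2)
```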
-# - -import io -import os -import pickle -import sys -import signal -import errno - -from multiprocessing import util, process - -__all__ = ['Popen', 'assert_spawning', 'duplicate', 'close', 'ForkingPickler'] - -# -# Check that the current thread is spawning a child process -# - -def assert_spawning(self): - if not Popen.thread_is_spawning(): - raise RuntimeError( - '%s objects should only be shared between processes' - ' through inheritance' % type(self).__name__ - ) - -# -# Try making some callable types picklable -# - -from pickle import Pickler -from copyreg import dispatch_table - -class ForkingPickler(Pickler): - _extra_reducers = {} - def __init__(self, *args): - Pickler.__init__(self, *args) - self.dispatch_table = dispatch_table.copy() - self.dispatch_table.update(self._extra_reducers) - @classmethod - def register(cls, type, reduce): - cls._extra_reducers[type] = reduce - - @staticmethod - def dumps(obj): - buf = io.BytesIO() - ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(obj) - return buf.getbuffer() - - loads = pickle.loads - - -def _reduce_method(m): - if m.__self__ is None: - return getattr, (m.__class__, m.__func__.__name__) - else: - return getattr, (m.__self__, m.__func__.__name__) -class _C: - def f(self): - pass -ForkingPickler.register(type(_C().f), _reduce_method) - - -def _reduce_method_descriptor(m): - return getattr, (m.__objclass__, m.__name__) -ForkingPickler.register(type(list.append), _reduce_method_descriptor) -ForkingPickler.register(type(int.__add__), _reduce_method_descriptor) - -try: - from functools import partial -except ImportError: - pass -else: - def _reduce_partial(p): - return _rebuild_partial, (p.func, p.args, p.keywords or {}) - def _rebuild_partial(func, args, keywords): - return partial(func, *args, **keywords) - ForkingPickler.register(partial, _reduce_partial) - -# -# Unix -# - -if sys.platform != 'win32': - duplicate = os.dup - close = os.close - - # - # We define a Popen class similar to the one from subprocess, but - # whose constructor takes a process object as its argument. - # - - class Popen(object): - - def __init__(self, process_obj): - sys.stdout.flush() - sys.stderr.flush() - self.returncode = None - - r, w = os.pipe() - self.sentinel = r - - self.pid = os.fork() - if self.pid == 0: - os.close(r) - if 'random' in sys.modules: - import random - random.seed() - code = process_obj._bootstrap() - os._exit(code) - - # `w` will be closed when the child exits, at which point `r` - # will become ready for reading (using e.g. select()). - os.close(w) - util.Finalize(self, os.close, (r,)) - - def poll(self, flag=os.WNOHANG): - if self.returncode is None: - while True: - try: - pid, sts = os.waitpid(self.pid, flag) - except OSError as e: - if e.errno == errno.EINTR: - continue - # Child process not yet created. See #1731717 - # e.errno == errno.ECHILD == 10 - return None - else: - break - if pid == self.pid: - if os.WIFSIGNALED(sts): - self.returncode = -os.WTERMSIG(sts) - else: - assert os.WIFEXITED(sts) - self.returncode = os.WEXITSTATUS(sts) - return self.returncode - - def wait(self, timeout=None): - if self.returncode is None: - if timeout is not None: - from .connection import wait - if not wait([self.sentinel], timeout): - return None - # This shouldn't block if wait() returned successfully. 
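The sentinel trick implemented here (and kept by the new popen_fork.py below) is what lets a process be waited on without blocking in waitpid(): the parent keeps the read end of a pipe whose write end dies with the child. A rough caller-side sketch using the public API; the task() function and the timeout value are illustrative:

```python
import time
from multiprocessing import Process
from multiprocessing.connection import wait

def task():
    time.sleep(1)

if __name__ == '__main__':
    p = Process(target=task)
    p.start()
    # p.sentinel becomes ready when the child exits, so it can be
    # multiplexed with other connections in a single wait() call.
    ready = wait([p.sentinel], timeout=5)
    print('child finished' if ready else 'timed out')
    p.join()
```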
- return self.poll(os.WNOHANG if timeout == 0.0 else 0) - return self.returncode - - def terminate(self): - if self.returncode is None: - try: - os.kill(self.pid, signal.SIGTERM) - except OSError: - if self.wait(timeout=0.1) is None: - raise - - @staticmethod - def thread_is_spawning(): - return False - -# -# Windows -# - -else: - import _thread - import msvcrt - import _winapi - - from pickle import load, HIGHEST_PROTOCOL - - def dump(obj, file, protocol=None): - ForkingPickler(file, protocol).dump(obj) - - # - # - # - - TERMINATE = 0x10000 - WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) - WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") - - close = _winapi.CloseHandle - - # - # _python_exe is the assumed path to the python executable. - # People embedding Python want to modify it. - # - - if WINSERVICE: - _python_exe = os.path.join(sys.exec_prefix, 'python.exe') - else: - _python_exe = sys.executable - - def set_executable(exe): - global _python_exe - _python_exe = exe - - # - # - # - - def duplicate(handle, target_process=None, inheritable=False): - if target_process is None: - target_process = _winapi.GetCurrentProcess() - return _winapi.DuplicateHandle( - _winapi.GetCurrentProcess(), handle, target_process, - 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS - ) - - # - # We define a Popen class similar to the one from subprocess, but - # whose constructor takes a process object as its argument. - # - - class Popen(object): - ''' - Start a subprocess to run the code of a process object - ''' - _tls = _thread._local() - - def __init__(self, process_obj): - cmd = ' '.join('"%s"' % x for x in get_command_line()) - prep_data = get_preparation_data(process_obj._name) - - # create pipe for communication with child - rfd, wfd = os.pipe() - - # get handle for read end of the pipe and make it inheritable - rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True) - os.close(rfd) - - with open(wfd, 'wb', closefd=True) as to_child: - # start process - try: - hp, ht, pid, tid = _winapi.CreateProcess( - _python_exe, cmd + (' %s' % rhandle), - None, None, 1, 0, None, None, None - ) - _winapi.CloseHandle(ht) - finally: - close(rhandle) - - # set attributes of self - self.pid = pid - self.returncode = None - self._handle = hp - self.sentinel = int(hp) - util.Finalize(self, _winapi.CloseHandle, (self.sentinel,)) - - # send information to child - Popen._tls.process_handle = int(hp) - try: - dump(prep_data, to_child, HIGHEST_PROTOCOL) - dump(process_obj, to_child, HIGHEST_PROTOCOL) - finally: - del Popen._tls.process_handle - - @staticmethod - def thread_is_spawning(): - return getattr(Popen._tls, 'process_handle', None) is not None - - @staticmethod - def duplicate_for_child(handle): - return duplicate(handle, Popen._tls.process_handle) - - def wait(self, timeout=None): - if self.returncode is None: - if timeout is None: - msecs = _winapi.INFINITE - else: - msecs = max(0, int(timeout * 1000 + 0.5)) - - res = _winapi.WaitForSingleObject(int(self._handle), msecs) - if res == _winapi.WAIT_OBJECT_0: - code = _winapi.GetExitCodeProcess(self._handle) - if code == TERMINATE: - code = -signal.SIGTERM - self.returncode = code - - return self.returncode - - def poll(self): - return self.wait(timeout=0) - - def terminate(self): - if self.returncode is None: - try: - _winapi.TerminateProcess(int(self._handle), TERMINATE) - except OSError: - if self.wait(timeout=1.0) is None: - raise - - # - # - # - - def is_forking(argv): - ''' - Return whether commandline indicates we 
are forking - ''' - if len(argv) >= 2 and argv[1] == '--multiprocessing-fork': - assert len(argv) == 3 - return True - else: - return False - - - def freeze_support(): - ''' - Run code for process object if this in not the main process - ''' - if is_forking(sys.argv): - main() - sys.exit() - - - def get_command_line(): - ''' - Returns prefix of command line used for spawning a child process - ''' - if getattr(process.current_process(), '_inheriting', False): - raise RuntimeError(''' - Attempt to start a new process before the current process - has finished its bootstrapping phase. - - This probably means that you are on Windows and you have - forgotten to use the proper idiom in the main module: - - if __name__ == '__main__': - freeze_support() - ... - - The "freeze_support()" line can be omitted if the program - is not going to be frozen to produce a Windows executable.''') - - if getattr(sys, 'frozen', False): - return [sys.executable, '--multiprocessing-fork'] - else: - prog = 'from multiprocessing.forking import main; main()' - opts = util._args_from_interpreter_flags() - return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork'] - - - def main(): - ''' - Run code specifed by data received over pipe - ''' - assert is_forking(sys.argv) - - handle = int(sys.argv[-1]) - fd = msvcrt.open_osfhandle(handle, os.O_RDONLY) - from_parent = os.fdopen(fd, 'rb') - - process.current_process()._inheriting = True - preparation_data = load(from_parent) - prepare(preparation_data) - self = load(from_parent) - process.current_process()._inheriting = False - - from_parent.close() - - exitcode = self._bootstrap() - sys.exit(exitcode) - - - def get_preparation_data(name): - ''' - Return info about parent needed by child to unpickle process object - ''' - from .util import _logger, _log_to_stderr - - d = dict( - name=name, - sys_path=sys.path, - sys_argv=sys.argv, - log_to_stderr=_log_to_stderr, - orig_dir=process.ORIGINAL_DIR, - authkey=process.current_process().authkey, - ) - - if _logger is not None: - d['log_level'] = _logger.getEffectiveLevel() - - if not WINEXE and not WINSERVICE: - main_path = getattr(sys.modules['__main__'], '__file__', None) - if not main_path and sys.argv[0] not in ('', '-c'): - main_path = sys.argv[0] - if main_path is not None: - if not os.path.isabs(main_path) and \ - process.ORIGINAL_DIR is not None: - main_path = os.path.join(process.ORIGINAL_DIR, main_path) - d['main_path'] = os.path.normpath(main_path) - - return d - -# -# Prepare current process -# - -old_main_modules = [] - -def prepare(data): - ''' - Try to get current process ready to unpickle process object - ''' - old_main_modules.append(sys.modules['__main__']) - - if 'name' in data: - process.current_process().name = data['name'] - - if 'authkey' in data: - process.current_process()._authkey = data['authkey'] - - if 'log_to_stderr' in data and data['log_to_stderr']: - util.log_to_stderr() - - if 'log_level' in data: - util.get_logger().setLevel(data['log_level']) - - if 'sys_path' in data: - sys.path = data['sys_path'] - - if 'sys_argv' in data: - sys.argv = data['sys_argv'] - - if 'dir' in data: - os.chdir(data['dir']) - - if 'orig_dir' in data: - process.ORIGINAL_DIR = data['orig_dir'] - - if 'main_path' in data: - # XXX (ncoghlan): The following code makes several bogus - # assumptions regarding the relationship between __file__ - # and a module's real name. 
See PEP 302 and issue #10845 - main_path = data['main_path'] - main_name = os.path.splitext(os.path.basename(main_path))[0] - if main_name == '__init__': - main_name = os.path.basename(os.path.dirname(main_path)) - - if main_name == '__main__': - main_module = sys.modules['__main__'] - main_module.__file__ = main_path - elif main_name != 'ipython': - # Main modules not actually called __main__.py may - # contain additional code that should still be executed - import importlib - import types - - if main_path is None: - dirs = None - elif os.path.basename(main_path).startswith('__init__.py'): - dirs = [os.path.dirname(os.path.dirname(main_path))] - else: - dirs = [os.path.dirname(main_path)] - - assert main_name not in sys.modules, main_name - sys.modules.pop('__mp_main__', None) - # We should not try to load __main__ - # since that would execute 'if __name__ == "__main__"' - # clauses, potentially causing a psuedo fork bomb. - loader = importlib.find_loader(main_name, path=dirs) - main_module = types.ModuleType(main_name) - try: - loader.init_module_attrs(main_module) - except AttributeError: # init_module_attrs is optional - pass - main_module.__name__ = '__mp_main__' - code = loader.get_code(main_name) - exec(code, main_module.__dict__) - - sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/forkserver.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Lib/multiprocessing/forkserver.py Wed Aug 07 22:43:26 2013 +0100 @@ -0,0 +1,203 @@ +import os +import select +import signal +import socket +import struct +import sys +import threading + +from . import process +from . import reduction +from . import spawn +from . import util + + +__all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process'] + +# +# +# + +MAXFDS_TO_SEND = 256 +UNSIGNED_STRUCT = struct.Struct('Q') # large enough for pid_t + +# +# Read and write unsigned numbers +# + +def read_unsigned(fd): + data = b'' + length = UNSIGNED_STRUCT.size + while len(data) < length: + while True: + try: + s = os.read(fd, length - len(data)) + break + except InterruptedError: + pass + if not s: + raise ValueError('unexpected EOF') + data += s + return UNSIGNED_STRUCT.unpack(data)[0] + +def write_unsigned(fd, n): + msg = UNSIGNED_STRUCT.pack(n) + while msg: + while True: + try: + nbytes = os.write(fd, msg) + break + except InterruptedError: + pass + if nbytes == 0: + raise RuntimeError('should not get here') + msg = msg[nbytes:] + +# +# Public function +# + +def ensure_running(): + '''Make sure fork server is running.''' + _forkserver.start() + + +def get_inherited_fds(): + '''Get list of fds inherited from parent process.''' + return _forkserver.inherited_fds + + +def connect_to_new_process(fds): + '''Request fork server to create child.''' + if len(fds) + 3 >= MAXFDS_TO_SEND: + raise ValueError('too many fds') + address, alive_w = process.current_process()._config['forkserver_info'] + with socket.socket(socket.AF_UNIX) as client: + client.connect(address) + parent_r, child_w = os.pipe() + child_r, parent_w = os.pipe() + allfds = [child_r, child_w, alive_w] + allfds += fds + try: + reduction.sendfds(client, allfds) + return parent_r, parent_w + except: + os.close(parent_r) + os.close(parent_w) + raise + finally: + os.close(child_r) + os.close(child_w) + +# +# Helper process which forks child process on request +# + +class _ForkServer: + + inherited_fds = None + + def start(self): + if threading.active_count() > 1: + raise RuntimeError('cannot start helper after 
threads started') + config = process.current_process()._config + if config.get('forkserver_info') is not None: + return + + # create listener socket + from .connection import arbitrary_address + self._address = arbitrary_address('AF_UNIX') + self._listener = socket.socket(socket.AF_UNIX) + self._listener.bind(self._address) + os.chmod(self._address, 0o700) + self._listener.listen(100) + + # all client processes own the write end of the "alive" pipe; + # when they all terminate the read end becomes ready. + self._alive_r, self._alive_w = os.pipe() + config['forkserver_info'] = (self._address, self._alive_w) + + self.pid = os.fork() + if self.pid == 0: + try: + # close sys.stdin + if sys.stdin is not None: + try: + sys.stdin.close() + sys.stdin = open(os.devnull) + except (OSError, ValueError): + pass + + # close everything unnecessary + fds_to_keep = [self._listener.fileno(), self._alive_r] + config = process.current_process()._config + fds_to_keep.append(config['semaphore_tracker_fd']) + for f in (sys.stdin, sys.stdout, sys.stderr): + try: + fds_to_keep.append(f.fileno()) + except Exception: + pass + util.close_all_fds_except(fds_to_keep) + + # ignoring SIGCHLD means no need to reap zombie processes + self._handler = signal.signal(signal.SIGCHLD, signal.SIG_IGN) + self._run() + except Exception: + sys.excepthook(*sys.exc_info()) + sys.stderr.flush() + finally: + os._exit(0) + else: + self._listener.close() + + def _run(self): + readers = [self._listener.fileno(), self._alive_r] + while True: + try: + rfds, wfds, xfds = select.select(readers, [], []) + if self._alive_r in rfds: + # EOF because no more client processes left + assert os.read(self._alive_r, 1) == b'' + raise SystemExit + with self._listener.accept()[0] as s: + self._serve_one(s) + except InterruptedError: + pass + + def _serve_one(self, s): + code = 1 + if os.fork() == 0: + # this is the process started at the request of the client + try: + # close unnecessary stuff and reset SIGCHLD handler + self._listener.close() + os.close(self._alive_r) + signal.signal(signal.SIGCHLD, self._handler) + + # receive fds from parent process + fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1) + s.close() + assert len(fds) <= MAXFDS_TO_SEND + child_r, child_w, self._alive_w, *self.inherited_fds = fds + + # send pid to client processes + write_unsigned(child_w, os.getpid()) + + # reseed random number generator + if 'random' in sys.modules: + import random + random.seed() + + # run process object received over pipe + code = spawn._main(child_r) + + # write the exit code to the pipe + write_unsigned(child_w, code) + except Exception: + sys.excepthook(*sys.exc_info()) + sys.stderr.flush() + finally: + os._exit(code) + + +_forkserver = _ForkServer() diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/heap.py --- a/Lib/multiprocessing/heap.py Wed Aug 07 05:54:28 2013 -0700 +++ b/Lib/multiprocessing/heap.py Wed Aug 07 22:43:26 2013 +0100 @@ -15,8 +15,10 @@ import itertools import _multiprocessing -from multiprocessing.util import Finalize, info -from multiprocessing.forking import assert_spawning + +from . import popen +from . import reduction +from . 
import util __all__ = ['BufferWrapper'] @@ -40,7 +42,7 @@ self._state = (self.size, self.name) def __getstate__(self): - assert_spawning(self) + popen.assert_spawning(self) return self._state def __setstate__(self, state): @@ -52,10 +54,33 @@ class Arena(object): - def __init__(self, size): - self.buffer = mmap.mmap(-1, size) + _counter = itertools.count() + + def __init__(self, size, fileno=-1): self.size = size - self.name = None + self.fileno = fileno + if fileno == -1: + name = os.path.join( + util.get_temp_dir(), + 'pym-%d-%d' % (os.getpid(), next(self._counter))) + self.fileno = os.open( + name, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0o600) + os.unlink(name) + util.Finalize(self, os.close, (self.fileno,)) + with open(self.fileno, 'wb', closefd=False) as f: + f.write(b'\0'*size) + self.buffer = mmap.mmap(self.fileno, self.size) + + def reduce_arena(a): + if a.fileno == -1: + raise ValueError('Arena is unpicklable because ' + 'forking was enabled when it was created') + return rebuild_arena, (a.size, reduction.DupFd(a.fileno)) + + def rebuild_arena(size, dupfd): + return Arena(size, dupfd.detach()) + + reduction.register(Arena, reduce_arena) # # Class allowing allocation of chunks of memory from arenas @@ -90,7 +115,7 @@ if i == len(self._lengths): length = self._roundup(max(self._size, size), mmap.PAGESIZE) self._size *= 2 - info('allocating a new mmap of length %d', length) + util.info('allocating a new mmap of length %d', length) arena = Arena(length) self._arenas.append(arena) return (arena, 0, length) @@ -216,7 +241,7 @@ assert 0 <= size < sys.maxsize block = BufferWrapper._heap.malloc(size) self._state = (block, size) - Finalize(self, BufferWrapper._heap.free, args=(block,)) + util.Finalize(self, BufferWrapper._heap.free, args=(block,)) def create_memoryview(self): (arena, start, stop), size = self._state diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/managers.py --- a/Lib/multiprocessing/managers.py Wed Aug 07 05:54:28 2013 -0700 +++ b/Lib/multiprocessing/managers.py Wed Aug 07 22:43:26 2013 +0100 @@ -19,11 +19,15 @@ import array import queue +from time import time as _time from traceback import format_exc -from multiprocessing import Process, current_process, active_children, Pool, util, connection -from multiprocessing.process import AuthenticationString -from multiprocessing.forking import Popen, ForkingPickler -from time import time as _time + +from . import connection +from . import pool +from . import process +from . import popen +from . import reduction +from . 
import util # # Register some things for pickling @@ -31,16 +35,14 @@ def reduce_array(a): return array.array, (a.typecode, a.tobytes()) -ForkingPickler.register(array.array, reduce_array) +reduction.register(array.array, reduce_array) view_types = [type(getattr({}, name)()) for name in ('items','keys','values')] if view_types[0] is not list: # only needed in Py3.0 def rebuild_as_list(obj): return list, (list(obj),) for view_type in view_types: - ForkingPickler.register(view_type, rebuild_as_list) - import copyreg - copyreg.pickle(view_type, rebuild_as_list) + reduction.register(view_type, rebuild_as_list) # # Type for identifying shared objects @@ -130,7 +132,7 @@ def __init__(self, registry, address, authkey, serializer): assert isinstance(authkey, bytes) self.registry = registry - self.authkey = AuthenticationString(authkey) + self.authkey = process.AuthenticationString(authkey) Listener, Client = listener_client[serializer] # do authentication later @@ -146,7 +148,7 @@ Run the server forever ''' self.stop_event = threading.Event() - current_process()._manager_server = self + process.current_process()._manager_server = self try: accepter = threading.Thread(target=self.accepter) accepter.daemon = True @@ -438,9 +440,9 @@ def __init__(self, address=None, authkey=None, serializer='pickle'): if authkey is None: - authkey = current_process().authkey + authkey = process.current_process().authkey self._address = address # XXX not final address if eg ('', 0) - self._authkey = AuthenticationString(authkey) + self._authkey = process.AuthenticationString(authkey) self._state = State() self._state.value = State.INITIAL self._serializer = serializer @@ -476,7 +478,7 @@ reader, writer = connection.Pipe(duplex=False) # spawn process which runs a server - self._process = Process( + self._process = process.Process( target=type(self)._run_server, args=(self._registry, self._address, self._authkey, self._serializer, writer, initializer, initargs), @@ -691,11 +693,11 @@ self._Client = listener_client[serializer][1] if authkey is not None: - self._authkey = AuthenticationString(authkey) + self._authkey = process.AuthenticationString(authkey) elif self._manager is not None: self._authkey = self._manager._authkey else: - self._authkey = current_process().authkey + self._authkey = process.current_process().authkey if incref: self._incref() @@ -704,7 +706,7 @@ def _connect(self): util.debug('making connection to manager') - name = current_process().name + name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name conn = self._Client(self._token.address, authkey=self._authkey) @@ -798,7 +800,7 @@ def __reduce__(self): kwds = {} - if Popen.thread_is_spawning(): + if popen.get_spawning_popen() is not None: kwds['authkey'] = self._authkey if getattr(self, '_isauto', False): @@ -835,14 +837,14 @@ If possible the shared object is returned, or otherwise a proxy for it. 
''' - server = getattr(current_process(), '_manager_server', None) + server = getattr(process.current_process(), '_manager_server', None) if server and server.address == token.address: return server.id_to_obj[token.id][0] else: incref = ( kwds.pop('incref', True) and - not getattr(current_process(), '_inheriting', False) + not getattr(process.current_process(), '_inheriting', False) ) return func(token, serializer, incref=incref, **kwds) @@ -889,7 +891,7 @@ if authkey is None and manager is not None: authkey = manager._authkey if authkey is None: - authkey = current_process().authkey + authkey = process.current_process().authkey ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, @@ -1109,7 +1111,7 @@ AcquirerProxy) SyncManager.register('Condition', threading.Condition, ConditionProxy) SyncManager.register('Barrier', threading.Barrier, BarrierProxy) -SyncManager.register('Pool', Pool, PoolProxy) +SyncManager.register('Pool', pool.Pool, PoolProxy) SyncManager.register('list', list, ListProxy) SyncManager.register('dict', dict, DictProxy) SyncManager.register('Value', Value, ValueProxy) diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/pool.py --- a/Lib/multiprocessing/pool.py Wed Aug 07 05:54:28 2013 -0700 +++ b/Lib/multiprocessing/pool.py Wed Aug 07 22:43:26 2013 +0100 @@ -7,7 +7,7 @@ # Licensed to PSF under a Contributor Agreement. # -__all__ = ['Pool'] +__all__ = ['Pool', 'ThreadPool'] # # Imports @@ -21,8 +21,10 @@ import time import traceback -from multiprocessing import Process, TimeoutError -from multiprocessing.util import Finalize, debug +# If threading is available then ThreadPool should be provided. Therefore +# we avoid top-level imports which are liable to fail on some systems. +from . import util +from . import Process, cpu_count, TimeoutError, SimpleQueue # # Constants representing the state of a pool @@ -104,11 +106,11 @@ try: task = get() except (EOFError, OSError): - debug('worker got EOFError or OSError -- exiting') + util.debug('worker got EOFError or OSError -- exiting') break if task is None: - debug('worker got sentinel -- exiting') + util.debug('worker got sentinel -- exiting') break job, i, func, args, kwds = task @@ -121,11 +123,11 @@ put((job, i, result)) except Exception as e: wrapped = MaybeEncodingError(e, result[1]) - debug("Possible encoding error while sending result: %s" % ( + util.debug("Possible encoding error while sending result: %s" % ( wrapped)) put((job, i, (False, wrapped))) completed += 1 - debug('worker exiting after %d tasks' % completed) + util.debug('worker exiting after %d tasks' % completed) # # Class representing a process pool @@ -184,7 +186,7 @@ self._result_handler._state = RUN self._result_handler.start() - self._terminate = Finalize( + self._terminate = util.Finalize( self, self._terminate_pool, args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, self._worker_handler, self._task_handler, @@ -201,7 +203,7 @@ worker = self._pool[i] if worker.exitcode is not None: # worker exited - debug('cleaning up worker %d' % i) + util.debug('cleaning up worker %d' % i) worker.join() cleaned = True del self._pool[i] @@ -221,7 +223,7 @@ w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.start() - debug('added worker') + util.debug('added worker') def _maintain_pool(self): """Clean up any exited workers and start replacements for them. 
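All the bare debug() calls in pool.py are being rewritten as util.debug(); those messages go to the package logger and stay silent unless a handler is installed. A quick way to surface them when experimenting, using the standard API that this patch leaves unchanged:

```python
import logging
import multiprocessing as mp

def square(x):
    return x * x

if __name__ == '__main__':
    # Attach a stderr handler to the multiprocessing logger so
    # util.debug()/util.info() output becomes visible.
    mp.log_to_stderr(logging.DEBUG)
    with mp.Pool(2) as pool:
        print(pool.map(square, range(4)))
```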
@@ -230,7 +232,6 @@ self._repopulate_pool() def _setup_queues(self): - from .queues import SimpleQueue self._inqueue = SimpleQueue() self._outqueue = SimpleQueue() self._quick_put = self._inqueue._writer.send @@ -358,7 +359,7 @@ time.sleep(0.1) # send sentinel to stop workers pool._taskqueue.put(None) - debug('worker handler exiting') + util.debug('worker handler exiting') @staticmethod def _handle_tasks(taskqueue, put, outqueue, pool): @@ -368,36 +369,36 @@ i = -1 for i, task in enumerate(taskseq): if thread._state: - debug('task handler found thread._state != RUN') + util.debug('task handler found thread._state != RUN') break try: put(task) except OSError: - debug('could not put task on queue') + util.debug('could not put task on queue') break else: if set_length: - debug('doing set_length()') + util.debug('doing set_length()') set_length(i+1) continue break else: - debug('task handler got sentinel') + util.debug('task handler got sentinel') try: # tell result handler to finish when cache is empty - debug('task handler sending sentinel to result handler') + util.debug('task handler sending sentinel to result handler') outqueue.put(None) # tell workers there is no more work - debug('task handler sending sentinel to workers') + util.debug('task handler sending sentinel to workers') for p in pool: put(None) except OSError: - debug('task handler got OSError when sending sentinels') + util.debug('task handler got OSError when sending sentinels') - debug('task handler exiting') + util.debug('task handler exiting') @staticmethod def _handle_results(outqueue, get, cache): @@ -407,16 +408,16 @@ try: task = get() except (OSError, EOFError): - debug('result handler got EOFError/OSError -- exiting') + util.debug('result handler got EOFError/OSError -- exiting') return if thread._state: assert thread._state == TERMINATE - debug('result handler found thread._state=TERMINATE') + util.debug('result handler found thread._state=TERMINATE') break if task is None: - debug('result handler got sentinel') + util.debug('result handler got sentinel') break job, i, obj = task @@ -429,11 +430,11 @@ try: task = get() except (OSError, EOFError): - debug('result handler got EOFError/OSError -- exiting') + util.debug('result handler got EOFError/OSError -- exiting') return if task is None: - debug('result handler ignoring extra sentinel') + util.debug('result handler ignoring extra sentinel') continue job, i, obj = task try: @@ -442,7 +443,7 @@ pass if hasattr(outqueue, '_reader'): - debug('ensuring that outqueue is not full') + util.debug('ensuring that outqueue is not full') # If we don't make room available in outqueue then # attempts to add the sentinel (None) to outqueue may # block. There is guaranteed to be no more than 2 sentinels. 
@@ -454,7 +455,7 @@ except (OSError, EOFError): pass - debug('result handler exiting: len(cache)=%s, thread._state=%s', + util.debug('result handler exiting: len(cache)=%s, thread._state=%s', len(cache), thread._state) @staticmethod @@ -472,19 +473,19 @@ ) def close(self): - debug('closing pool') + util.debug('closing pool') if self._state == RUN: self._state = CLOSE self._worker_handler._state = CLOSE def terminate(self): - debug('terminating pool') + util.debug('terminating pool') self._state = TERMINATE self._worker_handler._state = TERMINATE self._terminate() def join(self): - debug('joining pool') + util.debug('joining pool') assert self._state in (CLOSE, TERMINATE) self._worker_handler.join() self._task_handler.join() @@ -495,7 +496,7 @@ @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # task_handler may be blocked trying to put items on inqueue - debug('removing tasks from inqueue until task handler finished') + util.debug('removing tasks from inqueue until task handler finished') inqueue._rlock.acquire() while task_handler.is_alive() and inqueue._reader.poll(): inqueue._reader.recv() @@ -505,12 +506,12 @@ def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, worker_handler, task_handler, result_handler, cache): # this is guaranteed to only be called once - debug('finalizing pool') + util.debug('finalizing pool') worker_handler._state = TERMINATE task_handler._state = TERMINATE - debug('helping task handler/workers to finish') + util.debug('helping task handler/workers to finish') cls._help_stuff_finish(inqueue, task_handler, len(pool)) assert result_handler.is_alive() or len(cache) == 0 @@ -520,31 +521,31 @@ # We must wait for the worker handler to exit before terminating # workers because we don't want workers to be restarted behind our back. - debug('joining worker handler') + util.debug('joining worker handler') if threading.current_thread() is not worker_handler: worker_handler.join() # Terminate workers which haven't already finished. 
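The ordering enforced here matters at the API level too: join() asserts that close() or terminate() ran first, and the worker handler is joined before workers are terminated so none get restarted mid-shutdown. A minimal caller-side sketch of that lifecycle (the inc() function is illustrative):

```python
import multiprocessing as mp

def inc(x):
    return x + 1

if __name__ == '__main__':
    pool = mp.Pool(2)
    try:
        out = pool.map(inc, range(10))
    finally:
        pool.close()   # no new tasks; workers exit once the queue drains
        pool.join()    # valid only after close() or terminate()
    print(out)
```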
if pool and hasattr(pool[0], 'terminate'): - debug('terminating workers') + util.debug('terminating workers') for p in pool: if p.exitcode is None: p.terminate() - debug('joining task handler') + util.debug('joining task handler') if threading.current_thread() is not task_handler: task_handler.join() - debug('joining result handler') + util.debug('joining result handler') if threading.current_thread() is not result_handler: result_handler.join() if pool and hasattr(pool[0], 'terminate'): - debug('joining pool workers') + util.debug('joining pool workers') for p in pool: if p.is_alive(): # worker has not yet exited - debug('cleaning up worker %d' % p.pid) + util.debug('cleaning up worker %d' % p.pid) p.join() def __enter__(self): @@ -730,7 +731,10 @@ class ThreadPool(Pool): - from .dummy import Process + @staticmethod + def Process(*args, **kwds): + from .dummy import Process + return Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=()): Pool.__init__(self, processes, initializer, initargs) diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/popen.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Lib/multiprocessing/popen.py Wed Aug 07 22:43:26 2013 +0100 @@ -0,0 +1,78 @@ +import sys +import threading + +__all__ = ['Popen', 'get_spawning_popen', 'set_spawning_popen', + 'assert_spawning'] + +# +# Check that the current thread is spawning a child process +# + +_tls = threading.local() + +def get_spawning_popen(): + return getattr(_tls, 'spawning_popen', None) + +def set_spawning_popen(popen): + _tls.spawning_popen = popen + +def assert_spawning(obj): + if get_spawning_popen() is None: + raise RuntimeError( + '%s objects should only be shared between processes' + ' through inheritance' % type(obj).__name__ + ) + +# +# +# + +_Popen = None + +def Popen(process_obj): + if _Popen is None: + set_start_method() + return _Popen(process_obj) + +def get_start_method(): + if _Popen is None: + set_start_method() + return _Popen.method + +def set_start_method(meth=None, start_helpers=True): + global _Popen + try: + modname = _method_to_module[meth] + __import__(modname) + except (KeyError, ImportError): + raise ValueError('could not use start method %r' % meth) + module = sys.modules[modname] + if start_helpers: + module.Popen.ensure_helpers_running() + _Popen = module.Popen + + +if sys.platform == 'win32': + + _method_to_module = { + None: 'multiprocessing.popen_spawn_win32', + 'spawn': 'multiprocessing.popen_spawn_win32', + } + + def get_all_start_methods(): + return ['spawn'] + +else: + _method_to_module = { + None: 'multiprocessing.popen_fork', + 'fork': 'multiprocessing.popen_fork', + 'spawn': 'multiprocessing.popen_spawn_posix', + 'forkserver': 'multiprocessing.popen_forkserver', + } + + def get_all_start_methods(): + from . import reduction + if reduction.HAVE_SEND_HANDLE: + return ['fork', 'spawn', 'forkserver'] + else: + return ['fork', 'spawn'] diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/popen_fork.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Lib/multiprocessing/popen_fork.py Wed Aug 07 22:43:26 2013 +0100 @@ -0,0 +1,87 @@ +import os +import sys +import signal +import errno + +from . 
import util + +__all__ = ['Popen'] + +# +# Start child process using fork +# + +class Popen(object): + method = 'fork' + + def __init__(self, process_obj): + sys.stdout.flush() + sys.stderr.flush() + self.returncode = None + self._launch(process_obj) + + def duplicate_for_child(self, fd): + return fd + + def poll(self, flag=os.WNOHANG): + if self.returncode is None: + while True: + try: + pid, sts = os.waitpid(self.pid, flag) + except OSError as e: + if e.errno == errno.EINTR: + continue + # Child process not yet created. See #1731717 + # e.errno == errno.ECHILD == 10 + return None + else: + break + if pid == self.pid: + if os.WIFSIGNALED(sts): + self.returncode = -os.WTERMSIG(sts) + else: + assert os.WIFEXITED(sts) + self.returncode = os.WEXITSTATUS(sts) + return self.returncode + + def wait(self, timeout=None): + if self.returncode is None: + if timeout is not None: + from .connection import wait + if not wait([self.sentinel], timeout): + return None + # This shouldn't block if wait() returned successfully. + return self.poll(os.WNOHANG if timeout == 0.0 else 0) + return self.returncode + + def terminate(self): + if self.returncode is None: + try: + os.kill(self.pid, signal.SIGTERM) + except ProcessLookupError: + pass + except OSError: + if self.wait(timeout=0.1) is None: + raise + + def _launch(self, process_obj): + code = 1 + parent_r, child_w = os.pipe() + self.pid = os.fork() + if self.pid == 0: + try: + os.close(parent_r) + if 'random' in sys.modules: + import random + random.seed() + code = process_obj._bootstrap() + finally: + os._exit(code) + else: + os.close(child_w) + util.Finalize(self, os.close, (parent_r,)) + self.sentinel = parent_r + + @staticmethod + def ensure_helpers_running(): + pass diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/popen_forkserver.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Lib/multiprocessing/popen_forkserver.py Wed Aug 07 22:43:26 2013 +0100 @@ -0,0 +1,76 @@ +import io +import os + +from . import reduction +if not reduction.HAVE_SEND_HANDLE: + raise ImportError('No support for sending fds between processes') +from . import forkserver +from . import popen +from . import popen_fork +from . import spawn +from . 
import util + + +__all__ = ['Popen'] + +# +# Wrapper for an fd used while launching a process +# + +class DupFd(object): + def __init__(self, ind): + self.ind = ind + def detach(self): + return forkserver.get_inherited_fds()[self.ind] + +# +# Start child process using a server process +# + +class Popen(popen_fork.Popen): + method = 'forkserver' + DupFd = DupFd + + def __init__(self, process_obj): + self._fds = [] + super().__init__(process_obj) + + def duplicate_for_child(self, fd): + self._fds.append(fd) + return len(self._fds) - 1 + + def _launch(self, process_obj): + prep_data = spawn.get_preparation_data(process_obj._name, True) + + buf = io.BytesIO() + popen.set_spawning_popen(self) + try: + reduction.dump(prep_data, buf) + reduction.dump(process_obj, buf) + finally: + popen.set_spawning_popen(None) + + self.sentinel, w = forkserver.connect_to_new_process(self._fds) + util.Finalize(self, os.close, (self.sentinel,)) + with open(w, 'wb', closefd=True) as f: + f.write(buf.getbuffer()) + self.pid = forkserver.read_unsigned(self.sentinel) + + def poll(self, flag=os.WNOHANG): + if self.returncode is None: + from .connection import wait + timeout = 0 if flag == os.WNOHANG else None + if not wait([self.sentinel], timeout): + return None + try: + self.returncode = forkserver.read_unsigned(self.sentinel) + except (OSError, ValueError): + # The process ended abnormally perhaps because of a signal + self.returncode = 255 + return self.returncode + + @staticmethod + def ensure_helpers_running(): + from . import semaphore_tracker + semaphore_tracker.ensure_running() + forkserver.ensure_running() diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/popen_spawn_posix.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Lib/multiprocessing/popen_spawn_posix.py Wed Aug 07 22:43:26 2013 +0100 @@ -0,0 +1,90 @@ +import io +import os +import _posixsubprocess + +from . import popen +from . import popen_fork +from . import reduction +from . import spawn +from . import util + +from . 
import current_process + +__all__ = ['Popen'] + +# +# Start a program with only specified fds kept open +# + +def spawnv_closefds(path, args, fds): + fds = sorted(fds) + errpipe_read, errpipe_write = _posixsubprocess.cloexec_pipe() + try: + return _posixsubprocess.fork_exec( + args, [os.fsencode(path)], True, fds, None, None, + -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write, + False, False, None) + finally: + os.close(errpipe_read) + os.close(errpipe_write) + +# +# Wrapper for an fd used while launching a process +# + +class DupFd(object): + def __init__(self, fd): + self.fd = fd + def detach(self): + return self.fd + +# +# Start child process using a fresh interpreter +# + +class Popen(popen_fork.Popen): + method = 'spawn' + DupFd = DupFd + + def __init__(self, process_obj): + self._fds = [] + super().__init__(process_obj) + + def duplicate_for_child(self, fd): + self._fds.append(fd) + return fd + + def _launch(self, process_obj): + tracker_fd = current_process()._config['semaphore_tracker_fd'] + self._fds.append(tracker_fd) + prep_data = spawn.get_preparation_data(process_obj._name, False) + fp = io.BytesIO() + popen.set_spawning_popen(self) + try: + reduction.dump(prep_data, fp) + reduction.dump(process_obj, fp) + finally: + popen.set_spawning_popen(None) + + parent_r = child_w = child_r = parent_w = None + try: + parent_r, child_w = os.pipe() + child_r, parent_w = os.pipe() + cmd = spawn.get_command_line() + [str(child_r)] + self._fds.extend([child_r, child_w]) + self.pid = spawnv_closefds(spawn.get_executable(), + cmd, self._fds) + self.sentinel = parent_r + with open(parent_w, 'wb', closefd=False) as f: + f.write(fp.getbuffer()) + finally: + if parent_r is not None: + util.Finalize(self, os.close, (parent_r,)) + for fd in (child_r, child_w, parent_w): + if fd is not None: + os.close(fd) + + @staticmethod + def ensure_helpers_running(): + from . import semaphore_tracker + semaphore_tracker.ensure_running() diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/popen_spawn_win32.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Lib/multiprocessing/popen_spawn_win32.py Wed Aug 07 22:43:26 2013 +0100 @@ -0,0 +1,102 @@ +import os +import msvcrt +import signal +import sys +import _winapi + +from . import spawn +from . import popen +from . import reduction +from . import util + +__all__ = ['Popen'] + +# +# +# + +TERMINATE = 0x10000 +WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) +WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") + +# +# We define a Popen class similar to the one from subprocess, but +# whose constructor takes a process object as its argument. +# + +class Popen(object): + ''' + Start a subprocess to run the code of a process object + ''' + method = 'spawn' + + def __init__(self, process_obj): + prep_data = spawn.get_preparation_data(process_obj._name, False) + cmd = ' '.join('"%s"' % x for x in spawn.get_command_line()) + + # read end of pipe will be "stolen" by the child process + # -- see spawn_main() in spawn.py. 
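Both spawn implementations are loaded on demand by the start-method selector in popen.py; which Popen class ends up in use is decided by set_start_method(). A short usage sketch of the new public API added in __init__.py at the top of this changeset:

```python
import multiprocessing as mp

def hello():
    print('hello from the child')

if __name__ == '__main__':
    print(mp.get_all_start_methods())  # e.g. ['fork', 'spawn', 'forkserver'] on Unix
    mp.set_start_method('spawn')       # choose before creating any Process
    p = mp.Process(target=hello)
    p.start()
    p.join()
```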
+ rhandle, whandle = _winapi.CreatePipe(None, 0) + wfd = msvcrt.open_osfhandle(whandle, 0) + cmd += ' {} {}'.format(os.getpid(), rhandle) + + with open(wfd, 'wb', closefd=True) as to_child: + # start process + try: + hp, ht, pid, tid = _winapi.CreateProcess( + spawn.get_executable(), cmd, + None, None, False, 0, None, None, None) + _winapi.CloseHandle(ht) + except: + _winapi.CloseHandle(rhandle) + raise + + # set attributes of self + self.pid = pid + self.returncode = None + self._handle = hp + self.sentinel = int(hp) + util.Finalize(self, _winapi.CloseHandle, (self.sentinel,)) + + # send information to child + popen.set_spawning_popen(self) + try: + reduction.dump(prep_data, to_child) + reduction.dump(process_obj, to_child) + finally: + popen.set_spawning_popen(None) + + def duplicate_for_child(self, handle): + assert self is popen.get_spawning_popen() + return reduction.duplicate(handle, self.sentinel) + + def wait(self, timeout=None): + if self.returncode is None: + if timeout is None: + msecs = _winapi.INFINITE + else: + msecs = max(0, int(timeout * 1000 + 0.5)) + + res = _winapi.WaitForSingleObject(int(self._handle), msecs) + if res == _winapi.WAIT_OBJECT_0: + code = _winapi.GetExitCodeProcess(self._handle) + if code == TERMINATE: + code = -signal.SIGTERM + self.returncode = code + + return self.returncode + + def poll(self): + return self.wait(timeout=0) + + def terminate(self): + if self.returncode is None: + try: + _winapi.TerminateProcess(int(self._handle), TERMINATE) + except OSError: + if self.wait(timeout=1.0) is None: + raise + + @staticmethod + def ensure_helpers_running(): + pass diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/process.py --- a/Lib/multiprocessing/process.py Wed Aug 07 05:54:28 2013 -0700 +++ b/Lib/multiprocessing/process.py Wed Aug 07 22:43:26 2013 +0100 @@ -43,7 +43,7 @@ Return list of process objects corresponding to live child processes ''' _cleanup() - return list(_current_process._children) + return list(_children) # # @@ -51,9 +51,9 @@ def _cleanup(): # check for processes which have finished - for p in list(_current_process._children): + for p in list(_children): if p._popen.poll() is not None: - _current_process._children.discard(p) + _children.discard(p) # # The `Process` class @@ -70,14 +70,9 @@ def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, *, daemon=None): assert group is None, 'group argument must be None for now' - count = next(_current_process._counter) + count = next(_process_counter) self._identity = _current_process._identity + (count,) - self._authkey = _current_process._authkey - if daemon is not None: - self._daemonic = daemon - else: - self._daemonic = _current_process._daemonic - self._tempdir = _current_process._tempdir + self._config = _current_process._config.copy() self._parent_pid = os.getpid() self._popen = None self._target = target @@ -85,6 +80,8 @@ self._kwargs = dict(kwargs) self._name = name or type(self).__name__ + '-' + \ ':'.join(str(i) for i in self._identity) + if daemon is not None: + self.daemon = daemon _dangling.add(self) def run(self): @@ -101,16 +98,16 @@ assert self._popen is None, 'cannot start a process twice' assert self._parent_pid == os.getpid(), \ 'can only start a process object created by current process' - assert not _current_process._daemonic, \ + assert not _current_process._config.get('daemon'), \ 'daemonic processes are not allowed to have children' _cleanup() if self._Popen is not None: Popen = self._Popen else: - from .forking import Popen + from .popen import 
Popen self._popen = Popen(self) self._sentinel = self._popen.sentinel - _current_process._children.add(self) + _children.add(self) def terminate(self): ''' @@ -126,7 +123,7 @@ assert self._popen is not None, 'can only join a started process' res = self._popen.wait(timeout) if res is not None: - _current_process._children.discard(self) + _children.discard(self) def is_alive(self): ''' @@ -154,7 +151,7 @@ ''' Return whether process is a daemon ''' - return self._daemonic + return self._config.get('daemon', False) @daemon.setter def daemon(self, daemonic): @@ -162,18 +159,18 @@ Set whether process is a daemon ''' assert self._popen is None, 'process has already started' - self._daemonic = daemonic + self._config['daemon'] = daemonic @property def authkey(self): - return self._authkey + return self._config['authkey'] @authkey.setter def authkey(self, authkey): ''' Set authorization key of process ''' - self._authkey = AuthenticationString(authkey) + self._config['authkey'] = AuthenticationString(authkey) @property def exitcode(self): @@ -227,17 +224,17 @@ status = 'stopped[%s]' % _exitcode_to_name.get(status, status) return '<%s(%s, %s%s)>' % (type(self).__name__, self._name, - status, self._daemonic and ' daemon' or '') + status, self.daemon and ' daemon' or '') ## def _bootstrap(self): from . import util - global _current_process + global _current_process, _process_counter, _children try: - self._children = set() - self._counter = itertools.count(1) + _process_counter = itertools.count(1) + _children = set() if sys.stdin is not None: try: sys.stdin.close() @@ -285,8 +282,8 @@ class AuthenticationString(bytes): def __reduce__(self): - from .forking import Popen - if not Popen.thread_is_spawning(): + from .popen import get_spawning_popen + if get_spawning_popen() is None: raise TypeError( 'Pickling an AuthenticationString object is ' 'disallowed for security reasons' ) @@ -301,16 +298,19 @@ def __init__(self): self._identity = () - self._daemonic = False self._name = 'MainProcess' self._parent_pid = None self._popen = None - self._counter = itertools.count(1) - self._children = set() - self._authkey = AuthenticationString(os.urandom(32)) - self._tempdir = None + self._config = {'authkey': AuthenticationString(os.urandom(32)), + 'semprefix': 'mp'} + # Note that some versions of FreeBSD only allow named + # semaphores to have names of up to 14 characters. Therefore + # we choose a short prefix. + _current_process = _MainProcess() +_process_counter = itertools.count(1) +_children = set() del _MainProcess # diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/queues.py --- a/Lib/multiprocessing/queues.py Wed Aug 07 05:54:28 2013 -0700 +++ b/Lib/multiprocessing/queues.py Wed Aug 07 22:43:26 2013 +0100 @@ -18,11 +18,15 @@ import errno from queue import Empty, Full + import _multiprocessing -from multiprocessing.connection import Pipe -from multiprocessing.synchronize import Lock, BoundedSemaphore, Semaphore, Condition -from multiprocessing.util import debug, info, Finalize, register_after_fork -from multiprocessing.forking import assert_spawning, ForkingPickler + +from . import connection +from . import popen +from . 
import synchronize + +from .util import debug, info, Finalize, register_after_fork, is_exiting +from .reduction import ForkingPickler # # Queue type using a pipe, buffer and thread @@ -34,14 +38,14 @@ if maxsize <= 0: maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX self._maxsize = maxsize - self._reader, self._writer = Pipe(duplex=False) - self._rlock = Lock() + self._reader, self._writer = connection.Pipe(duplex=False) + self._rlock = synchronize.Lock() self._opid = os.getpid() if sys.platform == 'win32': self._wlock = None else: - self._wlock = Lock() - self._sem = BoundedSemaphore(maxsize) + self._wlock = synchronize.Lock() + self._sem = synchronize.BoundedSemaphore(maxsize) # For use by concurrent.futures self._ignore_epipe = False @@ -51,7 +55,7 @@ register_after_fork(self, Queue._after_fork) def __getstate__(self): - assert_spawning(self) + popen.assert_spawning(self) return (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) @@ -208,8 +212,6 @@ @staticmethod def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe): debug('starting thread to feed data to pipe') - from .util import is_exiting - nacquire = notempty.acquire nrelease = notempty.release nwait = notempty.wait @@ -279,8 +281,8 @@ def __init__(self, maxsize=0): Queue.__init__(self, maxsize) - self._unfinished_tasks = Semaphore(0) - self._cond = Condition() + self._unfinished_tasks = synchronize.Semaphore(0) + self._cond = synchronize.Condition() def __getstate__(self): return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) @@ -331,19 +333,19 @@ class SimpleQueue(object): def __init__(self): - self._reader, self._writer = Pipe(duplex=False) - self._rlock = Lock() + self._reader, self._writer = connection.Pipe(duplex=False) + self._rlock = synchronize.Lock() self._poll = self._reader.poll if sys.platform == 'win32': self._wlock = None else: - self._wlock = Lock() + self._wlock = synchronize.Lock() def empty(self): return not self._poll() def __getstate__(self): - assert_spawning(self) + popen.assert_spawning(self) return (self._reader, self._writer, self._rlock, self._wlock) def __setstate__(self, state): diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/reduction.py --- a/Lib/multiprocessing/reduction.py Wed Aug 07 05:54:28 2013 -0700 +++ b/Lib/multiprocessing/reduction.py Wed Aug 07 22:43:26 2013 +0100 @@ -1,6 +1,5 @@ # -# Module to allow connection and socket objects to be transferred -# between processes +# Module which deals with pickling of objects. # # multiprocessing/reduction.py # @@ -8,27 +7,57 @@ # Licensed to PSF under a Contributor Agreement. # -__all__ = ['reduce_socket', 'reduce_connection', 'send_handle', 'recv_handle'] +import copyreg +import functools +import io +import os +import pickle +import socket +import sys -import os -import sys -import socket -import threading -import struct -import signal +from . import popen +from . 
import util -from multiprocessing import current_process -from multiprocessing.util import register_after_fork, debug, sub_debug -from multiprocessing.util import is_exiting, sub_warning +__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] +HAVE_SEND_HANDLE = (sys.platform == 'win32' or + (hasattr(socket, 'CMSG_LEN') and + hasattr(socket, 'SCM_RIGHTS') and + hasattr(socket.socket, 'sendmsg'))) + # -# +# Pickler subclass # -if not(sys.platform == 'win32' or (hasattr(socket, 'CMSG_LEN') and - hasattr(socket, 'SCM_RIGHTS'))): - raise ImportError('pickling of connections not supported') +class ForkingPickler(pickle.Pickler): + '''Pickler subclass used by multiprocessing.''' + _extra_reducers = {} + _copyreg_dispatch_table = copyreg.dispatch_table + + def __init__(self, *args): + super().__init__(*args) + self.dispatch_table = self._copyreg_dispatch_table.copy() + self.dispatch_table.update(self._extra_reducers) + + @classmethod + def register(cls, type, reduce): + '''Register a reduce function for a type.''' + cls._extra_reducers[type] = reduce + + @classmethod + def dumps(cls, obj, protocol=None): + buf = io.BytesIO() + cls(buf, protocol).dump(obj) + return buf.getbuffer() + + loads = pickle.loads + +register = ForkingPickler.register + +def dump(obj, file, protocol=None): + '''Replacement for pickle.dump() using ForkingPickler.''' + ForkingPickler(file, protocol).dump(obj) # # Platform specific definitions @@ -36,20 +65,44 @@ if sys.platform == 'win32': # Windows - __all__ += ['reduce_pipe_connection'] + __all__ += ['DupHandle', 'duplicate', 'steal_handle'] import _winapi + def duplicate(handle, target_process=None, inheritable=False): + '''Duplicate a handle. (target_process is a handle not a pid!)''' + if target_process is None: + target_process = _winapi.GetCurrentProcess() + return _winapi.DuplicateHandle( + _winapi.GetCurrentProcess(), handle, target_process, + 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) + + def steal_handle(source_pid, handle): + '''Steal a handle from process identified by source_pid.''' + source_process_handle = _winapi.OpenProcess( + _winapi.PROCESS_DUP_HANDLE, False, source_pid) + try: + return _winapi.DuplicateHandle( + source_process_handle, handle, + _winapi.GetCurrentProcess(), 0, False, + _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) + finally: + _winapi.CloseHandle(source_process_handle) + def send_handle(conn, handle, destination_pid): + '''Send a handle over a local connection.''' dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) conn.send(dh) def recv_handle(conn): + '''Receive a handle over a local connection.''' return conn.recv().detach() class DupHandle(object): + '''Picklable wrapper for a handle.''' def __init__(self, handle, access, pid=None): - # duplicate handle for process with given pid if pid is None: + # We just duplicate the handle in the current process and + # let the receiving process steal the handle. pid = os.getpid() proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) try: @@ -62,9 +115,12 @@ self._pid = pid def detach(self): + '''Get the handle. This should only be called once.''' # retrieve handle from process which currently owns it if self._pid == os.getpid(): + # The handle has already been duplicated for this process. return self._handle + # We must steal the handle from the process whose pid is self._pid. 
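# [Editor's sketch -- not part of the changeset. A minimal illustration of
# the new ForkingPickler registry defined above: a reducer registered via
# register() is seen only by multiprocessing's pickler and leaves the
# global copyreg dispatch table untouched. 'Point' and '_reduce_point'
# are illustrative names, not part of the patch.]

import pickle
from multiprocessing.reduction import ForkingPickler, register

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

def _reduce_point(p):
    # a reducer returns (callable, args) used to rebuild the object
    return Point, (p.x, p.y)

register(Point, _reduce_point)            # affects ForkingPickler only
data = ForkingPickler.dumps(Point(1, 2))  # uses the extra reducer
q = pickle.loads(data)                    # plain pickle can load the result
assert (q.x, q.y) == (1, 2)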
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, self._pid) try: @@ -74,207 +130,112 @@ finally: _winapi.CloseHandle(proc) - class DupSocket(object): - def __init__(self, sock): - new_sock = sock.dup() - def send(conn, pid): - share = new_sock.share(pid) - conn.send_bytes(share) - self._id = resource_sharer.register(send, new_sock.close) - - def detach(self): - conn = resource_sharer.get_connection(self._id) - try: - share = conn.recv_bytes() - return socket.fromshare(share) - finally: - conn.close() - - def reduce_socket(s): - return rebuild_socket, (DupSocket(s),) - - def rebuild_socket(ds): - return ds.detach() - - def reduce_connection(conn): - handle = conn.fileno() - with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s: - ds = DupSocket(s) - return rebuild_connection, (ds, conn.readable, conn.writable) - - def rebuild_connection(ds, readable, writable): - from .connection import Connection - sock = ds.detach() - return Connection(sock.detach(), readable, writable) - - def reduce_pipe_connection(conn): - access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) | - (_winapi.FILE_GENERIC_WRITE if conn.writable else 0)) - dh = DupHandle(conn.fileno(), access) - return rebuild_pipe_connection, (dh, conn.readable, conn.writable) - - def rebuild_pipe_connection(dh, readable, writable): - from .connection import PipeConnection - handle = dh.detach() - return PipeConnection(handle, readable, writable) - else: # Unix + __all__ += ['DupFd', 'sendfds', 'recvfds'] + import array # On MacOSX we should acknowledge receipt of fds -- see Issue14669 ACKNOWLEDGE = sys.platform == 'darwin' - def send_handle(conn, handle, destination_pid): - with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: - s.sendmsg([b'x'], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, - struct.pack("@i", handle))]) - if ACKNOWLEDGE and conn.recv_bytes() != b'ACK': + def sendfds(sock, fds): + '''Send an array of fds over an AF_UNIX socket.''' + fds = array.array('i', fds) + msg = bytes([len(fds) % 256]) + sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) + if ACKNOWLEDGE and sock.recv(1) != b'A': raise RuntimeError('did not receive acknowledgement of fd') + def recvfds(sock, size): + '''Receive an array of fds over an AF_UNIX socket.''' + a = array.array('i') + bytes_size = a.itemsize * size + msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_LEN(bytes_size)) + if not msg and not ancdata: + raise EOFError + try: + if ACKNOWLEDGE: + sock.send(b'A') + if len(ancdata) != 1: + raise RuntimeError('received %d items of ancdata' % + len(ancdata)) + cmsg_level, cmsg_type, cmsg_data = ancdata[0] + if (cmsg_level == socket.SOL_SOCKET and + cmsg_type == socket.SCM_RIGHTS): + if len(cmsg_data) % a.itemsize != 0: + raise ValueError + a.frombytes(cmsg_data) + assert len(a) % 256 == msg[0] + return list(a) + except (ValueError, IndexError): + pass + raise RuntimeError('Invalid data received') + + def send_handle(conn, handle, destination_pid): + '''Send a handle over a local connection.''' + with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: + sendfds(s, [handle]) + def recv_handle(conn): - size = struct.calcsize("@i") + '''Receive a handle over a local connection.''' with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: - msg, ancdata, flags, addr = s.recvmsg(1, socket.CMSG_LEN(size)) - try: - if ACKNOWLEDGE: - conn.send_bytes(b'ACK') - cmsg_level, cmsg_type, cmsg_data = ancdata[0] - if (cmsg_level == socket.SOL_SOCKET and - 
cmsg_type == socket.SCM_RIGHTS): - return struct.unpack("@i", cmsg_data[:size])[0] - except (ValueError, IndexError, struct.error): - pass - raise RuntimeError('Invalid data received') + return recvfds(s, 1)[0] - class DupFd(object): - def __init__(self, fd): - new_fd = os.dup(fd) - def send(conn, pid): - send_handle(conn, new_fd, pid) - def close(): - os.close(new_fd) - self._id = resource_sharer.register(send, close) - - def detach(self): - conn = resource_sharer.get_connection(self._id) - try: - return recv_handle(conn) - finally: - conn.close() - - def reduce_socket(s): - df = DupFd(s.fileno()) - return rebuild_socket, (df, s.family, s.type, s.proto) - - def rebuild_socket(df, family, type, proto): - fd = df.detach() - s = socket.fromfd(fd, family, type, proto) - os.close(fd) - return s - - def reduce_connection(conn): - df = DupFd(conn.fileno()) - return rebuild_connection, (df, conn.readable, conn.writable) - - def rebuild_connection(df, readable, writable): - from .connection import Connection - fd = df.detach() - return Connection(fd, readable, writable) + def DupFd(fd): + '''Return a wrapper for an fd.''' + popen_obj = popen.get_spawning_popen() + if popen_obj is not None: + return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) + elif HAVE_SEND_HANDLE: + from . import resource_sharer + return resource_sharer.DupFd(fd) + else: + raise ValueError('SCM_RIGHTS appears not to be available') # -# Server which shares registered resources with clients +# Try making some callable types picklable # -class ResourceSharer(object): - def __init__(self): - self._key = 0 - self._cache = {} - self._old_locks = [] - self._lock = threading.Lock() - self._listener = None - self._address = None - self._thread = None - register_after_fork(self, ResourceSharer._afterfork) +def _reduce_method(m): + if m.__self__ is None: + return getattr, (m.__class__, m.__func__.__name__) + else: + return getattr, (m.__self__, m.__func__.__name__) +class _C: + def f(self): + pass +register(type(_C().f), _reduce_method) - def register(self, send, close): - with self._lock: - if self._address is None: - self._start() - self._key += 1 - self._cache[self._key] = (send, close) - return (self._address, self._key) - @staticmethod - def get_connection(ident): - from .connection import Client - address, key = ident - c = Client(address, authkey=current_process().authkey) - c.send((key, os.getpid())) - return c +def _reduce_method_descriptor(m): + return getattr, (m.__objclass__, m.__name__) +register(type(list.append), _reduce_method_descriptor) +register(type(int.__add__), _reduce_method_descriptor) - def stop(self, timeout=None): - from .connection import Client - with self._lock: - if self._address is not None: - c = Client(self._address, authkey=current_process().authkey) - c.send(None) - c.close() - self._thread.join(timeout) - if self._thread.is_alive(): - sub_warn('ResourceSharer thread did not stop when asked') - self._listener.close() - self._thread = None - self._address = None - self._listener = None - for key, (send, close) in self._cache.items(): - close() - self._cache.clear() - def _afterfork(self): - for key, (send, close) in self._cache.items(): - close() - self._cache.clear() - # If self._lock was locked at the time of the fork, it may be broken - # -- see issue 6721. Replace it without letting it be gc'ed. 
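# [Editor's sketch -- not part of the changeset. Passing a file descriptor
# between two processes with the new sendfds()/recvfds() helpers shown
# above; Unix only, and assumes the default fork start method so the
# socket object is simply inherited by the child.]

import multiprocessing, os, socket
from multiprocessing.reduction import sendfds, recvfds

def child(sock):
    fd = recvfds(sock, 1)[0]          # receive exactly one descriptor
    with os.fdopen(fd, 'rb') as f:
        assert f.read() == b'hello'

if __name__ == '__main__':
    left, right = socket.socketpair(socket.AF_UNIX)
    r, w = os.pipe()
    os.write(w, b'hello')
    os.close(w)
    p = multiprocessing.Process(target=child, args=(right,))
    p.start()
    sendfds(left, [r])                # the kernel duplicates r into the child
    os.close(r)
    p.join()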
- self._old_locks.append(self._lock) - self._lock = threading.Lock() - if self._listener is not None: - self._listener.close() - self._listener = None - self._address = None - self._thread = None +def _reduce_partial(p): + return _rebuild_partial, (p.func, p.args, p.keywords or {}) +def _rebuild_partial(func, args, keywords): + return functools.partial(func, *args, **keywords) +register(functools.partial, _reduce_partial) - def _start(self): - from .connection import Listener - assert self._listener is None - debug('starting listener and thread for sending handles') - self._listener = Listener(authkey=current_process().authkey) - self._address = self._listener.address - t = threading.Thread(target=self._serve) - t.daemon = True - t.start() - self._thread = t +# +# Make sockets picklable +# - def _serve(self): - if hasattr(signal, 'pthread_sigmask'): - signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG)) - while 1: - try: - conn = self._listener.accept() - msg = conn.recv() - if msg is None: - break - key, destination_pid = msg - send, close = self._cache.pop(key) - send(conn, destination_pid) - close() - conn.close() - except: - if not is_exiting(): - import traceback - sub_warning( - 'thread for sharing handles raised exception :\n' + - '-'*79 + '\n' + traceback.format_exc() + '-'*79 - ) +if sys.platform == 'win32': + def _reduce_socket(s): + from .resource_sharer import DupSocket + return _rebuild_socket, (DupSocket(s),) + def _rebuild_socket(ds): + return ds.detach() + register(socket.socket, _reduce_socket) -resource_sharer = ResourceSharer() +else: + def _reduce_socket(s): + df = DupFd(s.fileno()) + return _rebuild_socket, (df, s.family, s.type, s.proto) + def _rebuild_socket(df, family, type, proto): + fd = df.detach() + return socket.socket(family, type, proto, fileno=fd) + register(socket.socket, _reduce_socket) diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/resource_sharer.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Lib/multiprocessing/resource_sharer.py Wed Aug 07 22:43:26 2013 +0100 @@ -0,0 +1,161 @@ +# +# We use a background thread for sharing fds on Unix, and for sharing sockets on +# Windows. +# +# A client which wants to pickle a resource registers it with the resource +# sharer and gets an identifier in return. The unpickling process connects +# to the resource sharer, sends the identifier and its pid, and then receives +# the resource. +# + +import os +import signal +import socket +import sys +import threading + +from . import process +from . import reduction +from . import util + +__all__ = ['stop'] + + +if sys.platform == 'win32': + __all__ += ['DupSocket'] + + class DupSocket(object): + '''Picklable wrapper for a socket.''' + def __init__(self, sock): + new_sock = sock.dup() + def send(conn, pid): + share = new_sock.share(pid) + conn.send_bytes(share) + self._id = _resource_sharer.register(send, new_sock.close) + + def detach(self): + '''Get the socket. This should only be called once.''' + with _resource_sharer.get_connection(self._id) as conn: + share = conn.recv_bytes() + return socket.fromshare(share) + +else: + __all__ += ['DupFd'] + + class DupFd(object): + '''Wrapper for fd which can be used at any time.''' + def __init__(self, fd): + new_fd = os.dup(fd) + def send(conn, pid): + reduction.send_handle(conn, new_fd, pid) + def close(): + os.close(new_fd) + self._id = _resource_sharer.register(send, close) + + def detach(self): + '''Get the fd. 
This should only be called once.''' + with _resource_sharer.get_connection(self._id) as conn: + return reduction.recv_handle(conn) + + +class _ResourceSharer(object): + '''Manager for resources using a background thread.''' + def __init__(self): + self._key = 0 + self._cache = {} + self._old_locks = [] + self._lock = threading.Lock() + self._listener = None + self._address = None + self._thread = None + util.register_after_fork(self, _ResourceSharer._afterfork) + + def register(self, send, close): + '''Register resource, returning an identifier.''' + with self._lock: + if self._address is None: + self._start() + self._key += 1 + self._cache[self._key] = (send, close) + return (self._address, self._key) + + @staticmethod + def get_connection(ident): + '''Return connection from which to receive identified resource.''' + from .connection import Client + address, key = ident + c = Client(address, authkey=process.current_process().authkey) + c.send((key, os.getpid())) + return c + + def stop(self, timeout=None): + '''Stop the background thread and clear registered resources.''' + from .connection import Client + with self._lock: + if self._address is not None: + c = Client(self._address, + authkey=process.current_process().authkey) + c.send(None) + c.close() + self._thread.join(timeout) + if self._thread.is_alive(): + util.sub_warning('_ResourceSharer thread did ' + 'not stop when asked') + self._listener.close() + self._thread = None + self._address = None + self._listener = None + for key, (send, close) in self._cache.items(): + close() + self._cache.clear() + + def _afterfork(self): + for key, (send, close) in self._cache.items(): + close() + self._cache.clear() + # If self._lock was locked at the time of the fork, it may be broken + # -- see issue 6721. Replace it without letting it be gc'ed. + self._old_locks.append(self._lock) + self._lock = threading.Lock() + if self._listener is not None: + self._listener.close() + self._listener = None + self._address = None + self._thread = None + + def _start(self): + from .connection import Listener + assert self._listener is None + util.debug('starting listener and thread for sending handles') + self._listener = Listener(authkey=process.current_process().authkey) + self._address = self._listener.address + t = threading.Thread(target=self._serve) + t.daemon = True + t.start() + self._thread = t + + def _serve(self): + if hasattr(signal, 'pthread_sigmask'): + signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG)) + while 1: + try: + conn = self._listener.accept() + msg = conn.recv() + if msg is None: + break + key, destination_pid = msg + send, close = self._cache.pop(key) + send(conn, destination_pid) + close() + conn.close() + except: + if not util.is_exiting(): + import traceback + util.sub_warning( + 'thread for sharing handles raised exception :\n' + + '-'*79 + '\n' + traceback.format_exc() + '-'*79 + ) + + +_resource_sharer = _ResourceSharer() +stop = _resource_sharer.stop diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/semaphore_tracker.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Lib/multiprocessing/semaphore_tracker.py Wed Aug 07 22:43:26 2013 +0100 @@ -0,0 +1,119 @@ +# +# On Unix we run a server process which keeps track of unlinked +# semaphores. The server ignores SIGINT and SIGTERM and reads from a +# pipe. Every other process of the program has a copy of the writable +# end of the pipe, so we get EOF when all other processes have exited. +# Then the server process unlinks any remaining semaphore names. 
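# [Editor's sketch -- not part of the changeset. The tracker protocol
# described in the header above is line-oriented ASCII over the
# inherited pipe; '/mp1234-0' is an illustrative semaphore name.
# Unix only.]

from multiprocessing import semaphore_tracker

semaphore_tracker.ensure_running()         # spawn the tracker process once
semaphore_tracker.register('/mp1234-0')    # writes b'REGISTER:/mp1234-0\n'
semaphore_tracker.unregister('/mp1234-0')  # writes b'UNREGISTER:/mp1234-0\n'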
+# +# This is important because the system only supports a limited number +# of named semaphores, and they will not be automatically removed till +# the next reboot. Without this semaphore tracker process, "killall +# python" would probably leave unlinked semaphores. +# + +import errno +import os +import signal +import sys +import threading +import _multiprocessing + +from . import util +from . import current_process + +__all__ = ['ensure_running', 'register', 'unregister'] + + +_lock = threading.Lock() + + +def ensure_running(): + '''Make sure that semaphore tracker process is running.''' + with _lock: + config = current_process()._config + if config.get('semaphore_tracker_fd') is not None: + return + cmd = 'from multiprocessing.semaphore_tracker import main; main(%d)' + r, semaphore_tracker_fd = os.pipe() + try: + # process will outlive us, so no need to wait on pid + args = [sys.executable, '-c', cmd % r] + os.spawnv(os.P_NOWAIT, sys.executable, args) + except: + os.close(semaphore_tracker_fd) + raise + else: + config['semaphore_tracker_fd'] = semaphore_tracker_fd + finally: + os.close(r) + + +def register(name): + '''Register name of semaphore with semaphore tracker.''' + _send('REGISTER', name) + + +def unregister(name): + '''Unregister name of semaphore with semaphore tracker.''' + _send('UNREGISTER', name) + + +def _send(cmd, name): + msg = '{0}:{1}\n'.format(cmd, name).encode('ascii') + if len(msg) > 512: + # posix guarantees that writes to a pipe of less than PIPE_BUF + # bytes are atomic, and that PIPE_BUF >= 512 + raise ValueError('name too long') + fd = current_process()._config['semaphore_tracker_fd'] + nbytes = os.write(fd, msg) + assert nbytes == len(msg) + + +def main(fd): + '''Track unlinked semaphores.''' + # protect the process from ^C and "killall python" etc + signal.signal(signal.SIGINT, signal.SIG_IGN) + signal.signal(signal.SIGTERM, signal.SIG_IGN) + + # close all fds except fd and stderr + fds_to_keep = [fd] + try: + fds_to_keep.append(sys.stderr.fileno()) + except Exception: + pass + util.close_all_fds_except(fds_to_keep) + + cache = set() + try: + # keep track of registered/unregistered semaphores + with open(fd, 'rb') as f: + for line in f: + try: + cmd, name = line.strip().split(b':') + if cmd == b'REGISTER': + cache.add(name) + elif cmd == b'UNREGISTER': + cache.remove(name) + else: + raise RuntimeError('unrecognized command %r' % cmd) + except Exception: + try: + sys.excepthook(*sys.exc_info()) + except: + pass + finally: + # all processes have terminated; clean up any remaining semaphores + for name in cache: + try: + name = name.decode('ascii') + try: + _multiprocessing.sem_unlink(name) + except Exception as e: + print('[semaphore_tracker] %r: %s' % (name, e), + file=sys.stderr) + else: + print('[semaphore_tracker] %r: successfully unlinked' % + name, + file=sys.stderr) + finally: + pass diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/sharedctypes.py --- a/Lib/multiprocessing/sharedctypes.py Wed Aug 07 05:54:28 2013 -0700 +++ b/Lib/multiprocessing/sharedctypes.py Wed Aug 07 22:43:26 2013 +0100 @@ -10,8 +10,11 @@ import ctypes import weakref -from multiprocessing import heap, RLock -from multiprocessing.forking import assert_spawning, ForkingPickler +from . 
import heap + +from .synchronize import RLock +from .reduction import ForkingPickler +from .popen import assert_spawning __all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/spawn.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Lib/multiprocessing/spawn.py Wed Aug 07 22:43:26 2013 +0100 @@ -0,0 +1,242 @@ +# +# Code used to start processes when using the spawn or forkserver +# start methods. +# +# multiprocessing/spawn.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +import os +import pickle +import sys + +from . import process +from . import util +from . import popen + +__all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable', + 'get_preparation_data', 'get_command_line'] + +# +# _python_exe is the assumed path to the python executable. +# People embedding Python want to modify it. +# + +if sys.platform != 'win32': + WINEXE = False + WINSERVICE = False +else: + WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) + WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") + +if WINSERVICE: + _python_exe = os.path.join(sys.exec_prefix, 'python.exe') +else: + _python_exe = sys.executable + +def set_executable(exe): + global _python_exe + _python_exe = exe + +def get_executable(): + return _python_exe + +# +# +# + +def is_forking(argv): + ''' + Return whether commandline indicates we are forking + ''' + if len(argv) >= 2 and argv[1] == '--multiprocessing-fork': + return True + else: + return False + + +def freeze_support(): + ''' + Run code for process object if this is not the main process + ''' + if is_forking(sys.argv): + spawn_main() + sys.exit() + + +def get_command_line(): + ''' + Returns prefix of command line used for spawning a child process + ''' + if getattr(process.current_process(), '_inheriting', False): + raise RuntimeError(''' + An attempt has been made to start a new process before the + current process has finished its bootstrapping phase. + + This probably means that you are not using fork to start your + child processes and you have forgotten to use the proper idiom + in the main module: + + if __name__ == '__main__': + freeze_support() + ... 
+ + The "freeze_support()" line can be omitted if the program + is not going to be frozen to produce an executable.''') + + if getattr(sys, 'frozen', False): + return [sys.executable, '--multiprocessing-fork'] + else: + prog = 'from multiprocessing.spawn import spawn_main; spawn_main()' + opts = util._args_from_interpreter_flags() + return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork'] + + +def spawn_main(): + ''' + Run code specified by data received over pipe + ''' + assert is_forking(sys.argv) + handle = int(sys.argv[-1]) + if sys.platform == 'win32': + import msvcrt + from .reduction import steal_handle + pid = int(sys.argv[-2]) + new_handle = steal_handle(pid, handle) + fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY) + else: + fd = handle + exitcode = _main(fd) + sys.exit(exitcode) + + +def _main(fd, method=None): + with os.fdopen(fd, 'rb', closefd=True) as from_parent: + process.current_process()._inheriting = True + try: + preparation_data = pickle.load(from_parent) + prepare(preparation_data) + self = pickle.load(from_parent) + finally: + del process.current_process()._inheriting + return self._bootstrap() + + +def get_preparation_data(name, restricted=False): + ''' + Return info about parent needed by child to unpickle process object + ''' + d = dict( + log_to_stderr=util._log_to_stderr, + authkey=process.current_process().authkey, + ) + + if util._logger is not None: + d['log_level'] = util._logger.getEffectiveLevel() + + if restricted: + return d + + d.update( + name=name, + sys_path=sys.path, + sys_argv=sys.argv, + orig_dir=process.ORIGINAL_DIR, + start_method=popen.get_start_method(), + ) + + if sys.platform != 'win32' or (not WINEXE and not WINSERVICE): + main_path = getattr(sys.modules['__main__'], '__file__', None) + if not main_path and sys.argv[0] not in ('', '-c'): + main_path = sys.argv[0] + if main_path is not None: + if (not os.path.isabs(main_path) and + process.ORIGINAL_DIR is not None): + main_path = os.path.join(process.ORIGINAL_DIR, main_path) + d['main_path'] = os.path.normpath(main_path) + + return d + +# +# Prepare current process +# + +old_main_modules = [] + +def prepare(data): + ''' + Try to get current process ready to unpickle process object + ''' + old_main_modules.append(sys.modules['__main__']) + + if 'name' in data: + process.current_process().name = data['name'] + + if 'authkey' in data: + process.current_process().authkey = data['authkey'] + + if 'log_to_stderr' in data and data['log_to_stderr']: + util.log_to_stderr() + + if 'log_level' in data: + util.get_logger().setLevel(data['log_level']) + + if 'sys_path' in data: + sys.path = data['sys_path'] + + if 'sys_argv' in data: + sys.argv = data['sys_argv'] + + if 'dir' in data: + os.chdir(data['dir']) + + if 'orig_dir' in data: + process.ORIGINAL_DIR = data['orig_dir'] + + if 'start_method' in data: + popen.set_start_method(data['start_method'], + start_helpers=False) + + if 'main_path' in data: + # XXX (ncoghlan): The following code makes several bogus + # assumptions regarding the relationship between __file__ + # and a module's real name. 
See PEP 302 and issue #10845 + main_path = data['main_path'] + main_name = os.path.splitext(os.path.basename(main_path))[0] + if main_name == '__init__': + main_name = os.path.basename(os.path.dirname(main_path)) + + if main_name == '__main__': + main_module = sys.modules['__main__'] + main_module.__file__ = main_path + elif main_name != 'ipython': + # Main modules not actually called __main__.py may + # contain additional code that should still be executed + import importlib + import types + + if main_path is None: + dirs = None + elif os.path.basename(main_path).startswith('__init__.py'): + dirs = [os.path.dirname(os.path.dirname(main_path))] + else: + dirs = [os.path.dirname(main_path)] + + assert main_name not in sys.modules, main_name + sys.modules.pop('__mp_main__', None) + # We should not try to load __main__ + # since that would execute 'if __name__ == "__main__"' + # clauses, potentially causing a pseudo fork bomb. + loader = importlib.find_loader(main_name, path=dirs) + main_module = types.ModuleType(main_name) + try: + loader.init_module_attrs(main_module) + except AttributeError: # init_module_attrs is optional + pass + main_module.__name__ = '__mp_main__' + code = loader.get_code(main_name) + exec(code, main_module.__dict__) + + sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/synchronize.py --- a/Lib/multiprocessing/synchronize.py Wed Aug 07 05:54:28 2013 -0700 +++ b/Lib/multiprocessing/synchronize.py Wed Aug 07 22:43:26 2013 +0100 @@ -11,20 +11,23 @@ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event' ] +import os import threading import sys +import itertools +import _multiprocessing -import _multiprocessing -from multiprocessing.process import current_process -from multiprocessing.util import register_after_fork, debug -from multiprocessing.forking import assert_spawning, Popen from time import time as _time +from . import popen +from . import process +from . import util + # Try to import the mp.synchronize module cleanly, if it fails # raise ImportError for platforms lacking a working sem_open implementation. # See issue 3770 try: - from _multiprocessing import SemLock + from _multiprocessing import SemLock, sem_unlink except (ImportError): raise ImportError("This platform lacks a functioning sem_open" + " implementation, therefore, the required" + @@ -44,15 +47,35 @@ class SemLock(object): + _counter = itertools.count() + def __init__(self, kind, value, maxvalue): - sl = self._semlock = _multiprocessing.SemLock(kind, value, maxvalue) - debug('created semlock with handle %s' % sl.handle) + unlink_immediately = (sys.platform == 'win32' or + popen.get_start_method() == 'fork') + sl = self._semlock = _multiprocessing.SemLock( + kind, value, maxvalue, self._make_name(), unlink_immediately) + util.debug('created semlock with handle %s' % sl.handle) self._make_methods() if sys.platform != 'win32': def _after_fork(obj): obj._semlock._after_fork() - register_after_fork(self, _after_fork) + util.register_after_fork(self, _after_fork) + + if self._semlock.name is not None: + # We only get here if we are on Unix with forking + # disabled. 
When the object is garbage collected or the + # process shuts down we unlink the semaphore name + from .semaphore_tracker import register + register(self._semlock.name) + util.Finalize(self, SemLock._cleanup, (self._semlock.name,), + exitpriority=0) + + @staticmethod + def _cleanup(name): + from .semaphore_tracker import unregister + sem_unlink(name) + unregister(name) def _make_methods(self): self.acquire = self._semlock.acquire @@ -65,15 +88,24 @@ return self._semlock.__exit__(*args) def __getstate__(self): - assert_spawning(self) + popen.assert_spawning(self) sl = self._semlock - return (Popen.duplicate_for_child(sl.handle), sl.kind, sl.maxvalue) + if sys.platform == 'win32': + h = popen.get_spawning_popen().duplicate_for_child(sl.handle) + else: + h = sl.handle + return (h, sl.kind, sl.maxvalue, sl.name) def __setstate__(self, state): self._semlock = _multiprocessing.SemLock._rebuild(*state) - debug('recreated blocker with handle %r' % state[0]) + util.debug('recreated blocker with handle %r' % state[0]) self._make_methods() + @staticmethod + def _make_name(): + return '/%s%s-%s' % (process.current_process()._config['semprefix'], + os.getpid(), next(SemLock._counter)) + # # Semaphore # @@ -122,7 +154,7 @@ def __repr__(self): try: if self._semlock._is_mine(): - name = current_process().name + name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name elif self._semlock._get_value() == 1: @@ -147,7 +179,7 @@ def __repr__(self): try: if self._semlock._is_mine(): - name = current_process().name + name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name count = self._semlock._count() @@ -175,7 +207,7 @@ self._make_methods() def __getstate__(self): - assert_spawning(self) + popen.assert_spawning(self) return (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) @@ -342,7 +374,7 @@ def __init__(self, parties, action=None, timeout=None): import struct - from multiprocessing.heap import BufferWrapper + from .heap import BufferWrapper wrapper = BufferWrapper(struct.calcsize('i') * 2) cond = Condition() self.__setstate__((parties, action, timeout, cond, wrapper)) diff -r 9877c25d9556 -r b3620777f54c Lib/multiprocessing/util.py --- a/Lib/multiprocessing/util.py Wed Aug 07 05:54:28 2013 -0700 +++ b/Lib/multiprocessing/util.py Wed Aug 07 22:43:26 2013 +0100 @@ -17,13 +17,13 @@ # cleanup function before multiprocessing does from subprocess import _args_from_interpreter_flags -from multiprocessing.process import current_process, active_children +from . 
import process __all__ = [ 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', 'log_to_stderr', 'get_temp_dir', 'register_after_fork', 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', - 'SUBDEBUG', 'SUBWARNING', + 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING', ] # @@ -111,13 +111,14 @@ def get_temp_dir(): # get name of a temp directory which will be automatically cleaned up - if current_process()._tempdir is None: + tempdir = process.current_process()._config.get('tempdir') + if tempdir is None: import shutil, tempfile tempdir = tempfile.mkdtemp(prefix='pymp-') info('created temp directory %s', tempdir) Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100) - current_process()._tempdir = tempdir - return current_process()._tempdir + process.current_process()._config['tempdir'] = tempdir + return tempdir # # Support for reinitialization of objects when bootstrapping a child process @@ -273,8 +274,8 @@ _exiting = False def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, - active_children=active_children, - current_process=current_process): + active_children=process.active_children, + current_process=process.current_process): # We hold on to references to functions in the arglist due to the # situation described below, where this function is called after this # module's globals are destroyed. @@ -303,7 +304,7 @@ # #9207. for p in active_children(): - if p._daemonic: + if p.daemon: info('calling terminate() for daemon %s', p.name) p._popen.terminate() @@ -335,3 +336,19 @@ register_after_fork(self, lambda obj : obj.__dict__.clear()) def __reduce__(self): return type(self), () + +# +# Close fds except those specified +# + +try: + MAXFD = os.sysconf("SC_OPEN_MAX") +except Exception: + MAXFD = 256 + +def close_all_fds_except(fds): + fds = list(fds) + [-1, MAXFD] + fds.sort() + assert fds[-1] == MAXFD, 'fd too large' + for i in range(len(fds) - 1): + os.closerange(fds[i]+1, fds[i+1]) diff -r 9877c25d9556 -r b3620777f54c Lib/test/_test_multiprocessing.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Lib/test/_test_multiprocessing.py Wed Aug 07 22:43:26 2013 +0100 @@ -0,0 +1,3742 @@ +#!/usr/bin/env python3 + +# +# Unit tests for the multiprocessing package +# + +import unittest +import queue as pyqueue +import time +import io +import itertools +import sys +import os +import gc +import errno +import signal +import array +import socket +import random +import logging +import struct +import operator +import test.support +import test.script_helper + + +# Skip tests if _multiprocessing wasn't built. +_multiprocessing = test.support.import_module('_multiprocessing') +# Skip tests if sem_open implementation is broken. +test.support.import_module('multiprocessing.synchronize') +# import threading after _multiprocessing to raise a more relevant error +# message: "No module named _multiprocessing". _multiprocessing is not compiled +# without thread support. 
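# [Editor's sketch -- not part of the changeset. util.close_all_fds_except()
# above keeps only the listed descriptors, closing everything between
# consecutive kept fds with os.closerange(); it is intended for freshly
# started helper processes such as the semaphore tracker, not for a live
# process whose descriptors you still need.]

import os
from multiprocessing.util import close_all_fds_except

r, w = os.pipe()
close_all_fds_except([0, 1, 2, r])    # keep stdio and the read end
assert os.read(r, 1) == b''           # w was closed, so we see EOF at once
try:
    os.write(w, b'x')                 # w is no longer a valid descriptor
except OSError:
    pass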
+import threading + +import multiprocessing.dummy +import multiprocessing.connection +import multiprocessing.managers +import multiprocessing.heap +import multiprocessing.pool + +from multiprocessing import util + +try: + from multiprocessing import reduction + HAS_REDUCTION = reduction.HAVE_SEND_HANDLE +except ImportError: + HAS_REDUCTION = False + +try: + from multiprocessing.sharedctypes import Value, copy + HAS_SHAREDCTYPES = True +except ImportError: + HAS_SHAREDCTYPES = False + +try: + import msvcrt +except ImportError: + msvcrt = None + +# +# +# + +def latin(s): + return s.encode('latin') + +# +# Constants +# + +LOG_LEVEL = util.SUBWARNING +#LOG_LEVEL = logging.DEBUG + +DELTA = 0.1 +CHECK_TIMINGS = False # making true makes tests take a lot longer + # and can sometimes cause some non-serious + # failures because some calls block a bit + # longer than expected +if CHECK_TIMINGS: + TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 +else: + TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 + +HAVE_GETVALUE = not getattr(_multiprocessing, + 'HAVE_BROKEN_SEM_GETVALUE', False) + +WIN32 = (sys.platform == "win32") + +from multiprocessing.connection import wait + +def wait_for_handle(handle, timeout): + if timeout is not None and timeout < 0.0: + timeout = None + return wait([handle], timeout) + +try: + MAXFD = os.sysconf("SC_OPEN_MAX") +except: + MAXFD = 256 + +# +# Some tests require ctypes +# + +try: + from ctypes import Structure, c_int, c_double +except ImportError: + Structure = object + c_int = c_double = None + + +def check_enough_semaphores(): + """Check that the system supports enough semaphores to run the test.""" + # minimum number of semaphores available according to POSIX + nsems_min = 256 + try: + nsems = os.sysconf("SC_SEM_NSEMS_MAX") + except (AttributeError, ValueError): + # sysconf not available or setting not available + return + if nsems == -1 or nsems >= nsems_min: + return + raise unittest.SkipTest("The OS doesn't support enough semaphores " + "to run the test (required: %d)." % nsems_min) + + +# +# Creates a wrapper for a function which records the time it takes to finish +# + +class TimingWrapper(object): + + def __init__(self, func): + self.func = func + self.elapsed = None + + def __call__(self, *args, **kwds): + t = time.time() + try: + return self.func(*args, **kwds) + finally: + self.elapsed = time.time() - t + +# +# Base class for test cases +# + +class BaseTestCase(object): + + ALLOWED_TYPES = ('processes', 'manager', 'threads') + + def assertTimingAlmostEqual(self, a, b): + if CHECK_TIMINGS: + self.assertAlmostEqual(a, b, 1) + + def assertReturnsIfImplemented(self, value, func, *args): + try: + res = func(*args) + except NotImplementedError: + pass + else: + return self.assertEqual(value, res) + + # For the sanity of Windows users, rather than crashing or freezing in + # multiple ways. 
+ def __reduce__(self, *args): + raise NotImplementedError("shouldn't try to pickle a test case") + + __reduce_ex__ = __reduce__ + +# +# Return the value of a semaphore +# + +def get_value(self): + try: + return self.get_value() + except AttributeError: + try: + return self._Semaphore__value + except AttributeError: + try: + return self._value + except AttributeError: + raise NotImplementedError + +# +# Testcases +# + +class _TestProcess(BaseTestCase): + + ALLOWED_TYPES = ('processes', 'threads') + + def test_current(self): + if self.TYPE == 'threads': + return + + current = self.current_process() + authkey = current.authkey + + self.assertTrue(current.is_alive()) + self.assertTrue(not current.daemon) + self.assertIsInstance(authkey, bytes) + self.assertTrue(len(authkey) > 0) + self.assertEqual(current.ident, os.getpid()) + self.assertEqual(current.exitcode, None) + + def test_daemon_argument(self): + if self.TYPE == "threads": + return + + # By default uses the current process's daemon flag. + proc0 = self.Process(target=self._test) + self.assertEqual(proc0.daemon, self.current_process().daemon) + proc1 = self.Process(target=self._test, daemon=True) + self.assertTrue(proc1.daemon) + proc2 = self.Process(target=self._test, daemon=False) + self.assertFalse(proc2.daemon) + + @classmethod + def _test(cls, q, *args, **kwds): + current = cls.current_process() + q.put(args) + q.put(kwds) + q.put(current.name) + if cls.TYPE != 'threads': + q.put(bytes(current.authkey)) + q.put(current.pid) + + def test_process(self): + q = self.Queue(1) + e = self.Event() + args = (q, 1, 2) + kwargs = {'hello':23, 'bye':2.54} + name = 'SomeProcess' + p = self.Process( + target=self._test, args=args, kwargs=kwargs, name=name + ) + p.daemon = True + current = self.current_process() + + if self.TYPE != 'threads': + self.assertEqual(p.authkey, current.authkey) + self.assertEqual(p.is_alive(), False) + self.assertEqual(p.daemon, True) + self.assertNotIn(p, self.active_children()) + self.assertTrue(type(self.active_children()) is list) + self.assertEqual(p.exitcode, None) + + p.start() + + self.assertEqual(p.exitcode, None) + self.assertEqual(p.is_alive(), True) + self.assertIn(p, self.active_children()) + + self.assertEqual(q.get(), args[1:]) + self.assertEqual(q.get(), kwargs) + self.assertEqual(q.get(), p.name) + if self.TYPE != 'threads': + self.assertEqual(q.get(), current.authkey) + self.assertEqual(q.get(), p.pid) + + p.join() + + self.assertEqual(p.exitcode, 0) + self.assertEqual(p.is_alive(), False) + self.assertNotIn(p, self.active_children()) + + @classmethod + def _test_terminate(cls): + time.sleep(1000) + + def test_terminate(self): + if self.TYPE == 'threads': + return + + p = self.Process(target=self._test_terminate) + p.daemon = True + p.start() + + self.assertEqual(p.is_alive(), True) + self.assertIn(p, self.active_children()) + self.assertEqual(p.exitcode, None) + + join = TimingWrapper(p.join) + + self.assertEqual(join(0), None) + self.assertTimingAlmostEqual(join.elapsed, 0.0) + self.assertEqual(p.is_alive(), True) + + self.assertEqual(join(-1), None) + self.assertTimingAlmostEqual(join.elapsed, 0.0) + self.assertEqual(p.is_alive(), True) + + p.terminate() + + self.assertEqual(join(), None) + self.assertTimingAlmostEqual(join.elapsed, 0.0) + + self.assertEqual(p.is_alive(), False) + self.assertNotIn(p, self.active_children()) + + p.join() + + # XXX sometimes get p.exitcode == 0 on Windows ... 
+ #self.assertEqual(p.exitcode, -signal.SIGTERM) + + def test_cpu_count(self): + try: + cpus = multiprocessing.cpu_count() + except NotImplementedError: + cpus = 1 + self.assertTrue(type(cpus) is int) + self.assertTrue(cpus >= 1) + + def test_active_children(self): + self.assertEqual(type(self.active_children()), list) + + p = self.Process(target=time.sleep, args=(DELTA,)) + self.assertNotIn(p, self.active_children()) + + p.daemon = True + p.start() + self.assertIn(p, self.active_children()) + + p.join() + self.assertNotIn(p, self.active_children()) + + @classmethod + def _test_recursion(cls, wconn, id): + wconn.send(id) + if len(id) < 2: + for i in range(2): + p = cls.Process( + target=cls._test_recursion, args=(wconn, id+[i]) + ) + p.start() + p.join() + + def test_recursion(self): + rconn, wconn = self.Pipe(duplex=False) + self._test_recursion(wconn, []) + + time.sleep(DELTA) + result = [] + while rconn.poll(): + result.append(rconn.recv()) + + expected = [ + [], + [0], + [0, 0], + [0, 1], + [1], + [1, 0], + [1, 1] + ] + self.assertEqual(result, expected) + + @classmethod + def _test_sentinel(cls, event): + event.wait(10.0) + + def test_sentinel(self): + if self.TYPE == "threads": + return + event = self.Event() + p = self.Process(target=self._test_sentinel, args=(event,)) + with self.assertRaises(ValueError): + p.sentinel + p.start() + self.addCleanup(p.join) + sentinel = p.sentinel + self.assertIsInstance(sentinel, int) + self.assertFalse(wait_for_handle(sentinel, timeout=0.0)) + event.set() + p.join() + self.assertTrue(wait_for_handle(sentinel, timeout=1)) + +# +# +# + +class _UpperCaser(multiprocessing.Process): + + def __init__(self): + multiprocessing.Process.__init__(self) + self.child_conn, self.parent_conn = multiprocessing.Pipe() + + def run(self): + self.parent_conn.close() + for s in iter(self.child_conn.recv, None): + self.child_conn.send(s.upper()) + self.child_conn.close() + + def submit(self, s): + assert type(s) is str + self.parent_conn.send(s) + return self.parent_conn.recv() + + def stop(self): + self.parent_conn.send(None) + self.parent_conn.close() + self.child_conn.close() + +class _TestSubclassingProcess(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def test_subclassing(self): + uppercaser = _UpperCaser() + uppercaser.daemon = True + uppercaser.start() + self.assertEqual(uppercaser.submit('hello'), 'HELLO') + self.assertEqual(uppercaser.submit('world'), 'WORLD') + uppercaser.stop() + uppercaser.join() + + def test_stderr_flush(self): + # sys.stderr is flushed at process shutdown (issue #13812) + if self.TYPE == "threads": + return + + testfn = test.support.TESTFN + self.addCleanup(test.support.unlink, testfn) + proc = self.Process(target=self._test_stderr_flush, args=(testfn,)) + proc.start() + proc.join() + with open(testfn, 'r') as f: + err = f.read() + # The whole traceback was printed + self.assertIn("ZeroDivisionError", err) + self.assertIn("test_multiprocessing.py", err) + self.assertIn("1/0 # MARKER", err) + + @classmethod + def _test_stderr_flush(cls, testfn): + sys.stderr = open(testfn, 'w') + 1/0 # MARKER + + + @classmethod + def _test_sys_exit(cls, reason, testfn): + sys.stderr = open(testfn, 'w') + sys.exit(reason) + + def test_sys_exit(self): + # See Issue 13854 + if self.TYPE == 'threads': + return + + testfn = test.support.TESTFN + self.addCleanup(test.support.unlink, testfn) + + for reason, code in (([1, 2, 3], 1), ('ignore this', 0)): + p = self.Process(target=self._test_sys_exit, args=(reason, testfn)) + p.daemon = True + p.start() + 
p.join(5) + self.assertEqual(p.exitcode, code) + + with open(testfn, 'r') as f: + self.assertEqual(f.read().rstrip(), str(reason)) + + for reason in (True, False, 8): + p = self.Process(target=sys.exit, args=(reason,)) + p.daemon = True + p.start() + p.join(5) + self.assertEqual(p.exitcode, reason) + +# +# +# + +def queue_empty(q): + if hasattr(q, 'empty'): + return q.empty() + else: + return q.qsize() == 0 + +def queue_full(q, maxsize): + if hasattr(q, 'full'): + return q.full() + else: + return q.qsize() == maxsize + + +class _TestQueue(BaseTestCase): + + + @classmethod + def _test_put(cls, queue, child_can_start, parent_can_continue): + child_can_start.wait() + for i in range(6): + queue.get() + parent_can_continue.set() + + def test_put(self): + MAXSIZE = 6 + queue = self.Queue(maxsize=MAXSIZE) + child_can_start = self.Event() + parent_can_continue = self.Event() + + proc = self.Process( + target=self._test_put, + args=(queue, child_can_start, parent_can_continue) + ) + proc.daemon = True + proc.start() + + self.assertEqual(queue_empty(queue), True) + self.assertEqual(queue_full(queue, MAXSIZE), False) + + queue.put(1) + queue.put(2, True) + queue.put(3, True, None) + queue.put(4, False) + queue.put(5, False, None) + queue.put_nowait(6) + + # the values may be in buffer but not yet in pipe so sleep a bit + time.sleep(DELTA) + + self.assertEqual(queue_empty(queue), False) + self.assertEqual(queue_full(queue, MAXSIZE), True) + + put = TimingWrapper(queue.put) + put_nowait = TimingWrapper(queue.put_nowait) + + self.assertRaises(pyqueue.Full, put, 7, False) + self.assertTimingAlmostEqual(put.elapsed, 0) + + self.assertRaises(pyqueue.Full, put, 7, False, None) + self.assertTimingAlmostEqual(put.elapsed, 0) + + self.assertRaises(pyqueue.Full, put_nowait, 7) + self.assertTimingAlmostEqual(put_nowait.elapsed, 0) + + self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1) + self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) + + self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2) + self.assertTimingAlmostEqual(put.elapsed, 0) + + self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3) + self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) + + child_can_start.set() + parent_can_continue.wait() + + self.assertEqual(queue_empty(queue), True) + self.assertEqual(queue_full(queue, MAXSIZE), False) + + proc.join() + + @classmethod + def _test_get(cls, queue, child_can_start, parent_can_continue): + child_can_start.wait() + #queue.put(1) + queue.put(2) + queue.put(3) + queue.put(4) + queue.put(5) + parent_can_continue.set() + + def test_get(self): + queue = self.Queue() + child_can_start = self.Event() + parent_can_continue = self.Event() + + proc = self.Process( + target=self._test_get, + args=(queue, child_can_start, parent_can_continue) + ) + proc.daemon = True + proc.start() + + self.assertEqual(queue_empty(queue), True) + + child_can_start.set() + parent_can_continue.wait() + + time.sleep(DELTA) + self.assertEqual(queue_empty(queue), False) + + # Hangs unexpectedly, remove for now + #self.assertEqual(queue.get(), 1) + self.assertEqual(queue.get(True, None), 2) + self.assertEqual(queue.get(True), 3) + self.assertEqual(queue.get(timeout=1), 4) + self.assertEqual(queue.get_nowait(), 5) + + self.assertEqual(queue_empty(queue), True) + + get = TimingWrapper(queue.get) + get_nowait = TimingWrapper(queue.get_nowait) + + self.assertRaises(pyqueue.Empty, get, False) + self.assertTimingAlmostEqual(get.elapsed, 0) + + self.assertRaises(pyqueue.Empty, get, False, None) + 
self.assertTimingAlmostEqual(get.elapsed, 0) + + self.assertRaises(pyqueue.Empty, get_nowait) + self.assertTimingAlmostEqual(get_nowait.elapsed, 0) + + self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1) + self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) + + self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2) + self.assertTimingAlmostEqual(get.elapsed, 0) + + self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3) + self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) + + proc.join() + + @classmethod + def _test_fork(cls, queue): + for i in range(10, 20): + queue.put(i) + # note that at this point the items may only be buffered, so the + # process cannot shutdown until the feeder thread has finished + # pushing items onto the pipe. + + def test_fork(self): + # Old versions of Queue would fail to create a new feeder + # thread for a forked process if the original process had its + # own feeder thread. This test checks that this no longer + # happens. + + queue = self.Queue() + + # put items on queue so that main process starts a feeder thread + for i in range(10): + queue.put(i) + + # wait to make sure thread starts before we fork a new process + time.sleep(DELTA) + + # fork process + p = self.Process(target=self._test_fork, args=(queue,)) + p.daemon = True + p.start() + + # check that all expected items are in the queue + for i in range(20): + self.assertEqual(queue.get(), i) + self.assertRaises(pyqueue.Empty, queue.get, False) + + p.join() + + def test_qsize(self): + q = self.Queue() + try: + self.assertEqual(q.qsize(), 0) + except NotImplementedError: + return + q.put(1) + self.assertEqual(q.qsize(), 1) + q.put(5) + self.assertEqual(q.qsize(), 2) + q.get() + self.assertEqual(q.qsize(), 1) + q.get() + self.assertEqual(q.qsize(), 0) + + @classmethod + def _test_task_done(cls, q): + for obj in iter(q.get, None): + time.sleep(DELTA) + q.task_done() + + def test_task_done(self): + queue = self.JoinableQueue() + + if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'): + self.skipTest("requires 'queue.task_done()' method") + + workers = [self.Process(target=self._test_task_done, args=(queue,)) + for i in range(4)] + + for p in workers: + p.daemon = True + p.start() + + for i in range(10): + queue.put(i) + + queue.join() + + for p in workers: + queue.put(None) + + for p in workers: + p.join() + + def test_timeout(self): + q = multiprocessing.Queue() + start = time.time() + self.assertRaises(pyqueue.Empty, q.get, True, 0.2) + delta = time.time() - start + self.assertGreaterEqual(delta, 0.19) + +# +# +# + +class _TestLock(BaseTestCase): + + def test_lock(self): + lock = self.Lock() + self.assertEqual(lock.acquire(), True) + self.assertEqual(lock.acquire(False), False) + self.assertEqual(lock.release(), None) + self.assertRaises((ValueError, threading.ThreadError), lock.release) + + def test_rlock(self): + lock = self.RLock() + self.assertEqual(lock.acquire(), True) + self.assertEqual(lock.acquire(), True) + self.assertEqual(lock.acquire(), True) + self.assertEqual(lock.release(), None) + self.assertEqual(lock.release(), None) + self.assertEqual(lock.release(), None) + self.assertRaises((AssertionError, RuntimeError), lock.release) + + def test_lock_context(self): + with self.Lock(): + pass + + +class _TestSemaphore(BaseTestCase): + + def _test_semaphore(self, sem): + self.assertReturnsIfImplemented(2, get_value, sem) + self.assertEqual(sem.acquire(), True) + self.assertReturnsIfImplemented(1, get_value, sem) + self.assertEqual(sem.acquire(), True) + 
self.assertReturnsIfImplemented(0, get_value, sem) + self.assertEqual(sem.acquire(False), False) + self.assertReturnsIfImplemented(0, get_value, sem) + self.assertEqual(sem.release(), None) + self.assertReturnsIfImplemented(1, get_value, sem) + self.assertEqual(sem.release(), None) + self.assertReturnsIfImplemented(2, get_value, sem) + + def test_semaphore(self): + sem = self.Semaphore(2) + self._test_semaphore(sem) + self.assertEqual(sem.release(), None) + self.assertReturnsIfImplemented(3, get_value, sem) + self.assertEqual(sem.release(), None) + self.assertReturnsIfImplemented(4, get_value, sem) + + def test_bounded_semaphore(self): + sem = self.BoundedSemaphore(2) + self._test_semaphore(sem) + # Currently fails on OS/X + #if HAVE_GETVALUE: + # self.assertRaises(ValueError, sem.release) + # self.assertReturnsIfImplemented(2, get_value, sem) + + def test_timeout(self): + if self.TYPE != 'processes': + return + + sem = self.Semaphore(0) + acquire = TimingWrapper(sem.acquire) + + self.assertEqual(acquire(False), False) + self.assertTimingAlmostEqual(acquire.elapsed, 0.0) + + self.assertEqual(acquire(False, None), False) + self.assertTimingAlmostEqual(acquire.elapsed, 0.0) + + self.assertEqual(acquire(False, TIMEOUT1), False) + self.assertTimingAlmostEqual(acquire.elapsed, 0) + + self.assertEqual(acquire(True, TIMEOUT2), False) + self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2) + + self.assertEqual(acquire(timeout=TIMEOUT3), False) + self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) + + +class _TestCondition(BaseTestCase): + + @classmethod + def f(cls, cond, sleeping, woken, timeout=None): + cond.acquire() + sleeping.release() + cond.wait(timeout) + woken.release() + cond.release() + + def check_invariant(self, cond): + # this is only supposed to succeed when there are no sleepers + if self.TYPE == 'processes': + try: + sleepers = (cond._sleeping_count.get_value() - + cond._woken_count.get_value()) + self.assertEqual(sleepers, 0) + self.assertEqual(cond._wait_semaphore.get_value(), 0) + except NotImplementedError: + pass + + def test_notify(self): + cond = self.Condition() + sleeping = self.Semaphore(0) + woken = self.Semaphore(0) + + p = self.Process(target=self.f, args=(cond, sleeping, woken)) + p.daemon = True + p.start() + + p = threading.Thread(target=self.f, args=(cond, sleeping, woken)) + p.daemon = True + p.start() + + # wait for both children to start sleeping + sleeping.acquire() + sleeping.acquire() + + # check no process/thread has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(0, get_value, woken) + + # wake up one process/thread + cond.acquire() + cond.notify() + cond.release() + + # check one process/thread has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(1, get_value, woken) + + # wake up another + cond.acquire() + cond.notify() + cond.release() + + # check other has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(2, get_value, woken) + + # check state is not mucked up + self.check_invariant(cond) + p.join() + + def test_notify_all(self): + cond = self.Condition() + sleeping = self.Semaphore(0) + woken = self.Semaphore(0) + + # start some threads/processes which will timeout + for i in range(3): + p = self.Process(target=self.f, + args=(cond, sleeping, woken, TIMEOUT1)) + p.daemon = True + p.start() + + t = threading.Thread(target=self.f, + args=(cond, sleeping, woken, TIMEOUT1)) + t.daemon = True + t.start() + + # wait for them all to sleep + for i in range(6): + sleeping.acquire() + + # check they have all timed 
out + for i in range(6): + woken.acquire() + self.assertReturnsIfImplemented(0, get_value, woken) + + # check state is not mucked up + self.check_invariant(cond) + + # start some more threads/processes + for i in range(3): + p = self.Process(target=self.f, args=(cond, sleeping, woken)) + p.daemon = True + p.start() + + t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) + t.daemon = True + t.start() + + # wait for them to all sleep + for i in range(6): + sleeping.acquire() + + # check no process/thread has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(0, get_value, woken) + + # wake them all up + cond.acquire() + cond.notify_all() + cond.release() + + # check they have all woken + for i in range(10): + try: + if get_value(woken) == 6: + break + except NotImplementedError: + break + time.sleep(DELTA) + self.assertReturnsIfImplemented(6, get_value, woken) + + # check state is not mucked up + self.check_invariant(cond) + + def test_timeout(self): + cond = self.Condition() + wait = TimingWrapper(cond.wait) + cond.acquire() + res = wait(TIMEOUT1) + cond.release() + self.assertEqual(res, False) + self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) + + @classmethod + def _test_waitfor_f(cls, cond, state): + with cond: + state.value = 0 + cond.notify() + result = cond.wait_for(lambda : state.value==4) + if not result or state.value != 4: + sys.exit(1) + + @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') + def test_waitfor(self): + # based on test in test/lock_tests.py + cond = self.Condition() + state = self.Value('i', -1) + + p = self.Process(target=self._test_waitfor_f, args=(cond, state)) + p.daemon = True + p.start() + + with cond: + result = cond.wait_for(lambda : state.value==0) + self.assertTrue(result) + self.assertEqual(state.value, 0) + + for i in range(4): + time.sleep(0.01) + with cond: + state.value += 1 + cond.notify() + + p.join(5) + self.assertFalse(p.is_alive()) + self.assertEqual(p.exitcode, 0) + + @classmethod + def _test_waitfor_timeout_f(cls, cond, state, success, sem): + sem.release() + with cond: + expected = 0.1 + dt = time.time() + result = cond.wait_for(lambda : state.value==4, timeout=expected) + dt = time.time() - dt + # borrow logic in assertTimeout() from test/lock_tests.py + if not result and expected * 0.6 < dt < expected * 10.0: + success.value = True + + @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') + def test_waitfor_timeout(self): + # based on test in test/lock_tests.py + cond = self.Condition() + state = self.Value('i', 0) + success = self.Value('i', False) + sem = self.Semaphore(0) + + p = self.Process(target=self._test_waitfor_timeout_f, + args=(cond, state, success, sem)) + p.daemon = True + p.start() + self.assertTrue(sem.acquire(timeout=10)) + + # Only increment 3 times, so state == 4 is never reached. 
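An aside on the semantics the loop below exercises: Condition.wait_for() re-checks its predicate after each wakeup and returns the predicate's final value, so a predicate that never becomes true yields False after roughly the requested timeout. A minimal, self-contained sketch (plain threading; the 0.1s timeout is an arbitrary illustrative value):

import threading
import time

cond = threading.Condition()
with cond:
    start = time.monotonic()
    # wait_for() returns the predicate's last value: False here,
    # since nothing ever notifies this condition.
    result = cond.wait_for(lambda: False, timeout=0.1)
    elapsed = time.monotonic() - start

assert result is False
assert elapsed >= 0.09   # roughly the requested timeout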
+ for i in range(3): + time.sleep(0.01) + with cond: + state.value += 1 + cond.notify() + + p.join(5) + self.assertTrue(success.value) + + @classmethod + def _test_wait_result(cls, c, pid): + with c: + c.notify() + time.sleep(1) + if pid is not None: + os.kill(pid, signal.SIGINT) + + def test_wait_result(self): + if isinstance(self, ProcessesMixin) and sys.platform != 'win32': + pid = os.getpid() + else: + pid = None + + c = self.Condition() + with c: + self.assertFalse(c.wait(0)) + self.assertFalse(c.wait(0.1)) + + p = self.Process(target=self._test_wait_result, args=(c, pid)) + p.start() + + self.assertTrue(c.wait(10)) + if pid is not None: + self.assertRaises(KeyboardInterrupt, c.wait, 10) + + p.join() + + +class _TestEvent(BaseTestCase): + + @classmethod + def _test_event(cls, event): + time.sleep(TIMEOUT2) + event.set() + + def test_event(self): + event = self.Event() + wait = TimingWrapper(event.wait) + + # Removed temporarily due to API shear: this does not + # work with threading._Event objects (is_set == isSet). + self.assertEqual(event.is_set(), False) + + # Removed: threading.Event.wait() returns the value of the __flag + # instead of None. API shear with the semaphore-backed mp.Event. + self.assertEqual(wait(0.0), False) + self.assertTimingAlmostEqual(wait.elapsed, 0.0) + self.assertEqual(wait(TIMEOUT1), False) + self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) + + event.set() + + # See note above on the API differences + self.assertEqual(event.is_set(), True) + self.assertEqual(wait(), True) + self.assertTimingAlmostEqual(wait.elapsed, 0.0) + self.assertEqual(wait(TIMEOUT1), True) + self.assertTimingAlmostEqual(wait.elapsed, 0.0) + # self.assertEqual(event.is_set(), True) + + event.clear() + + #self.assertEqual(event.is_set(), False) + + p = self.Process(target=self._test_event, args=(event,)) + p.daemon = True + p.start() + self.assertEqual(wait(), True) + +# +# Tests for Barrier - adapted from tests in test/lock_tests.py +# + +# Many of the tests for threading.Barrier use a list as an atomic +# counter: a value is appended to increment the counter, and the +# length of the list gives the value. We use the class _DummyList +# for the same purpose. + +class _DummyList(object): + + def __init__(self): + wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i')) + lock = multiprocessing.Lock() + self.__setstate__((wrapper, lock)) + self._lengthbuf[0] = 0 + + def __setstate__(self, state): + (self._wrapper, self._lock) = state + self._lengthbuf = self._wrapper.create_memoryview().cast('i') + + def __getstate__(self): + return (self._wrapper, self._lock) + + def append(self, _): + with self._lock: + self._lengthbuf[0] += 1 + + def __len__(self): + with self._lock: + return self._lengthbuf[0] + +def _wait(): + # A crude wait/yield function not relying on synchronization primitives. + time.sleep(0.01) + + +class Bunch(object): + """ + A bunch of threads. + """ + def __init__(self, namespace, f, args, n, wait_before_exit=False): + """ + Construct a bunch of `n` threads running the same function `f`. + If `wait_before_exit` is True, the threads won't terminate until + do_finish() is called.
+ """ + self.f = f + self.args = args + self.n = n + self.started = namespace.DummyList() + self.finished = namespace.DummyList() + self._can_exit = namespace.Event() + if not wait_before_exit: + self._can_exit.set() + for i in range(n): + p = namespace.Process(target=self.task) + p.daemon = True + p.start() + + def task(self): + pid = os.getpid() + self.started.append(pid) + try: + self.f(*self.args) + finally: + self.finished.append(pid) + self._can_exit.wait(30) + assert self._can_exit.is_set() + + def wait_for_started(self): + while len(self.started) < self.n: + _wait() + + def wait_for_finished(self): + while len(self.finished) < self.n: + _wait() + + def do_finish(self): + self._can_exit.set() + + +class AppendTrue(object): + def __init__(self, obj): + self.obj = obj + def __call__(self): + self.obj.append(True) + + +class _TestBarrier(BaseTestCase): + """ + Tests for Barrier objects. + """ + N = 5 + defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout + + def setUp(self): + self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout) + + def tearDown(self): + self.barrier.abort() + self.barrier = None + + def DummyList(self): + if self.TYPE == 'threads': + return [] + elif self.TYPE == 'manager': + return self.manager.list() + else: + return _DummyList() + + def run_threads(self, f, args): + b = Bunch(self, f, args, self.N-1) + f(*args) + b.wait_for_finished() + + @classmethod + def multipass(cls, barrier, results, n): + m = barrier.parties + assert m == cls.N + for i in range(n): + results[0].append(True) + assert len(results[1]) == i * m + barrier.wait() + results[1].append(True) + assert len(results[0]) == (i + 1) * m + barrier.wait() + try: + assert barrier.n_waiting == 0 + except NotImplementedError: + pass + assert not barrier.broken + + def test_barrier(self, passes=1): + """ + Test that a barrier is passed in lockstep + """ + results = [self.DummyList(), self.DummyList()] + self.run_threads(self.multipass, (self.barrier, results, passes)) + + def test_barrier_10(self): + """ + Test that a barrier works for 10 consecutive runs + """ + return self.test_barrier(10) + + @classmethod + def _test_wait_return_f(cls, barrier, queue): + res = barrier.wait() + queue.put(res) + + def test_wait_return(self): + """ + test the return value from barrier.wait + """ + queue = self.Queue() + self.run_threads(self._test_wait_return_f, (self.barrier, queue)) + results = [queue.get() for i in range(self.N)] + self.assertEqual(results.count(0), 1) + + @classmethod + def _test_action_f(cls, barrier, results): + barrier.wait() + if len(results) != 1: + raise RuntimeError + + def test_action(self): + """ + Test the 'action' callback + """ + results = self.DummyList() + barrier = self.Barrier(self.N, action=AppendTrue(results)) + self.run_threads(self._test_action_f, (barrier, results)) + self.assertEqual(len(results), 1) + + @classmethod + def _test_abort_f(cls, barrier, results1, results2): + try: + i = barrier.wait() + if i == cls.N//2: + raise RuntimeError + barrier.wait() + results1.append(True) + except threading.BrokenBarrierError: + results2.append(True) + except RuntimeError: + barrier.abort() + + def test_abort(self): + """ + Test that an abort will put the barrier in a broken state + """ + results1 = self.DummyList() + results2 = self.DummyList() + self.run_threads(self._test_abort_f, + (self.barrier, results1, results2)) + self.assertEqual(len(results1), 0) + self.assertEqual(len(results2), self.N-1) + self.assertTrue(self.barrier.broken) + + @classmethod + def 
_test_reset_f(cls, barrier, results1, results2, results3): + i = barrier.wait() + if i == cls.N//2: + # Wait until the other threads are all in the barrier. + while barrier.n_waiting < cls.N-1: + time.sleep(0.001) + barrier.reset() + else: + try: + barrier.wait() + results1.append(True) + except threading.BrokenBarrierError: + results2.append(True) + # Now, pass the barrier again + barrier.wait() + results3.append(True) + + def test_reset(self): + """ + Test that a 'reset' on a barrier frees the waiting threads + """ + results1 = self.DummyList() + results2 = self.DummyList() + results3 = self.DummyList() + self.run_threads(self._test_reset_f, + (self.barrier, results1, results2, results3)) + self.assertEqual(len(results1), 0) + self.assertEqual(len(results2), self.N-1) + self.assertEqual(len(results3), self.N) + + @classmethod + def _test_abort_and_reset_f(cls, barrier, barrier2, + results1, results2, results3): + try: + i = barrier.wait() + if i == cls.N//2: + raise RuntimeError + barrier.wait() + results1.append(True) + except threading.BrokenBarrierError: + results2.append(True) + except RuntimeError: + barrier.abort() + # Synchronize and reset the barrier. Must synchronize first so + # that everyone has left it when we reset, and after so that no + # one enters it before the reset. + if barrier2.wait() == cls.N//2: + barrier.reset() + barrier2.wait() + barrier.wait() + results3.append(True) + + def test_abort_and_reset(self): + """ + Test that a barrier can be reset after being broken. + """ + results1 = self.DummyList() + results2 = self.DummyList() + results3 = self.DummyList() + barrier2 = self.Barrier(self.N) + + self.run_threads(self._test_abort_and_reset_f, + (self.barrier, barrier2, results1, results2, results3)) + self.assertEqual(len(results1), 0) + self.assertEqual(len(results2), self.N-1) + self.assertEqual(len(results3), self.N) + + @classmethod + def _test_timeout_f(cls, barrier, results): + i = barrier.wait() + if i == cls.N//2: + # One thread is late! 
+ time.sleep(1.0) + try: + barrier.wait(0.5) + except threading.BrokenBarrierError: + results.append(True) + + def test_timeout(self): + """ + Test wait(timeout) + """ + results = self.DummyList() + self.run_threads(self._test_timeout_f, (self.barrier, results)) + self.assertEqual(len(results), self.barrier.parties) + + @classmethod + def _test_default_timeout_f(cls, barrier, results): + i = barrier.wait(cls.defaultTimeout) + if i == cls.N//2: + # One thread is later than the default timeout + time.sleep(1.0) + try: + barrier.wait() + except threading.BrokenBarrierError: + results.append(True) + + def test_default_timeout(self): + """ + Test the barrier's default timeout + """ + barrier = self.Barrier(self.N, timeout=0.5) + results = self.DummyList() + self.run_threads(self._test_default_timeout_f, (barrier, results)) + self.assertEqual(len(results), barrier.parties) + + def test_single_thread(self): + b = self.Barrier(1) + b.wait() + b.wait() + + @classmethod + def _test_thousand_f(cls, barrier, passes, conn, lock): + for i in range(passes): + barrier.wait() + with lock: + conn.send(i) + + def test_thousand(self): + if self.TYPE == 'manager': + return + passes = 1000 + lock = self.Lock() + conn, child_conn = self.Pipe(False) + for j in range(self.N): + p = self.Process(target=self._test_thousand_f, + args=(self.barrier, passes, child_conn, lock)) + p.start() + + for i in range(passes): + for j in range(self.N): + self.assertEqual(conn.recv(), i) + +# +# +# + +class _TestValue(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + codes_values = [ + ('i', 4343, 24234), + ('d', 3.625, -4.25), + ('h', -232, 234), + ('c', latin('x'), latin('y')) + ] + + def setUp(self): + if not HAS_SHAREDCTYPES: + self.skipTest("requires multiprocessing.sharedctypes") + + @classmethod + def _test(cls, values): + for sv, cv in zip(values, cls.codes_values): + sv.value = cv[2] + + + def test_value(self, raw=False): + if raw: + values = [self.RawValue(code, value) + for code, value, _ in self.codes_values] + else: + values = [self.Value(code, value) + for code, value, _ in self.codes_values] + + for sv, cv in zip(values, self.codes_values): + self.assertEqual(sv.value, cv[1]) + + proc = self.Process(target=self._test, args=(values,)) + proc.daemon = True + proc.start() + proc.join() + + for sv, cv in zip(values, self.codes_values): + self.assertEqual(sv.value, cv[2]) + + def test_rawvalue(self): + self.test_value(raw=True) + + def test_getobj_getlock(self): + val1 = self.Value('i', 5) + lock1 = val1.get_lock() + obj1 = val1.get_obj() + + val2 = self.Value('i', 5, lock=None) + lock2 = val2.get_lock() + obj2 = val2.get_obj() + + lock = self.Lock() + val3 = self.Value('i', 5, lock=lock) + lock3 = val3.get_lock() + obj3 = val3.get_obj() + self.assertEqual(lock, lock3) + + arr4 = self.Value('i', 5, lock=False) + self.assertFalse(hasattr(arr4, 'get_lock')) + self.assertFalse(hasattr(arr4, 'get_obj')) + + self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue') + + arr5 = self.RawValue('i', 5) + self.assertFalse(hasattr(arr5, 'get_lock')) + self.assertFalse(hasattr(arr5, 'get_obj')) + + +class _TestArray(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + @classmethod + def f(cls, seq): + for i in range(1, len(seq)): + seq[i] += seq[i-1] + + @unittest.skipIf(c_int is None, "requires _ctypes") + def test_array(self, raw=False): + seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831] + if raw: + arr = self.RawArray('i', seq) + else: + arr = self.Array('i', seq) + + self.assertEqual(len(arr), len(seq)) + 
self.assertEqual(arr[3], seq[3]) + self.assertEqual(list(arr[2:7]), list(seq[2:7])) + + arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4]) + + self.assertEqual(list(arr[:]), seq) + + self.f(seq) + + p = self.Process(target=self.f, args=(arr,)) + p.daemon = True + p.start() + p.join() + + self.assertEqual(list(arr[:]), seq) + + @unittest.skipIf(c_int is None, "requires _ctypes") + def test_array_from_size(self): + size = 10 + # Test for zeroing (see issue #11675). + # The repetition below strengthens the test by increasing the chances + # of previously allocated non-zero memory being used for the new array + # on the 2nd and 3rd loops. + for _ in range(3): + arr = self.Array('i', size) + self.assertEqual(len(arr), size) + self.assertEqual(list(arr), [0] * size) + arr[:] = range(10) + self.assertEqual(list(arr), list(range(10))) + del arr + + @unittest.skipIf(c_int is None, "requires _ctypes") + def test_rawarray(self): + self.test_array(raw=True) + + @unittest.skipIf(c_int is None, "requires _ctypes") + def test_getobj_getlock_obj(self): + arr1 = self.Array('i', list(range(10))) + lock1 = arr1.get_lock() + obj1 = arr1.get_obj() + + arr2 = self.Array('i', list(range(10)), lock=None) + lock2 = arr2.get_lock() + obj2 = arr2.get_obj() + + lock = self.Lock() + arr3 = self.Array('i', list(range(10)), lock=lock) + lock3 = arr3.get_lock() + obj3 = arr3.get_obj() + self.assertEqual(lock, lock3) + + arr4 = self.Array('i', range(10), lock=False) + self.assertFalse(hasattr(arr4, 'get_lock')) + self.assertFalse(hasattr(arr4, 'get_obj')) + self.assertRaises(AttributeError, + self.Array, 'i', range(10), lock='notalock') + + arr5 = self.RawArray('i', range(10)) + self.assertFalse(hasattr(arr5, 'get_lock')) + self.assertFalse(hasattr(arr5, 'get_obj')) + +# +# +# + +class _TestContainers(BaseTestCase): + + ALLOWED_TYPES = ('manager',) + + def test_list(self): + a = self.list(list(range(10))) + self.assertEqual(a[:], list(range(10))) + + b = self.list() + self.assertEqual(b[:], []) + + b.extend(list(range(5))) + self.assertEqual(b[:], list(range(5))) + + self.assertEqual(b[2], 2) + self.assertEqual(b[2:10], [2,3,4]) + + b *= 2 + self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]) + + self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6]) + + self.assertEqual(a[:], list(range(10))) + + d = [a, b] + e = self.list(d) + self.assertEqual( + e[:], + [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]] + ) + + f = self.list([a]) + a.append('hello') + self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']]) + + def test_dict(self): + d = self.dict() + indices = list(range(65, 70)) + for i in indices: + d[i] = chr(i) + self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices)) + self.assertEqual(sorted(d.keys()), indices) + self.assertEqual(sorted(d.values()), [chr(i) for i in indices]) + self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices]) + + def test_namespace(self): + n = self.Namespace() + n.name = 'Bob' + n.job = 'Builder' + n._hidden = 'hidden' + self.assertEqual((n.name, n.job), ('Bob', 'Builder')) + del n.job + self.assertEqual(str(n), "Namespace(name='Bob')") + self.assertTrue(hasattr(n, 'name')) + self.assertTrue(not hasattr(n, 'job')) + +# +# +# + +def sqr(x, wait=0.0): + time.sleep(wait) + return x*x + +def mul(x, y): + return x*y + +class _TestPool(BaseTestCase): + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.pool = cls.Pool(4) + + @classmethod + def tearDownClass(cls): + cls.pool.terminate() + cls.pool.join() + cls.pool = 
None + super().tearDownClass() + + def test_apply(self): + papply = self.pool.apply + self.assertEqual(papply(sqr, (5,)), sqr(5)) + self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3)) + + def test_map(self): + pmap = self.pool.map + self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10))))) + self.assertEqual(pmap(sqr, list(range(100)), chunksize=20), + list(map(sqr, list(range(100))))) + + def test_starmap(self): + psmap = self.pool.starmap + tuples = list(zip(range(10), range(9,-1, -1))) + self.assertEqual(psmap(mul, tuples), + list(itertools.starmap(mul, tuples))) + tuples = list(zip(range(100), range(99,-1, -1))) + self.assertEqual(psmap(mul, tuples, chunksize=20), + list(itertools.starmap(mul, tuples))) + + def test_starmap_async(self): + tuples = list(zip(range(100), range(99,-1, -1))) + self.assertEqual(self.pool.starmap_async(mul, tuples).get(), + list(itertools.starmap(mul, tuples))) + + def test_map_async(self): + self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(), + list(map(sqr, list(range(10))))) + + def test_map_async_callbacks(self): + call_args = self.manager.list() if self.TYPE == 'manager' else [] + self.pool.map_async(int, ['1'], + callback=call_args.append, + error_callback=call_args.append).wait() + self.assertEqual(1, len(call_args)) + self.assertEqual([1], call_args[0]) + self.pool.map_async(int, ['a'], + callback=call_args.append, + error_callback=call_args.append).wait() + self.assertEqual(2, len(call_args)) + self.assertIsInstance(call_args[1], ValueError) + + def test_map_chunksize(self): + try: + self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1) + except multiprocessing.TimeoutError: + self.fail("pool.map_async with chunksize stalled on null list") + + def test_async(self): + res = self.pool.apply_async(sqr, (7, TIMEOUT1,)) + get = TimingWrapper(res.get) + self.assertEqual(get(), 49) + self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) + + def test_async_timeout(self): + res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2)) + get = TimingWrapper(res.get) + self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2) + self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) + + def test_imap(self): + it = self.pool.imap(sqr, list(range(10))) + self.assertEqual(list(it), list(map(sqr, list(range(10))))) + + it = self.pool.imap(sqr, list(range(10))) + for i in range(10): + self.assertEqual(next(it), i*i) + self.assertRaises(StopIteration, it.__next__) + + it = self.pool.imap(sqr, list(range(1000)), chunksize=100) + for i in range(1000): + self.assertEqual(next(it), i*i) + self.assertRaises(StopIteration, it.__next__) + + def test_imap_unordered(self): + it = self.pool.imap_unordered(sqr, list(range(1000))) + self.assertEqual(sorted(it), list(map(sqr, list(range(1000))))) + + it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=53) + self.assertEqual(sorted(it), list(map(sqr, list(range(1000))))) + + def test_make_pool(self): + self.assertRaises(ValueError, multiprocessing.Pool, -1) + self.assertRaises(ValueError, multiprocessing.Pool, 0) + + p = multiprocessing.Pool(3) + self.assertEqual(3, len(p._pool)) + p.close() + p.join() + + def test_terminate(self): + result = self.pool.map_async( + time.sleep, [0.1 for i in range(10000)], chunksize=1 + ) + self.pool.terminate() + join = TimingWrapper(self.pool.join) + join() + self.assertLess(join.elapsed, 0.5) + + def test_empty_iterable(self): + # See Issue 12157 + p = self.Pool(1) + + self.assertEqual(p.map(sqr, []), []) + self.assertEqual(list(p.imap(sqr, 
[])), []) + self.assertEqual(list(p.imap_unordered(sqr, [])), []) + self.assertEqual(p.map_async(sqr, []).get(), []) + + p.close() + p.join() + + def test_context(self): + if self.TYPE == 'processes': + L = list(range(10)) + expected = [sqr(i) for i in L] + with multiprocessing.Pool(2) as p: + r = p.map_async(sqr, L) + self.assertEqual(r.get(), expected) + self.assertRaises(ValueError, p.map_async, sqr, L) + + @classmethod + def _test_traceback(cls): + raise RuntimeError(123) # some comment + + def test_traceback(self): + # We want to ensure that the traceback from the child process is + # contained in the traceback raised in the main process. + if self.TYPE == 'processes': + with self.Pool(1) as p: + try: + p.apply(self._test_traceback) + except Exception as e: + exc = e + else: + raise AssertionError('expected RuntimeError') + self.assertIs(type(exc), RuntimeError) + self.assertEqual(exc.args, (123,)) + cause = exc.__cause__ + self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback) + self.assertIn('raise RuntimeError(123) # some comment', cause.tb) + + with test.support.captured_stderr() as f1: + try: + raise exc + except RuntimeError: + sys.excepthook(*sys.exc_info()) + self.assertIn('raise RuntimeError(123) # some comment', + f1.getvalue()) + +def raising(): + raise KeyError("key") + +def unpickleable_result(): + return lambda: 42 + +class _TestPoolWorkerErrors(BaseTestCase): + ALLOWED_TYPES = ('processes', ) + + def test_async_error_callback(self): + p = multiprocessing.Pool(2) + + scratchpad = [None] + def errback(exc): + scratchpad[0] = exc + + res = p.apply_async(raising, error_callback=errback) + self.assertRaises(KeyError, res.get) + self.assertTrue(scratchpad[0]) + self.assertIsInstance(scratchpad[0], KeyError) + + p.close() + p.join() + + def test_unpickleable_result(self): + from multiprocessing.pool import MaybeEncodingError + p = multiprocessing.Pool(2) + + # Make sure we don't lose pool processes because of encoding errors. + for iteration in range(20): + + scratchpad = [None] + def errback(exc): + scratchpad[0] = exc + + res = p.apply_async(unpickleable_result, error_callback=errback) + self.assertRaises(MaybeEncodingError, res.get) + wrapped = scratchpad[0] + self.assertTrue(wrapped) + self.assertIsInstance(scratchpad[0], MaybeEncodingError) + self.assertIsNotNone(wrapped.exc) + self.assertIsNotNone(wrapped.value) + + p.close() + p.join() + +class _TestPoolWorkerLifetime(BaseTestCase): + ALLOWED_TYPES = ('processes', ) + + def test_pool_worker_lifetime(self): + p = multiprocessing.Pool(3, maxtasksperchild=10) + self.assertEqual(3, len(p._pool)) + origworkerpids = [w.pid for w in p._pool] + # Run many tasks so each worker gets replaced (hopefully) + results = [] + for i in range(100): + results.append(p.apply_async(sqr, (i, ))) + # Fetch the results and verify we got the right answers, + # also ensuring all the tasks have completed. + for (j, res) in enumerate(results): + self.assertEqual(res.get(), sqr(j)) + # Refill the pool + p._repopulate_pool() + # Wait until all workers are alive + # (countdown * DELTA = 5 seconds max startup process time) + countdown = 50 + while countdown and not all(w.is_alive() for w in p._pool): + countdown -= 1 + time.sleep(DELTA) + finalworkerpids = [w.pid for w in p._pool] + # All pids should be assigned. See issue #7805.
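For context on the worker recycling asserted below, a minimal sketch of maxtasksperchild (worker_pid is an illustrative helper, not part of the test suite):

import multiprocessing
import os

def worker_pid(_):
    return os.getpid()

if __name__ == '__main__':
    # maxtasksperchild=1 retires each worker after one task, and
    # chunksize=1 makes every item its own task, so the three calls
    # normally land in three freshly started processes.
    with multiprocessing.Pool(1, maxtasksperchild=1) as pool:
        pids = pool.map(worker_pid, range(3), chunksize=1)
    assert len(set(pids)) == 3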
+ self.assertNotIn(None, origworkerpids) + self.assertNotIn(None, finalworkerpids) + # Finally, check that the worker pids have changed + self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids)) + p.close() + p.join() + + def test_pool_worker_lifetime_early_close(self): + # Issue #10332: closing a pool whose workers have limited lifetimes + # before all the tasks completed would make join() hang. + p = multiprocessing.Pool(3, maxtasksperchild=1) + results = [] + for i in range(6): + results.append(p.apply_async(sqr, (i, 0.3))) + p.close() + p.join() + # check the results + for (j, res) in enumerate(results): + self.assertEqual(res.get(), sqr(j)) + +# +# Test of creating a customized manager class +# + +from multiprocessing.managers import BaseManager, BaseProxy, RemoteError + +class FooBar(object): + def f(self): + return 'f()' + def g(self): + raise ValueError + def _h(self): + return '_h()' + +def baz(): + for i in range(10): + yield i*i + +class IteratorProxy(BaseProxy): + _exposed_ = ('__next__',) + def __iter__(self): + return self + def __next__(self): + return self._callmethod('__next__') + +class MyManager(BaseManager): + pass + +MyManager.register('Foo', callable=FooBar) +MyManager.register('Bar', callable=FooBar, exposed=('f', '_h')) +MyManager.register('baz', callable=baz, proxytype=IteratorProxy) + + +class _TestMyManager(BaseTestCase): + + ALLOWED_TYPES = ('manager',) + + def test_mymanager(self): + manager = MyManager() + manager.start() + self.common(manager) + manager.shutdown() + + # If the manager process exited cleanly then the exitcode + # will be zero. Otherwise (after a short timeout) + # terminate() is used, resulting in an exitcode of -SIGTERM. + self.assertEqual(manager._process.exitcode, 0) + + def test_mymanager_context(self): + with MyManager() as manager: + self.common(manager) + self.assertEqual(manager._process.exitcode, 0) + + def test_mymanager_context_prestarted(self): + manager = MyManager() + manager.start() + with manager: + self.common(manager) + self.assertEqual(manager._process.exitcode, 0) + + def common(self, manager): + foo = manager.Foo() + bar = manager.Bar() + baz = manager.baz() + + foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)] + bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)] + + self.assertEqual(foo_methods, ['f', 'g']) + self.assertEqual(bar_methods, ['f', '_h']) + + self.assertEqual(foo.f(), 'f()') + self.assertRaises(ValueError, foo.g) + self.assertEqual(foo._callmethod('f'), 'f()') + self.assertRaises(RemoteError, foo._callmethod, '_h') + + self.assertEqual(bar.f(), 'f()') + self.assertEqual(bar._h(), '_h()') + self.assertEqual(bar._callmethod('f'), 'f()') + self.assertEqual(bar._callmethod('_h'), '_h()') + + self.assertEqual(list(baz), [i*i for i in range(10)]) + + +# +# Test of connecting to a remote server and using xmlrpclib for serialization +# + +_queue = pyqueue.Queue() +def get_queue(): + return _queue + +class QueueManager(BaseManager): + '''manager class used by server process''' +QueueManager.register('get_queue', callable=get_queue) + +class QueueManager2(BaseManager): + '''manager class which specifies the same interface as QueueManager''' +QueueManager2.register('get_queue') + + +SERIALIZER = 'xmlrpclib' + +class _TestRemoteManager(BaseTestCase): + + ALLOWED_TYPES = ('manager',) + + @classmethod + def _putter(cls, address, authkey): + manager = QueueManager2( + address=address, authkey=authkey, serializer=SERIALIZER + ) + manager.connect() + queue = 
manager.get_queue() + queue.put(('hello world', None, True, 2.25)) + + def test_remote(self): + authkey = os.urandom(32) + + manager = QueueManager( + address=('localhost', 0), authkey=authkey, serializer=SERIALIZER + ) + manager.start() + + p = self.Process(target=self._putter, args=(manager.address, authkey)) + p.daemon = True + p.start() + + manager2 = QueueManager2( + address=manager.address, authkey=authkey, serializer=SERIALIZER + ) + manager2.connect() + queue = manager2.get_queue() + + # Note that xmlrpclib will deserialize object as a list not a tuple + self.assertEqual(queue.get(), ['hello world', None, True, 2.25]) + + # Because we are using xmlrpclib for serialization instead of + # pickle this will cause a serialization error. + self.assertRaises(Exception, queue.put, time.sleep) + + # Make queue finalizer run before the server is stopped + del queue + manager.shutdown() + +class _TestManagerRestart(BaseTestCase): + + @classmethod + def _putter(cls, address, authkey): + manager = QueueManager( + address=address, authkey=authkey, serializer=SERIALIZER) + manager.connect() + queue = manager.get_queue() + queue.put('hello world') + + def test_rapid_restart(self): + authkey = os.urandom(32) + manager = QueueManager( + address=('localhost', 0), authkey=authkey, serializer=SERIALIZER) + srvr = manager.get_server() + addr = srvr.address + # Close the connection.Listener socket which gets opened as a part + # of manager.get_server(). It's not needed for the test. + srvr.listener.close() + manager.start() + + p = self.Process(target=self._putter, args=(manager.address, authkey)) + p.daemon = True + p.start() + queue = manager.get_queue() + self.assertEqual(queue.get(), 'hello world') + del queue + manager.shutdown() + manager = QueueManager( + address=addr, authkey=authkey, serializer=SERIALIZER) + try: + manager.start() + except OSError as e: + if e.errno != errno.EADDRINUSE: + raise + # Retry after some time, in case the old socket was lingering + # (sporadic failure on buildbots) + time.sleep(1.0) + manager = QueueManager( + address=addr, authkey=authkey, serializer=SERIALIZER) + manager.shutdown() + +# +# +# + +SENTINEL = latin('') + +class _TestConnection(BaseTestCase): + + ALLOWED_TYPES = ('processes', 'threads') + + @classmethod + def _echo(cls, conn): + for msg in iter(conn.recv_bytes, SENTINEL): + conn.send_bytes(msg) + conn.close() + + def test_connection(self): + conn, child_conn = self.Pipe() + + p = self.Process(target=self._echo, args=(child_conn,)) + p.daemon = True + p.start() + + seq = [1, 2.25, None] + msg = latin('hello world') + longmsg = msg * 10 + arr = array.array('i', list(range(4))) + + if self.TYPE == 'processes': + self.assertEqual(type(conn.fileno()), int) + + self.assertEqual(conn.send(seq), None) + self.assertEqual(conn.recv(), seq) + + self.assertEqual(conn.send_bytes(msg), None) + self.assertEqual(conn.recv_bytes(), msg) + + if self.TYPE == 'processes': + buffer = array.array('i', [0]*10) + expected = list(arr) + [0] * (10 - len(arr)) + self.assertEqual(conn.send_bytes(arr), None) + self.assertEqual(conn.recv_bytes_into(buffer), + len(arr) * buffer.itemsize) + self.assertEqual(list(buffer), expected) + + buffer = array.array('i', [0]*10) + expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr)) + self.assertEqual(conn.send_bytes(arr), None) + self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize), + len(arr) * buffer.itemsize) + self.assertEqual(list(buffer), expected) + + buffer = bytearray(latin(' ' * 40)) + 
self.assertEqual(conn.send_bytes(longmsg), None) + try: + res = conn.recv_bytes_into(buffer) + except multiprocessing.BufferTooShort as e: + self.assertEqual(e.args, (longmsg,)) + else: + self.fail('expected BufferTooShort, got %s' % res) + + poll = TimingWrapper(conn.poll) + + self.assertEqual(poll(), False) + self.assertTimingAlmostEqual(poll.elapsed, 0) + + self.assertEqual(poll(-1), False) + self.assertTimingAlmostEqual(poll.elapsed, 0) + + self.assertEqual(poll(TIMEOUT1), False) + self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1) + + conn.send(None) + time.sleep(.1) + + self.assertEqual(poll(TIMEOUT1), True) + self.assertTimingAlmostEqual(poll.elapsed, 0) + + self.assertEqual(conn.recv(), None) + + really_big_msg = latin('X') * (1024 * 1024 * 16) # 16 MiB + conn.send_bytes(really_big_msg) + self.assertEqual(conn.recv_bytes(), really_big_msg) + + conn.send_bytes(SENTINEL) # tell child to quit + child_conn.close() + + if self.TYPE == 'processes': + self.assertEqual(conn.readable, True) + self.assertEqual(conn.writable, True) + self.assertRaises(EOFError, conn.recv) + self.assertRaises(EOFError, conn.recv_bytes) + + p.join() + + def test_duplex_false(self): + reader, writer = self.Pipe(duplex=False) + self.assertEqual(writer.send(1), None) + self.assertEqual(reader.recv(), 1) + if self.TYPE == 'processes': + self.assertEqual(reader.readable, True) + self.assertEqual(reader.writable, False) + self.assertEqual(writer.readable, False) + self.assertEqual(writer.writable, True) + self.assertRaises(OSError, reader.send, 2) + self.assertRaises(OSError, writer.recv) + self.assertRaises(OSError, writer.poll) + + def test_spawn_close(self): + # We test that a pipe connection can be closed by the parent + # process immediately after the child is spawned. On Windows this + # sometimes failed in old versions because + # child_conn would be closed before the child got a chance to + # duplicate it.
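In ordinary user code, the pattern described above looks like the following sketch (echo_once is an illustrative name):

import multiprocessing

def echo_once(conn):
    conn.send(conn.recv())
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = multiprocessing.Pipe()
    p = multiprocessing.Process(target=echo_once, args=(child_conn,))
    p.start()
    child_conn.close()    # drop the parent's copy; the child keeps its own handle
    parent_conn.send('ping')
    assert parent_conn.recv() == 'ping'
    p.join()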
+ conn, child_conn = self.Pipe() + + p = self.Process(target=self._echo, args=(child_conn,)) + p.daemon = True + p.start() + child_conn.close() # this might complete before child initializes + + msg = latin('hello') + conn.send_bytes(msg) + self.assertEqual(conn.recv_bytes(), msg) + + conn.send_bytes(SENTINEL) + conn.close() + p.join() + + def test_sendbytes(self): + if self.TYPE != 'processes': + return + + msg = latin('abcdefghijklmnopqrstuvwxyz') + a, b = self.Pipe() + + a.send_bytes(msg) + self.assertEqual(b.recv_bytes(), msg) + + a.send_bytes(msg, 5) + self.assertEqual(b.recv_bytes(), msg[5:]) + + a.send_bytes(msg, 7, 8) + self.assertEqual(b.recv_bytes(), msg[7:7+8]) + + a.send_bytes(msg, 26) + self.assertEqual(b.recv_bytes(), latin('')) + + a.send_bytes(msg, 26, 0) + self.assertEqual(b.recv_bytes(), latin('')) + + self.assertRaises(ValueError, a.send_bytes, msg, 27) + + self.assertRaises(ValueError, a.send_bytes, msg, 22, 5) + + self.assertRaises(ValueError, a.send_bytes, msg, 26, 1) + + self.assertRaises(ValueError, a.send_bytes, msg, -1) + + self.assertRaises(ValueError, a.send_bytes, msg, 4, -1) + + @classmethod + def _is_fd_assigned(cls, fd): + try: + os.fstat(fd) + except OSError as e: + if e.errno == errno.EBADF: + return False + raise + else: + return True + + @classmethod + def _writefd(cls, conn, data, create_dummy_fds=False): + if create_dummy_fds: + for i in range(0, 256): + if not cls._is_fd_assigned(i): + os.dup2(conn.fileno(), i) + fd = reduction.recv_handle(conn) + if msvcrt: + fd = msvcrt.open_osfhandle(fd, os.O_WRONLY) + os.write(fd, data) + os.close(fd) + + @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") + def test_fd_transfer(self): + if self.TYPE != 'processes': + self.skipTest("only makes sense with processes") + conn, child_conn = self.Pipe(duplex=True) + + p = self.Process(target=self._writefd, args=(child_conn, b"foo")) + p.daemon = True + p.start() + self.addCleanup(test.support.unlink, test.support.TESTFN) + with open(test.support.TESTFN, "wb") as f: + fd = f.fileno() + if msvcrt: + fd = msvcrt.get_osfhandle(fd) + reduction.send_handle(conn, fd, p.pid) + p.join() + with open(test.support.TESTFN, "rb") as f: + self.assertEqual(f.read(), b"foo") + + @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") + @unittest.skipIf(sys.platform == "win32", + "test semantics don't make sense on Windows") + @unittest.skipIf(MAXFD <= 256, + "largest assignable fd number is too small") + @unittest.skipUnless(hasattr(os, "dup2"), + "test needs os.dup2()") + def test_large_fd_transfer(self): + # With fd > 256 (issue #11657) + if self.TYPE != 'processes': + self.skipTest("only makes sense with processes") + conn, child_conn = self.Pipe(duplex=True) + + p = self.Process(target=self._writefd, args=(child_conn, b"bar", True)) + p.daemon = True + p.start() + self.addCleanup(test.support.unlink, test.support.TESTFN) + with open(test.support.TESTFN, "wb") as f: + fd = f.fileno() + for newfd in range(256, MAXFD): + if not self._is_fd_assigned(newfd): + break + else: + self.fail("could not find an unassigned large file descriptor") + os.dup2(fd, newfd) + try: + reduction.send_handle(conn, newfd, p.pid) + finally: + os.close(newfd) + p.join() + with open(test.support.TESTFN, "rb") as f: + self.assertEqual(f.read(), b"bar") + + @classmethod + def _send_data_without_fd(self, conn): + os.write(conn.fileno(), b"\0") + + @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") + @unittest.skipIf(sys.platform == "win32", 
"doesn't make sense on Windows") + def test_missing_fd_transfer(self): + # Check that exception is raised when received data is not + # accompanied by a file descriptor in ancillary data. + if self.TYPE != 'processes': + self.skipTest("only makes sense with processes") + conn, child_conn = self.Pipe(duplex=True) + + p = self.Process(target=self._send_data_without_fd, args=(child_conn,)) + p.daemon = True + p.start() + self.assertRaises(RuntimeError, reduction.recv_handle, conn) + p.join() + + def test_context(self): + a, b = self.Pipe() + + with a, b: + a.send(1729) + self.assertEqual(b.recv(), 1729) + if self.TYPE == 'processes': + self.assertFalse(a.closed) + self.assertFalse(b.closed) + + if self.TYPE == 'processes': + self.assertTrue(a.closed) + self.assertTrue(b.closed) + self.assertRaises(OSError, a.recv) + self.assertRaises(OSError, b.recv) + +class _TestListener(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def test_multiple_bind(self): + for family in self.connection.families: + l = self.connection.Listener(family=family) + self.addCleanup(l.close) + self.assertRaises(OSError, self.connection.Listener, + l.address, family) + + def test_context(self): + with self.connection.Listener() as l: + with self.connection.Client(l.address) as c: + with l.accept() as d: + c.send(1729) + self.assertEqual(d.recv(), 1729) + + if self.TYPE == 'processes': + self.assertRaises(OSError, l.accept) + +class _TestListenerClient(BaseTestCase): + + ALLOWED_TYPES = ('processes', 'threads') + + @classmethod + def _test(cls, address): + conn = cls.connection.Client(address) + conn.send('hello') + conn.close() + + def test_listener_client(self): + for family in self.connection.families: + l = self.connection.Listener(family=family) + p = self.Process(target=self._test, args=(l.address,)) + p.daemon = True + p.start() + conn = l.accept() + self.assertEqual(conn.recv(), 'hello') + p.join() + l.close() + + def test_issue14725(self): + l = self.connection.Listener() + p = self.Process(target=self._test, args=(l.address,)) + p.daemon = True + p.start() + time.sleep(1) + # On Windows the client process should by now have connected, + # written data and closed the pipe handle by now. This causes + # ConnectNamdedPipe() to fail with ERROR_NO_DATA. See Issue + # 14725. 
+ conn = l.accept() + self.assertEqual(conn.recv(), 'hello') + conn.close() + p.join() + l.close() + + def test_issue16955(self): + for fam in self.connection.families: + l = self.connection.Listener(family=fam) + c = self.connection.Client(l.address) + a = l.accept() + a.send_bytes(b"hello") + self.assertTrue(c.poll(1)) + a.close() + c.close() + l.close() + +class _TestPoll(BaseTestCase): + + ALLOWED_TYPES = ('processes', 'threads') + + def test_empty_string(self): + a, b = self.Pipe() + self.assertEqual(a.poll(), False) + b.send_bytes(b'') + self.assertEqual(a.poll(), True) + self.assertEqual(a.poll(), True) + + @classmethod + def _child_strings(cls, conn, strings): + for s in strings: + time.sleep(0.1) + conn.send_bytes(s) + conn.close() + + def test_strings(self): + strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop') + a, b = self.Pipe() + p = self.Process(target=self._child_strings, args=(b, strings)) + p.start() + + for s in strings: + for i in range(200): + if a.poll(0.01): + break + x = a.recv_bytes() + self.assertEqual(s, x) + + p.join() + + @classmethod + def _child_boundaries(cls, r): + # Polling may "pull" a message in to the child process, but we + # don't want it to pull only part of a message, as that would + # corrupt the pipe for any other processes which might later + # read from it. + r.poll(5) + + def test_boundaries(self): + r, w = self.Pipe(False) + p = self.Process(target=self._child_boundaries, args=(r,)) + p.start() + time.sleep(2) + L = [b"first", b"second"] + for obj in L: + w.send_bytes(obj) + w.close() + p.join() + self.assertIn(r.recv_bytes(), L) + + @classmethod + def _child_dont_merge(cls, b): + b.send_bytes(b'a') + b.send_bytes(b'b') + b.send_bytes(b'cd') + + def test_dont_merge(self): + a, b = self.Pipe() + self.assertEqual(a.poll(0.0), False) + self.assertEqual(a.poll(0.1), False) + + p = self.Process(target=self._child_dont_merge, args=(b,)) + p.start() + + self.assertEqual(a.recv_bytes(), b'a') + self.assertEqual(a.poll(1.0), True) + self.assertEqual(a.poll(1.0), True) + self.assertEqual(a.recv_bytes(), b'b') + self.assertEqual(a.poll(1.0), True) + self.assertEqual(a.poll(1.0), True) + self.assertEqual(a.poll(0.0), True) + self.assertEqual(a.recv_bytes(), b'cd') + + p.join() + +# +# Test of sending connection and socket objects between processes +# + +@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") +class _TestPicklingConnections(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + @classmethod + def tearDownClass(cls): + from multiprocessing import resource_sharer + resource_sharer.stop(timeout=5) + + @classmethod + def _listener(cls, conn, families): + for fam in families: + l = cls.connection.Listener(family=fam) + conn.send(l.address) + new_conn = l.accept() + conn.send(new_conn) + new_conn.close() + l.close() + + l = socket.socket() + l.bind(('localhost', 0)) + l.listen(1) + conn.send(l.getsockname()) + new_conn, addr = l.accept() + conn.send(new_conn) + new_conn.close() + l.close() + + conn.recv() + + @classmethod + def _remote(cls, conn): + for (address, msg) in iter(conn.recv, None): + client = cls.connection.Client(address) + client.send(msg.upper()) + client.close() + + address, msg = conn.recv() + client = socket.socket() + client.connect(address) + client.sendall(msg.upper()) + client.close() + + conn.close() + + def test_pickling(self): + families = self.connection.families + + lconn, lconn0 = self.Pipe() + lp = self.Process(target=self._listener, args=(lconn0, families)) + lp.daemon = True + 
lp.start() + lconn0.close() + + rconn, rconn0 = self.Pipe() + rp = self.Process(target=self._remote, args=(rconn0,)) + rp.daemon = True + rp.start() + rconn0.close() + + for fam in families: + msg = ('This connection uses family %s' % fam).encode('ascii') + address = lconn.recv() + rconn.send((address, msg)) + new_conn = lconn.recv() + self.assertEqual(new_conn.recv(), msg.upper()) + + rconn.send(None) + + msg = latin('This connection uses a normal socket') + address = lconn.recv() + rconn.send((address, msg)) + new_conn = lconn.recv() + buf = [] + while True: + s = new_conn.recv(100) + if not s: + break + buf.append(s) + buf = b''.join(buf) + self.assertEqual(buf, msg.upper()) + new_conn.close() + + lconn.send(None) + + rconn.close() + lconn.close() + + lp.join() + rp.join() + + @classmethod + def child_access(cls, conn): + w = conn.recv() + w.send('all is well') + w.close() + + r = conn.recv() + msg = r.recv() + conn.send(msg*2) + + conn.close() + + def test_access(self): + # On Windows, if we do not specify a destination pid when + # using DupHandle then we need to be careful to use the + # correct access flags for DuplicateHandle(), or else + # DupHandle.detach() will raise PermissionError. For example, + # for a read only pipe handle we should use + # access=FILE_GENERIC_READ. (Unfortunately + # DUPLICATE_SAME_ACCESS does not work.) + conn, child_conn = self.Pipe() + p = self.Process(target=self.child_access, args=(child_conn,)) + p.daemon = True + p.start() + child_conn.close() + + r, w = self.Pipe(duplex=False) + conn.send(w) + w.close() + self.assertEqual(r.recv(), 'all is well') + r.close() + + r, w = self.Pipe(duplex=False) + conn.send(r) + r.close() + w.send('foobar') + w.close() + self.assertEqual(conn.recv(), 'foobar'*2) + +# +# +# + +class _TestHeap(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def test_heap(self): + iterations = 5000 + maxblocks = 50 + blocks = [] + + # create and destroy lots of blocks of different sizes + for i in range(iterations): + size = int(random.lognormvariate(0, 1) * 1000) + b = multiprocessing.heap.BufferWrapper(size) + blocks.append(b) + if len(blocks) > maxblocks: + i = random.randrange(maxblocks) + del blocks[i] + + # get the heap object + heap = multiprocessing.heap.BufferWrapper._heap + + # verify the state of the heap + all = [] + occupied = 0 + heap._lock.acquire() + self.addCleanup(heap._lock.release) + for L in list(heap._len_to_seq.values()): + for arena, start, stop in L: + all.append((heap._arenas.index(arena), start, stop, + stop-start, 'free')) + for arena, start, stop in heap._allocated_blocks: + all.append((heap._arenas.index(arena), start, stop, + stop-start, 'occupied')) + occupied += (stop-start) + + all.sort() + + for i in range(len(all)-1): + (arena, start, stop) = all[i][:3] + (narena, nstart, nstop) = all[i+1][:3] + self.assertTrue((arena != narena and nstart == 0) or + (stop == nstart)) + + def test_free_from_gc(self): + # Check that freeing of blocks by the garbage collector doesn't deadlock + # (issue #12352). + # Make sure the GC is enabled, and set lower collection thresholds to + # make collections more frequent (and increase the probability of + # deadlock). 
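The threshold-lowering idiom used below, shown in isolation (a sketch; CPython's defaults are typically (700, 10, 10)):

import gc

old_thresholds = gc.get_threshold()    # typically (700, 10, 10)
gc.set_threshold(10)                   # collect far more often
try:
    garbage = [[i] for i in range(1000)]   # allocation churn triggers collections
finally:
    gc.set_threshold(*old_thresholds)  # always restore the previous values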
+ if not gc.isenabled(): + gc.enable() + self.addCleanup(gc.disable) + thresholds = gc.get_threshold() + self.addCleanup(gc.set_threshold, *thresholds) + gc.set_threshold(10) + + # perform numerous block allocations, with cyclic references to make + # sure objects are collected asynchronously by the gc + for i in range(5000): + a = multiprocessing.heap.BufferWrapper(1) + b = multiprocessing.heap.BufferWrapper(1) + # circular references + a.buddy = b + b.buddy = a + +# +# +# + +class _Foo(Structure): + _fields_ = [ + ('x', c_int), + ('y', c_double) + ] + +class _TestSharedCTypes(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def setUp(self): + if not HAS_SHAREDCTYPES: + self.skipTest("requires multiprocessing.sharedctypes") + + @classmethod + def _double(cls, x, y, foo, arr, string): + x.value *= 2 + y.value *= 2 + foo.x *= 2 + foo.y *= 2 + string.value *= 2 + for i in range(len(arr)): + arr[i] *= 2 + + def test_sharedctypes(self, lock=False): + x = Value('i', 7, lock=lock) + y = Value(c_double, 1.0/3.0, lock=lock) + foo = Value(_Foo, 3, 2, lock=lock) + arr = self.Array('d', list(range(10)), lock=lock) + string = self.Array('c', 20, lock=lock) + string.value = latin('hello') + + p = self.Process(target=self._double, args=(x, y, foo, arr, string)) + p.daemon = True + p.start() + p.join() + + self.assertEqual(x.value, 14) + self.assertAlmostEqual(y.value, 2.0/3.0) + self.assertEqual(foo.x, 6) + self.assertAlmostEqual(foo.y, 4.0) + for i in range(10): + self.assertAlmostEqual(arr[i], i*2) + self.assertEqual(string.value, latin('hellohello')) + + def test_synchronize(self): + self.test_sharedctypes(lock=True) + + def test_copy(self): + foo = _Foo(2, 5.0) + bar = copy(foo) + foo.x = 0 + foo.y = 0 + self.assertEqual(bar.x, 2) + self.assertAlmostEqual(bar.y, 5.0) + +# +# +# + +class _TestFinalize(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + @classmethod + def _test_finalize(cls, conn): + class Foo(object): + pass + + a = Foo() + util.Finalize(a, conn.send, args=('a',)) + del a # triggers callback for a + + b = Foo() + close_b = util.Finalize(b, conn.send, args=('b',)) + close_b() # triggers callback for b + close_b() # does nothing because callback has already been called + del b # does nothing because callback has already been called + + c = Foo() + util.Finalize(c, conn.send, args=('c',)) + + d10 = Foo() + util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) + + d01 = Foo() + util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) + d02 = Foo() + util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) + d03 = Foo() + util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) + + util.Finalize(None, conn.send, args=('e',), exitpriority=-10) + + util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) + + # call multiprocessing's cleanup function then exit process without + # garbage collecting locals + util._exit_function() + conn.close() + os._exit(0) + + def test_finalize(self): + conn, child_conn = self.Pipe() + + p = self.Process(target=self._test_finalize, args=(child_conn,)) + p.daemon = True + p.start() + p.join() + + result = [obj for obj in iter(conn.recv, 'STOP')] + self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) + +# +# Test that from ... 
import * works for each module +# + +class _TestImportStar(unittest.TestCase): + + def get_module_names(self): + import glob + folder = os.path.dirname(multiprocessing.__file__) + pattern = os.path.join(folder, '*.py') + files = glob.glob(pattern) + modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files] + modules = ['multiprocessing.' + m for m in modules] + modules.remove('multiprocessing.__init__') + modules.append('multiprocessing') + return modules + + def test_import(self): + modules = self.get_module_names() + if sys.platform == 'win32': + modules.remove('multiprocessing.popen_fork') + modules.remove('multiprocessing.popen_forkserver') + modules.remove('multiprocessing.popen_spawn_posix') + else: + modules.remove('multiprocessing.popen_spawn_win32') + if not HAS_REDUCTION: + modules.remove('multiprocessing.popen_forkserver') + + if c_int is None: + # This module requires _ctypes + modules.remove('multiprocessing.sharedctypes') + + for name in modules: + __import__(name) + mod = sys.modules[name] + self.assertTrue(hasattr(mod, '__all__'), name) + + for attr in mod.__all__: + self.assertTrue( + hasattr(mod, attr), + '%r does not have attribute %r' % (mod, attr) + ) + +# +# Quick test that logging works -- does not test logging output +# + +class _TestLogging(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def test_enable_logging(self): + logger = multiprocessing.get_logger() + logger.setLevel(util.SUBWARNING) + self.assertTrue(logger is not None) + logger.debug('this will not be printed') + logger.info('nor will this') + logger.setLevel(LOG_LEVEL) + + @classmethod + def _test_level(cls, conn): + logger = multiprocessing.get_logger() + conn.send(logger.getEffectiveLevel()) + + def test_level(self): + LEVEL1 = 32 + LEVEL2 = 37 + + logger = multiprocessing.get_logger() + root_logger = logging.getLogger() + root_level = root_logger.level + + reader, writer = multiprocessing.Pipe(duplex=False) + + logger.setLevel(LEVEL1) + p = self.Process(target=self._test_level, args=(writer,)) + p.daemon = True + p.start() + self.assertEqual(LEVEL1, reader.recv()) + + logger.setLevel(logging.NOTSET) + root_logger.setLevel(LEVEL2) + p = self.Process(target=self._test_level, args=(writer,)) + p.daemon = True + p.start() + self.assertEqual(LEVEL2, reader.recv()) + + root_logger.setLevel(root_level) + logger.setLevel(level=LOG_LEVEL) + + +# class _TestLoggingProcessName(BaseTestCase): +# +# def handle(self, record): +# assert record.processName == multiprocessing.current_process().name +# self.__handled = True +# +# def test_logging(self): +# handler = logging.Handler() +# handler.handle = self.handle +# self.__handled = False +# # Bypass getLogger() and side-effects +# logger = logging.getLoggerClass()( +# 'multiprocessing.test.TestLoggingProcessName') +# logger.addHandler(handler) +# logger.propagate = False +# +# logger.warn('foo') +# assert self.__handled + +# +# Check that Process.join() retries if os.waitpid() fails with EINTR +# + +class _TestPollEintr(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + @classmethod + def _killer(cls, pid): + time.sleep(0.5) + os.kill(pid, signal.SIGUSR1) + + @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') + def test_poll_eintr(self): + got_signal = [False] + def record(*args): + got_signal[0] = True + pid = os.getpid() + oldhandler = signal.signal(signal.SIGUSR1, record) + try: + killer = self.Process(target=self._killer, args=(pid,)) + killer.start() + p = self.Process(target=time.sleep, args=(1,)) + p.start() + p.join() + 
self.assertTrue(got_signal[0]) + self.assertEqual(p.exitcode, 0) + killer.join() + finally: + signal.signal(signal.SIGUSR1, oldhandler) + +# +# Test handle verification, see issue 3321 +# + +class TestInvalidHandle(unittest.TestCase): + + @unittest.skipIf(WIN32, "skipped on Windows") + def test_invalid_handles(self): + conn = multiprocessing.connection.Connection(44977608) + try: + self.assertRaises((ValueError, OSError), conn.poll) + finally: + # Hack private attribute _handle to avoid printing an error + # in conn.__del__ + conn._handle = None + self.assertRaises((ValueError, OSError), + multiprocessing.connection.Connection, -1) + + + +class OtherTest(unittest.TestCase): + # TODO: add more tests for deliver/answer challenge. + def test_deliver_challenge_auth_failure(self): + class _FakeConnection(object): + def recv_bytes(self, size): + return b'something bogus' + def send_bytes(self, data): + pass + self.assertRaises(multiprocessing.AuthenticationError, + multiprocessing.connection.deliver_challenge, + _FakeConnection(), b'abc') + + def test_answer_challenge_auth_failure(self): + class _FakeConnection(object): + def __init__(self): + self.count = 0 + def recv_bytes(self, size): + self.count += 1 + if self.count == 1: + return multiprocessing.connection.CHALLENGE + elif self.count == 2: + return b'something bogus' + return b'' + def send_bytes(self, data): + pass + self.assertRaises(multiprocessing.AuthenticationError, + multiprocessing.connection.answer_challenge, + _FakeConnection(), b'abc') + +# +# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585 +# + +def initializer(ns): + ns.test += 1 + +class TestInitializers(unittest.TestCase): + def setUp(self): + self.mgr = multiprocessing.Manager() + self.ns = self.mgr.Namespace() + self.ns.test = 0 + + def tearDown(self): + self.mgr.shutdown() + self.mgr.join() + + def test_manager_initializer(self): + m = multiprocessing.managers.SyncManager() + self.assertRaises(TypeError, m.start, 1) + m.start(initializer, (self.ns,)) + self.assertEqual(self.ns.test, 1) + m.shutdown() + m.join() + + def test_pool_initializer(self): + self.assertRaises(TypeError, multiprocessing.Pool, initializer=1) + p = multiprocessing.Pool(1, initializer, (self.ns,)) + p.close() + p.join() + self.assertEqual(self.ns.test, 1) + +# +# Issue 5155, 5313, 5331: Test process in processes +# Verifies os.close(sys.stdin.fileno()) vs.
sys.stdin.close() behavior +# + +def _ThisSubProcess(q): + try: + item = q.get(block=False) + except pyqueue.Empty: + pass + +def _TestProcess(q): + queue = multiprocessing.Queue() + subProc = multiprocessing.Process(target=_ThisSubProcess, args=(queue,)) + subProc.daemon = True + subProc.start() + subProc.join() + +def _afunc(x): + return x*x + +def pool_in_process(): + pool = multiprocessing.Pool(processes=4) + x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7]) + pool.close() + pool.join() + +class _file_like(object): + def __init__(self, delegate): + self._delegate = delegate + self._pid = None + + @property + def cache(self): + pid = os.getpid() + # There are no race conditions since fork keeps only the running thread + if pid != self._pid: + self._pid = pid + self._cache = [] + return self._cache + + def write(self, data): + self.cache.append(data) + + def flush(self): + self._delegate.write(''.join(self.cache)) + self._cache = [] + +class TestStdinBadfiledescriptor(unittest.TestCase): + + def test_queue_in_process(self): + queue = multiprocessing.Queue() + proc = multiprocessing.Process(target=_TestProcess, args=(queue,)) + proc.start() + proc.join() + + def test_pool_in_process(self): + p = multiprocessing.Process(target=pool_in_process) + p.start() + p.join() + + def test_flushing(self): + sio = io.StringIO() + flike = _file_like(sio) + flike.write('foo') + proc = multiprocessing.Process(target=lambda: flike.flush()) + flike.flush() + assert sio.getvalue() == 'foo' + + +class TestWait(unittest.TestCase): + + @classmethod + def _child_test_wait(cls, w, slow): + for i in range(10): + if slow: + time.sleep(random.random()*0.1) + w.send((i, os.getpid())) + w.close() + + def test_wait(self, slow=False): + from multiprocessing.connection import wait + readers = [] + procs = [] + messages = [] + + for i in range(4): + r, w = multiprocessing.Pipe(duplex=False) + p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow)) + p.daemon = True + p.start() + w.close() + readers.append(r) + procs.append(p) + self.addCleanup(p.join) + + while readers: + for r in wait(readers): + try: + msg = r.recv() + except EOFError: + readers.remove(r) + r.close() + else: + messages.append(msg) + + messages.sort() + expected = sorted((i, p.pid) for i in range(10) for p in procs) + self.assertEqual(messages, expected) + + @classmethod + def _child_test_wait_socket(cls, address, slow): + s = socket.socket() + s.connect(address) + for i in range(10): + if slow: + time.sleep(random.random()*0.1) + s.sendall(('%s\n' % i).encode('ascii')) + s.close() + + def test_wait_socket(self, slow=False): + from multiprocessing.connection import wait + l = socket.socket() + l.bind(('', 0)) + l.listen(4) + addr = ('localhost', l.getsockname()[1]) + readers = [] + procs = [] + dic = {} + + for i in range(4): + p = multiprocessing.Process(target=self._child_test_wait_socket, + args=(addr, slow)) + p.daemon = True + p.start() + procs.append(p) + self.addCleanup(p.join) + + for i in range(4): + r, _ = l.accept() + readers.append(r) + dic[r] = [] + l.close() + + while readers: + for r in wait(readers): + msg = r.recv(32) + if not msg: + readers.remove(r) + r.close() + else: + dic[r].append(msg) + + expected = ''.join('%s\n' % i for i in range(10)).encode('ascii') + for v in dic.values(): + self.assertEqual(b''.join(v), expected) + + def test_wait_slow(self): + self.test_wait(True) + + def test_wait_socket_slow(self): + self.test_wait_socket(True) + + def test_wait_timeout(self): + from multiprocessing.connection import wait + + 
+    def test_wait_timeout(self):
+        from multiprocessing.connection import wait
+
+        expected = 5
+        a, b = multiprocessing.Pipe()
+
+        start = time.time()
+        res = wait([a, b], expected)
+        delta = time.time() - start
+
+        self.assertEqual(res, [])
+        self.assertLess(delta, expected * 2)
+        self.assertGreater(delta, expected * 0.5)
+
+        b.send(None)
+
+        start = time.time()
+        res = wait([a, b], 20)
+        delta = time.time() - start
+
+        self.assertEqual(res, [a])
+        self.assertLess(delta, 0.4)
+
+    @classmethod
+    def signal_and_sleep(cls, sem, period):
+        sem.release()
+        time.sleep(period)
+
+    def test_wait_integer(self):
+        from multiprocessing.connection import wait
+
+        expected = 3
+        sorted_ = lambda l: sorted(l, key=lambda x: id(x))
+        sem = multiprocessing.Semaphore(0)
+        a, b = multiprocessing.Pipe()
+        p = multiprocessing.Process(target=self.signal_and_sleep,
+                                    args=(sem, expected))
+
+        p.start()
+        self.assertIsInstance(p.sentinel, int)
+        self.assertTrue(sem.acquire(timeout=20))
+
+        start = time.time()
+        res = wait([a, p.sentinel, b], expected + 20)
+        delta = time.time() - start
+
+        self.assertEqual(res, [p.sentinel])
+        self.assertLess(delta, expected + 2)
+        self.assertGreater(delta, expected - 2)
+
+        a.send(None)
+
+        start = time.time()
+        res = wait([a, p.sentinel, b], 20)
+        delta = time.time() - start
+
+        self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
+        self.assertLess(delta, 0.4)
+
+        b.send(None)
+
+        start = time.time()
+        res = wait([a, p.sentinel, b], 20)
+        delta = time.time() - start
+
+        self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
+        self.assertLess(delta, 0.4)
+
+        p.terminate()
+        p.join()
+
+    def test_neg_timeout(self):
+        from multiprocessing.connection import wait
+        a, b = multiprocessing.Pipe()
+        t = time.time()
+        res = wait([a], timeout=-1)
+        t = time.time() - t
+        self.assertEqual(res, [])
+        self.assertLess(t, 1)
+        a.close()
+        b.close()
+
+#
+# Issue 14151: Test invalid family on invalid environment
+#
+
+class TestInvalidFamily(unittest.TestCase):
+
+    @unittest.skipIf(WIN32, "skipped on Windows")
+    def test_invalid_family(self):
+        with self.assertRaises(ValueError):
+            multiprocessing.connection.Listener(r'\\.\test')
+
+    @unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
+    def test_invalid_family_win32(self):
+        with self.assertRaises(ValueError):
+            multiprocessing.connection.Listener('/var/test.pipe')
+
+#
+# Issue 12098: check sys.flags of child matches that for parent
+#
+
+class TestFlags(unittest.TestCase):
+    @classmethod
+    def run_in_grandchild(cls, conn):
+        conn.send(tuple(sys.flags))
+
+    @classmethod
+    def run_in_child(cls):
+        import json
+        r, w = multiprocessing.Pipe(duplex=False)
+        p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
+        p.start()
+        grandchild_flags = r.recv()
+        p.join()
+        r.close()
+        w.close()
+        flags = (tuple(sys.flags), grandchild_flags)
+        print(json.dumps(flags))
+
+    def test_flags(self):
+        import json, subprocess
+        # start child process using unusual flags
+        prog = ('from test._test_multiprocessing import TestFlags; ' +
+                'TestFlags.run_in_child()')
+        data = subprocess.check_output(
+            [sys.executable, '-E', '-S', '-O', '-c', prog])
+        child_flags, grandchild_flags = json.loads(data.decode('ascii'))
+        self.assertEqual(child_flags, grandchild_flags)
+
+#
+# Test interaction with socket timeouts - see Issue #6056
+#
+
+class TestTimeouts(unittest.TestCase):
+    @classmethod
+    def _test_timeout(cls, child, address):
+        time.sleep(1)
+        child.send(123)
+        child.close()
+        conn = multiprocessing.connection.Client(address)
+        conn.send(456)
+        conn.close()
+
+    def test_timeout(self):
+        old_timeout = socket.getdefaulttimeout()
+        try:
+            socket.setdefaulttimeout(0.1)
+            parent, child = multiprocessing.Pipe(duplex=True)
+            l = multiprocessing.connection.Listener(family='AF_INET')
+            p = multiprocessing.Process(target=self._test_timeout,
+                                        args=(child, l.address))
+            p.start()
+            child.close()
+            self.assertEqual(parent.recv(), 123)
+            parent.close()
+            conn = l.accept()
+            self.assertEqual(conn.recv(), 456)
+            conn.close()
+            l.close()
+            p.join(10)
+        finally:
+            socket.setdefaulttimeout(old_timeout)
+
+#
+# Test what happens with no "if __name__ == '__main__'"
+#
+
+class TestNoForkBomb(unittest.TestCase):
+    def test_noforkbomb(self):
+        name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
+        if WIN32:
+            rc, out, err = test.script_helper.assert_python_failure(name)
+            self.assertEqual('', out.decode('ascii'))
+            self.assertIn('RuntimeError', err.decode('ascii'))
+        else:
+            rc, out, err = test.script_helper.assert_python_ok(name)
+            self.assertEqual('123', out.decode('ascii').rstrip())
+            self.assertEqual('', err.decode('ascii'))
+
+#
+# Issue #17555: ForkAwareThreadLock
+#
+
+class TestForkAwareThreadLock(unittest.TestCase):
+    # We recursively start processes. Issue #17555 meant that the
+    # after fork registry would get duplicate entries for the same
+    # lock. The size of the registry at generation n was ~2**n.
+
+    @classmethod
+    def child(cls, n, conn):
+        if n > 1:
+            p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
+            p.start()
+            p.join()
+        else:
+            conn.send(len(util._afterfork_registry))
+        conn.close()
+
+    def test_lock(self):
+        r, w = multiprocessing.Pipe(False)
+        l = util.ForkAwareThreadLock()
+        old_size = len(util._afterfork_registry)
+        p = multiprocessing.Process(target=self.child, args=(5, w))
+        p.start()
+        new_size = r.recv()
+        p.join()
+        self.assertLessEqual(new_size, old_size)
+
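TestNoForkBomb above runs a helper script (mp_fork_bomb.py) that deliberately omits the usual import guard; under the spawn start method the child re-imports the main module, so an unguarded Process(...).start() at module level would spawn children recursively. For contrast, a sketch of the guarded pattern — the work function is illustrative:

import multiprocessing

def work():
    print(123)

if __name__ == '__main__':
    # without this guard, spawn would re-run the module top level
    # in every child, forking endlessly
    multiprocessing.set_start_method('spawn')
    p = multiprocessing.Process(target=work)
    p.start()
    p.join()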
+#
+# Check that non-forked child processes do not inherit unneeded fds/handles
+#
+
+class TestCloseFds(unittest.TestCase):
+
+    def get_high_socket_fd(self):
+        if WIN32:
+            # The child process will not have any socket handles, so
+            # calling socket.fromfd() should produce WSAENOTSOCK even
+            # if there is a handle of the same number.
+            return socket.socket().detach()
+        else:
+            # We want to produce a socket with an fd high enough that a
+            # freshly created child process will not have any fds as high.
+            fd = socket.socket().detach()
+            to_close = []
+            while fd < 50:
+                to_close.append(fd)
+                fd = os.dup(fd)
+            for x in to_close:
+                os.close(x)
+            return fd
+
+    def close(self, fd):
+        if WIN32:
+            socket.socket(fileno=fd).close()
+        else:
+            os.close(fd)
+
+    @classmethod
+    def _test_closefds(cls, conn, fd):
+        try:
+            s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
+        except Exception as e:
+            conn.send(e)
+        else:
+            s.close()
+            conn.send(None)
+
+    def test_closefd(self):
+        if not HAS_REDUCTION:
+            raise unittest.SkipTest('requires fd pickling')
+
+        reader, writer = multiprocessing.Pipe()
+        fd = self.get_high_socket_fd()
+        try:
+            p = multiprocessing.Process(target=self._test_closefds,
+                                        args=(writer, fd))
+            p.start()
+            writer.close()
+            e = reader.recv()
+            p.join(timeout=5)
+        finally:
+            self.close(fd)
+            writer.close()
+            reader.close()
+
+        if multiprocessing.get_start_method() == 'fork':
+            self.assertIs(e, None)
+        else:
+            WSAENOTSOCK = 10038
+            self.assertIsInstance(e, OSError)
+            self.assertTrue(e.errno == errno.EBADF or
+                            e.winerror == WSAENOTSOCK, e)
+
+#
+# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
+#
+
+class TestIgnoreEINTR(unittest.TestCase):
+
+    @classmethod
+    def _test_ignore(cls, conn):
+        def handler(signum, frame):
+            pass
+        signal.signal(signal.SIGUSR1, handler)
+        conn.send('ready')
+        x = conn.recv()
+        conn.send(x)
+        conn.send_bytes(b'x'*(1024*1024))   # sending 1 MB should block
+
+    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
+    def test_ignore(self):
+        conn, child_conn = multiprocessing.Pipe()
+        try:
+            p = multiprocessing.Process(target=self._test_ignore,
+                                        args=(child_conn,))
+            p.daemon = True
+            p.start()
+            child_conn.close()
+            self.assertEqual(conn.recv(), 'ready')
+            time.sleep(0.1)
+            os.kill(p.pid, signal.SIGUSR1)
+            time.sleep(0.1)
+            conn.send(1234)
+            self.assertEqual(conn.recv(), 1234)
+            time.sleep(0.1)
+            os.kill(p.pid, signal.SIGUSR1)
+            self.assertEqual(conn.recv_bytes(), b'x'*(1024*1024))
+            time.sleep(0.1)
+            p.join()
+        finally:
+            conn.close()
+
+    @classmethod
+    def _test_ignore_listener(cls, conn):
+        def handler(signum, frame):
+            pass
+        signal.signal(signal.SIGUSR1, handler)
+        with multiprocessing.connection.Listener() as l:
+            conn.send(l.address)
+            a = l.accept()
+            a.send('welcome')
+
+    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
+    def test_ignore_listener(self):
+        conn, child_conn = multiprocessing.Pipe()
+        try:
+            p = multiprocessing.Process(target=self._test_ignore_listener,
+                                        args=(child_conn,))
+            p.daemon = True
+            p.start()
+            child_conn.close()
+            address = conn.recv()
+            time.sleep(0.1)
+            os.kill(p.pid, signal.SIGUSR1)
+            time.sleep(0.1)
+            client = multiprocessing.connection.Client(address)
+            self.assertEqual(client.recv(), 'welcome')
+            p.join()
+        finally:
+            conn.close()
+
+class TestStartMethod(unittest.TestCase):
+    def test_set_get(self):
+        count = 0
+        old_method = multiprocessing.get_start_method()
+        try:
+            for method in ('fork', 'spawn', 'forkserver'):
+                try:
+                    multiprocessing.set_start_method(method)
+                except ValueError:
+                    continue
+                self.assertEqual(multiprocessing.get_start_method(), method)
+                count += 1
+        finally:
+            multiprocessing.set_start_method(old_method)
+        self.assertGreaterEqual(count, 1)
+
+    def test_get_all(self):
+        methods = multiprocessing.get_all_start_methods()
+        if sys.platform == 'win32':
+            self.assertEqual(methods, ['spawn'])
+        else:
+            self.assertTrue(methods == ['fork', 'spawn'] or
+                            methods == ['fork', 'spawn', 'forkserver'])
+
+#
+# Check that killing process does not leak named semaphores
+#
+
+@unittest.skipIf(sys.platform == "win32",
+                 "test semantics don't make sense on Windows")
+class TestSemaphoreTracker(unittest.TestCase):
+    def test_semaphore_tracker(self):
+        import subprocess
+        cmd = '''if 1:
+            import multiprocessing as mp, time, os
+            mp.set_start_method("spawn")
+            lock1 = mp.Lock()
+            lock2 = mp.Lock()
+            os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
+            os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
+            time.sleep(10)
+        '''
+        r, w = os.pipe()
+        p = subprocess.Popen([sys.executable, '-c', cmd % (w, w)],
+                             pass_fds=[w])
+        os.close(w)
+        with open(r, 'rb', closefd=True) as f:
+            name1 = f.readline().rstrip().decode('ascii')
+            name2 = f.readline().rstrip().decode('ascii')
+        _multiprocessing.sem_unlink(name1)
+        p.terminate()
+        p.wait()
+        time.sleep(1.0)
+        with self.assertRaises(OSError) as ctx:
+            _multiprocessing.sem_unlink(name2)
+        # docs say it should be ENOENT, but OSX seems to give EINVAL
+        self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
+
+#
+# Mixins
+#
+
+class ProcessesMixin(object):
+    TYPE = 'processes'
+    Process = multiprocessing.Process
+    connection = multiprocessing.connection
+    current_process = staticmethod(multiprocessing.current_process)
+    active_children = staticmethod(multiprocessing.active_children)
+    Pool = staticmethod(multiprocessing.Pool)
+    Pipe = staticmethod(multiprocessing.Pipe)
+    Queue = staticmethod(multiprocessing.Queue)
+    JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
+    Lock = staticmethod(multiprocessing.Lock)
+    RLock = staticmethod(multiprocessing.RLock)
+    Semaphore = staticmethod(multiprocessing.Semaphore)
+    BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
+    Condition = staticmethod(multiprocessing.Condition)
+    Event = staticmethod(multiprocessing.Event)
+    Barrier = staticmethod(multiprocessing.Barrier)
+    Value = staticmethod(multiprocessing.Value)
+    Array = staticmethod(multiprocessing.Array)
+    RawValue = staticmethod(multiprocessing.RawValue)
+    RawArray = staticmethod(multiprocessing.RawArray)
+
+
+class ManagerMixin(object):
+    TYPE = 'manager'
+    Process = multiprocessing.Process
+    Queue = property(operator.attrgetter('manager.Queue'))
+    JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
+    Lock = property(operator.attrgetter('manager.Lock'))
+    RLock = property(operator.attrgetter('manager.RLock'))
+    Semaphore = property(operator.attrgetter('manager.Semaphore'))
+    BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
+    Condition = property(operator.attrgetter('manager.Condition'))
+    Event = property(operator.attrgetter('manager.Event'))
+    Barrier = property(operator.attrgetter('manager.Barrier'))
+    Value = property(operator.attrgetter('manager.Value'))
+    Array = property(operator.attrgetter('manager.Array'))
+    list = property(operator.attrgetter('manager.list'))
+    dict = property(operator.attrgetter('manager.dict'))
+    Namespace = property(operator.attrgetter('manager.Namespace'))
+
+    @classmethod
+    def Pool(cls, *args, **kwds):
+        return cls.manager.Pool(*args, **kwds)
+
+    @classmethod
+    def setUpClass(cls):
+        cls.manager = multiprocessing.Manager()
+
+    @classmethod
+    def tearDownClass(cls):
+        # only the manager process should be returned by active_children()
+        # but this can take a bit on slow machines, so wait a few seconds
+        # if there are other children too (see #17395)
+        t = 0.01
+        while len(multiprocessing.active_children()) > 1 and t < 5:
+            time.sleep(t)
+            t *= 2
+        gc.collect()       # do garbage collection
+        if cls.manager._number_of_objects() != 0:
+            # This is not really an error since some tests do not
+            # ensure that all processes which hold a reference to a
+            # managed object have been joined.
+            print('Shared objects which still exist at manager shutdown:')
+            print(cls.manager._debug_info())
+        cls.manager.shutdown()
+        cls.manager.join()
+        cls.manager = None
+
+
+class ThreadsMixin(object):
+    TYPE = 'threads'
+    Process = multiprocessing.dummy.Process
+    connection = multiprocessing.dummy.connection
+    current_process = staticmethod(multiprocessing.dummy.current_process)
+    active_children = staticmethod(multiprocessing.dummy.active_children)
+    Pool = staticmethod(multiprocessing.Pool)
+    Pipe = staticmethod(multiprocessing.dummy.Pipe)
+    Queue = staticmethod(multiprocessing.dummy.Queue)
+    JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
+    Lock = staticmethod(multiprocessing.dummy.Lock)
+    RLock = staticmethod(multiprocessing.dummy.RLock)
+    Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
+    BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
+    Condition = staticmethod(multiprocessing.dummy.Condition)
+    Event = staticmethod(multiprocessing.dummy.Event)
+    Barrier = staticmethod(multiprocessing.dummy.Barrier)
+    Value = staticmethod(multiprocessing.dummy.Value)
+    Array = staticmethod(multiprocessing.dummy.Array)
+
+#
+# Functions used to create test cases from the base ones in this module
+#
+
+def install_tests_in_module_dict(remote_globs, start_method):
+    __module__ = remote_globs['__name__']
+    local_globs = globals()
+    ALL_TYPES = {'processes', 'threads', 'manager'}
+
+    for name, base in local_globs.items():
+        if not isinstance(base, type):
+            continue
+        if issubclass(base, BaseTestCase):
+            if base is BaseTestCase:
+                continue
+            assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
+            for type_ in base.ALLOWED_TYPES:
+                newname = 'With' + type_.capitalize() + name[1:]
+                Mixin = local_globs[type_.capitalize() + 'Mixin']
+                class Temp(base, Mixin, unittest.TestCase):
+                    pass
+                Temp.__name__ = Temp.__qualname__ = newname
+                Temp.__module__ = __module__
+                remote_globs[newname] = Temp
+        elif issubclass(base, unittest.TestCase):
+            class Temp(base, object):
+                pass
+            Temp.__name__ = Temp.__qualname__ = name
+            Temp.__module__ = __module__
+            remote_globs[name] = Temp
+
+    def setUpModule():
+        remote_globs['old_start_method'] = multiprocessing.get_start_method()
+        try:
+            multiprocessing.set_start_method(start_method)
+        except ValueError:
+            raise unittest.SkipTest(start_method +
+                                    ' start method not supported')
+        print('Using start method %r' % multiprocessing.get_start_method())
+
+        if sys.platform.startswith("linux"):
+            try:
+                lock = multiprocessing.RLock()
+            except OSError:
+                raise unittest.SkipTest("OSError raises on RLock creation, "
+                                        "see issue 3111!")
+        check_enough_semaphores()
+        util.get_temp_dir()     # creates temp directory
+        multiprocessing.get_logger().setLevel(LOG_LEVEL)
+
+    def tearDownModule():
+        # pause a bit so we don't get warning about dangling threads/processes
+        time.sleep(0.5)
+        multiprocessing.set_start_method(remote_globs['old_start_method'])
+
+    remote_globs['setUpModule'] = setUpModule
+    remote_globs['tearDownModule'] = tearDownModule
diff -r 9877c25d9556 -r b3620777f54c Lib/test/test_multiprocessing.py
--- a/Lib/test/test_multiprocessing.py	Wed Aug 07 05:54:28 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,3606 +0,0 @@
-#!/usr/bin/env python3
-
-#
-# Unit tests for the multiprocessing package
-#
-
-import unittest
-import queue as pyqueue
-import time
-import io
-import itertools
-import sys
-import os
-import gc
-import errno
-import signal
-import array
-import socket
-import random
-import logging
-import struct
-import operator
-import test.support
-import test.script_helper
-
-
-# Skip tests if _multiprocessing wasn't built.
-_multiprocessing = test.support.import_module('_multiprocessing')
-# Skip tests if sem_open implementation is broken.
-test.support.import_module('multiprocessing.synchronize')
-# import threading after _multiprocessing to raise a more relevant error
-# message: "No module named _multiprocessing". _multiprocessing is not compiled
-# without thread support.
-import threading
-
-import multiprocessing.dummy
-import multiprocessing.connection
-import multiprocessing.managers
-import multiprocessing.heap
-import multiprocessing.pool
-
-from multiprocessing import util
-
-try:
-    from multiprocessing import reduction
-    HAS_REDUCTION = True
-except ImportError:
-    HAS_REDUCTION = False
-
-try:
-    from multiprocessing.sharedctypes import Value, copy
-    HAS_SHAREDCTYPES = True
-except ImportError:
-    HAS_SHAREDCTYPES = False
-
-try:
-    import msvcrt
-except ImportError:
-    msvcrt = None
-
-#
-#
-#
-
-def latin(s):
-    return s.encode('latin')
-
-#
-# Constants
-#
-
-LOG_LEVEL = util.SUBWARNING
-#LOG_LEVEL = logging.DEBUG
-
-DELTA = 0.1
-CHECK_TIMINGS = False     # making true makes tests take a lot longer
-                          # and can sometimes cause some non-serious
-                          # failures because some calls block a bit
-                          # longer than expected
-if CHECK_TIMINGS:
-    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
-else:
-    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
-
-HAVE_GETVALUE = not getattr(_multiprocessing,
-                            'HAVE_BROKEN_SEM_GETVALUE', False)
-
-WIN32 = (sys.platform == "win32")
-
-from multiprocessing.connection import wait
-
-def wait_for_handle(handle, timeout):
-    if timeout is not None and timeout < 0.0:
-        timeout = None
-    return wait([handle], timeout)
-
-try:
-    MAXFD = os.sysconf("SC_OPEN_MAX")
-except:
-    MAXFD = 256
-
-#
-# Some tests require ctypes
-#
-
-try:
-    from ctypes import Structure, c_int, c_double
-except ImportError:
-    Structure = object
-    c_int = c_double = None
-
-
-def check_enough_semaphores():
-    """Check that the system supports enough semaphores to run the test."""
-    # minimum number of semaphores available according to POSIX
-    nsems_min = 256
-    try:
-        nsems = os.sysconf("SC_SEM_NSEMS_MAX")
-    except (AttributeError, ValueError):
-        # sysconf not available or setting not available
-        return
-    if nsems == -1 or nsems >= nsems_min:
-        return
-    raise unittest.SkipTest("The OS doesn't support enough semaphores "
-                            "to run the test (required: %d)." % nsems_min)
-
-
-#
-# Creates a wrapper for a function which records the time it takes to finish
-#
-
-class TimingWrapper(object):
-
-    def __init__(self, func):
-        self.func = func
-        self.elapsed = None
-
-    def __call__(self, *args, **kwds):
-        t = time.time()
-        try:
-            return self.func(*args, **kwds)
-        finally:
-            self.elapsed = time.time() - t
-
-#
-# Base class for test cases
-#
-
-class BaseTestCase(object):
-
-    ALLOWED_TYPES = ('processes', 'manager', 'threads')
-
-    def assertTimingAlmostEqual(self, a, b):
-        if CHECK_TIMINGS:
-            self.assertAlmostEqual(a, b, 1)
-
-    def assertReturnsIfImplemented(self, value, func, *args):
-        try:
-            res = func(*args)
-        except NotImplementedError:
-            pass
-        else:
-            return self.assertEqual(value, res)
-
-    # For the sanity of Windows users, rather than crashing or freezing in
-    # multiple ways.
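The __reduce__ override that follows is what stops a test case from being pickled into a child process by accident: on Windows (and under spawn generally), Process arguments are pickled, so passing a bound method of the test case would drag the whole TestCase into the child. A tiny sketch of the mechanism, assuming nothing beyond the pickle module (Unpicklable is an illustrative name):

import pickle

class Unpicklable(object):
    # Mirrors the BaseTestCase trick below: any attempt to pickle
    # the object raises instead of silently serializing it.
    def __reduce__(self, *args):
        raise NotImplementedError("shouldn't try to pickle a test case")

    __reduce_ex__ = __reduce__

if __name__ == '__main__':
    try:
        pickle.dumps(Unpicklable())
    except NotImplementedError as e:
        print('pickling blocked:', e)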
- def __reduce__(self, *args): - raise NotImplementedError("shouldn't try to pickle a test case") - - __reduce_ex__ = __reduce__ - -# -# Return the value of a semaphore -# - -def get_value(self): - try: - return self.get_value() - except AttributeError: - try: - return self._Semaphore__value - except AttributeError: - try: - return self._value - except AttributeError: - raise NotImplementedError - -# -# Testcases -# - -class _TestProcess(BaseTestCase): - - ALLOWED_TYPES = ('processes', 'threads') - - def test_current(self): - if self.TYPE == 'threads': - return - - current = self.current_process() - authkey = current.authkey - - self.assertTrue(current.is_alive()) - self.assertTrue(not current.daemon) - self.assertIsInstance(authkey, bytes) - self.assertTrue(len(authkey) > 0) - self.assertEqual(current.ident, os.getpid()) - self.assertEqual(current.exitcode, None) - - def test_daemon_argument(self): - if self.TYPE == "threads": - return - - # By default uses the current process's daemon flag. - proc0 = self.Process(target=self._test) - self.assertEqual(proc0.daemon, self.current_process().daemon) - proc1 = self.Process(target=self._test, daemon=True) - self.assertTrue(proc1.daemon) - proc2 = self.Process(target=self._test, daemon=False) - self.assertFalse(proc2.daemon) - - @classmethod - def _test(cls, q, *args, **kwds): - current = cls.current_process() - q.put(args) - q.put(kwds) - q.put(current.name) - if cls.TYPE != 'threads': - q.put(bytes(current.authkey)) - q.put(current.pid) - - def test_process(self): - q = self.Queue(1) - e = self.Event() - args = (q, 1, 2) - kwargs = {'hello':23, 'bye':2.54} - name = 'SomeProcess' - p = self.Process( - target=self._test, args=args, kwargs=kwargs, name=name - ) - p.daemon = True - current = self.current_process() - - if self.TYPE != 'threads': - self.assertEqual(p.authkey, current.authkey) - self.assertEqual(p.is_alive(), False) - self.assertEqual(p.daemon, True) - self.assertNotIn(p, self.active_children()) - self.assertTrue(type(self.active_children()) is list) - self.assertEqual(p.exitcode, None) - - p.start() - - self.assertEqual(p.exitcode, None) - self.assertEqual(p.is_alive(), True) - self.assertIn(p, self.active_children()) - - self.assertEqual(q.get(), args[1:]) - self.assertEqual(q.get(), kwargs) - self.assertEqual(q.get(), p.name) - if self.TYPE != 'threads': - self.assertEqual(q.get(), current.authkey) - self.assertEqual(q.get(), p.pid) - - p.join() - - self.assertEqual(p.exitcode, 0) - self.assertEqual(p.is_alive(), False) - self.assertNotIn(p, self.active_children()) - - @classmethod - def _test_terminate(cls): - time.sleep(1000) - - def test_terminate(self): - if self.TYPE == 'threads': - return - - p = self.Process(target=self._test_terminate) - p.daemon = True - p.start() - - self.assertEqual(p.is_alive(), True) - self.assertIn(p, self.active_children()) - self.assertEqual(p.exitcode, None) - - join = TimingWrapper(p.join) - - self.assertEqual(join(0), None) - self.assertTimingAlmostEqual(join.elapsed, 0.0) - self.assertEqual(p.is_alive(), True) - - self.assertEqual(join(-1), None) - self.assertTimingAlmostEqual(join.elapsed, 0.0) - self.assertEqual(p.is_alive(), True) - - p.terminate() - - self.assertEqual(join(), None) - self.assertTimingAlmostEqual(join.elapsed, 0.0) - - self.assertEqual(p.is_alive(), False) - self.assertNotIn(p, self.active_children()) - - p.join() - - # XXX sometimes get p.exitcode == 0 on Windows ... 
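The commented-out assertion that follows reflects the expected POSIX behaviour: a child killed by terminate() reports a negative exit code, the negated signal number. A minimal sketch, assuming a POSIX platform (Windows may report 0, which is why the test leaves the check disabled):

import multiprocessing
import signal
import time

def sleep_forever():
    time.sleep(1000)

if __name__ == '__main__':
    p = multiprocessing.Process(target=sleep_forever)
    p.start()
    p.terminate()
    p.join()
    print(p.exitcode == -signal.SIGTERM)   # True on POSIX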
- #self.assertEqual(p.exitcode, -signal.SIGTERM) - - def test_cpu_count(self): - try: - cpus = multiprocessing.cpu_count() - except NotImplementedError: - cpus = 1 - self.assertTrue(type(cpus) is int) - self.assertTrue(cpus >= 1) - - def test_active_children(self): - self.assertEqual(type(self.active_children()), list) - - p = self.Process(target=time.sleep, args=(DELTA,)) - self.assertNotIn(p, self.active_children()) - - p.daemon = True - p.start() - self.assertIn(p, self.active_children()) - - p.join() - self.assertNotIn(p, self.active_children()) - - @classmethod - def _test_recursion(cls, wconn, id): - from multiprocessing import forking - wconn.send(id) - if len(id) < 2: - for i in range(2): - p = cls.Process( - target=cls._test_recursion, args=(wconn, id+[i]) - ) - p.start() - p.join() - - def test_recursion(self): - rconn, wconn = self.Pipe(duplex=False) - self._test_recursion(wconn, []) - - time.sleep(DELTA) - result = [] - while rconn.poll(): - result.append(rconn.recv()) - - expected = [ - [], - [0], - [0, 0], - [0, 1], - [1], - [1, 0], - [1, 1] - ] - self.assertEqual(result, expected) - - @classmethod - def _test_sentinel(cls, event): - event.wait(10.0) - - def test_sentinel(self): - if self.TYPE == "threads": - return - event = self.Event() - p = self.Process(target=self._test_sentinel, args=(event,)) - with self.assertRaises(ValueError): - p.sentinel - p.start() - self.addCleanup(p.join) - sentinel = p.sentinel - self.assertIsInstance(sentinel, int) - self.assertFalse(wait_for_handle(sentinel, timeout=0.0)) - event.set() - p.join() - self.assertTrue(wait_for_handle(sentinel, timeout=DELTA)) - -# -# -# - -class _UpperCaser(multiprocessing.Process): - - def __init__(self): - multiprocessing.Process.__init__(self) - self.child_conn, self.parent_conn = multiprocessing.Pipe() - - def run(self): - self.parent_conn.close() - for s in iter(self.child_conn.recv, None): - self.child_conn.send(s.upper()) - self.child_conn.close() - - def submit(self, s): - assert type(s) is str - self.parent_conn.send(s) - return self.parent_conn.recv() - - def stop(self): - self.parent_conn.send(None) - self.parent_conn.close() - self.child_conn.close() - -class _TestSubclassingProcess(BaseTestCase): - - ALLOWED_TYPES = ('processes',) - - def test_subclassing(self): - uppercaser = _UpperCaser() - uppercaser.daemon = True - uppercaser.start() - self.assertEqual(uppercaser.submit('hello'), 'HELLO') - self.assertEqual(uppercaser.submit('world'), 'WORLD') - uppercaser.stop() - uppercaser.join() - - def test_stderr_flush(self): - # sys.stderr is flushed at process shutdown (issue #13812) - if self.TYPE == "threads": - return - - testfn = test.support.TESTFN - self.addCleanup(test.support.unlink, testfn) - proc = self.Process(target=self._test_stderr_flush, args=(testfn,)) - proc.start() - proc.join() - with open(testfn, 'r') as f: - err = f.read() - # The whole traceback was printed - self.assertIn("ZeroDivisionError", err) - self.assertIn("test_multiprocessing.py", err) - self.assertIn("1/0 # MARKER", err) - - @classmethod - def _test_stderr_flush(cls, testfn): - sys.stderr = open(testfn, 'w') - 1/0 # MARKER - - - @classmethod - def _test_sys_exit(cls, reason, testfn): - sys.stderr = open(testfn, 'w') - sys.exit(reason) - - def test_sys_exit(self): - # See Issue 13854 - if self.TYPE == 'threads': - return - - testfn = test.support.TESTFN - self.addCleanup(test.support.unlink, testfn) - - for reason, code in (([1, 2, 3], 1), ('ignore this', 0)): - p = self.Process(target=self._test_sys_exit, args=(reason, 
testfn)) - p.daemon = True - p.start() - p.join(5) - self.assertEqual(p.exitcode, code) - - with open(testfn, 'r') as f: - self.assertEqual(f.read().rstrip(), str(reason)) - - for reason in (True, False, 8): - p = self.Process(target=sys.exit, args=(reason,)) - p.daemon = True - p.start() - p.join(5) - self.assertEqual(p.exitcode, reason) - -# -# -# - -def queue_empty(q): - if hasattr(q, 'empty'): - return q.empty() - else: - return q.qsize() == 0 - -def queue_full(q, maxsize): - if hasattr(q, 'full'): - return q.full() - else: - return q.qsize() == maxsize - - -class _TestQueue(BaseTestCase): - - - @classmethod - def _test_put(cls, queue, child_can_start, parent_can_continue): - child_can_start.wait() - for i in range(6): - queue.get() - parent_can_continue.set() - - def test_put(self): - MAXSIZE = 6 - queue = self.Queue(maxsize=MAXSIZE) - child_can_start = self.Event() - parent_can_continue = self.Event() - - proc = self.Process( - target=self._test_put, - args=(queue, child_can_start, parent_can_continue) - ) - proc.daemon = True - proc.start() - - self.assertEqual(queue_empty(queue), True) - self.assertEqual(queue_full(queue, MAXSIZE), False) - - queue.put(1) - queue.put(2, True) - queue.put(3, True, None) - queue.put(4, False) - queue.put(5, False, None) - queue.put_nowait(6) - - # the values may be in buffer but not yet in pipe so sleep a bit - time.sleep(DELTA) - - self.assertEqual(queue_empty(queue), False) - self.assertEqual(queue_full(queue, MAXSIZE), True) - - put = TimingWrapper(queue.put) - put_nowait = TimingWrapper(queue.put_nowait) - - self.assertRaises(pyqueue.Full, put, 7, False) - self.assertTimingAlmostEqual(put.elapsed, 0) - - self.assertRaises(pyqueue.Full, put, 7, False, None) - self.assertTimingAlmostEqual(put.elapsed, 0) - - self.assertRaises(pyqueue.Full, put_nowait, 7) - self.assertTimingAlmostEqual(put_nowait.elapsed, 0) - - self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1) - self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) - - self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2) - self.assertTimingAlmostEqual(put.elapsed, 0) - - self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3) - self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) - - child_can_start.set() - parent_can_continue.wait() - - self.assertEqual(queue_empty(queue), True) - self.assertEqual(queue_full(queue, MAXSIZE), False) - - proc.join() - - @classmethod - def _test_get(cls, queue, child_can_start, parent_can_continue): - child_can_start.wait() - #queue.put(1) - queue.put(2) - queue.put(3) - queue.put(4) - queue.put(5) - parent_can_continue.set() - - def test_get(self): - queue = self.Queue() - child_can_start = self.Event() - parent_can_continue = self.Event() - - proc = self.Process( - target=self._test_get, - args=(queue, child_can_start, parent_can_continue) - ) - proc.daemon = True - proc.start() - - self.assertEqual(queue_empty(queue), True) - - child_can_start.set() - parent_can_continue.wait() - - time.sleep(DELTA) - self.assertEqual(queue_empty(queue), False) - - # Hangs unexpectedly, remove for now - #self.assertEqual(queue.get(), 1) - self.assertEqual(queue.get(True, None), 2) - self.assertEqual(queue.get(True), 3) - self.assertEqual(queue.get(timeout=1), 4) - self.assertEqual(queue.get_nowait(), 5) - - self.assertEqual(queue_empty(queue), True) - - get = TimingWrapper(queue.get) - get_nowait = TimingWrapper(queue.get_nowait) - - self.assertRaises(pyqueue.Empty, get, False) - self.assertTimingAlmostEqual(get.elapsed, 0) - - self.assertRaises(pyqueue.Empty, get, 
False, None) - self.assertTimingAlmostEqual(get.elapsed, 0) - - self.assertRaises(pyqueue.Empty, get_nowait) - self.assertTimingAlmostEqual(get_nowait.elapsed, 0) - - self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1) - self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) - - self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2) - self.assertTimingAlmostEqual(get.elapsed, 0) - - self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3) - self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) - - proc.join() - - @classmethod - def _test_fork(cls, queue): - for i in range(10, 20): - queue.put(i) - # note that at this point the items may only be buffered, so the - # process cannot shutdown until the feeder thread has finished - # pushing items onto the pipe. - - def test_fork(self): - # Old versions of Queue would fail to create a new feeder - # thread for a forked process if the original process had its - # own feeder thread. This test checks that this no longer - # happens. - - queue = self.Queue() - - # put items on queue so that main process starts a feeder thread - for i in range(10): - queue.put(i) - - # wait to make sure thread starts before we fork a new process - time.sleep(DELTA) - - # fork process - p = self.Process(target=self._test_fork, args=(queue,)) - p.daemon = True - p.start() - - # check that all expected items are in the queue - for i in range(20): - self.assertEqual(queue.get(), i) - self.assertRaises(pyqueue.Empty, queue.get, False) - - p.join() - - def test_qsize(self): - q = self.Queue() - try: - self.assertEqual(q.qsize(), 0) - except NotImplementedError: - return - q.put(1) - self.assertEqual(q.qsize(), 1) - q.put(5) - self.assertEqual(q.qsize(), 2) - q.get() - self.assertEqual(q.qsize(), 1) - q.get() - self.assertEqual(q.qsize(), 0) - - @classmethod - def _test_task_done(cls, q): - for obj in iter(q.get, None): - time.sleep(DELTA) - q.task_done() - - def test_task_done(self): - queue = self.JoinableQueue() - - if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'): - self.skipTest("requires 'queue.task_done()' method") - - workers = [self.Process(target=self._test_task_done, args=(queue,)) - for i in range(4)] - - for p in workers: - p.daemon = True - p.start() - - for i in range(10): - queue.put(i) - - queue.join() - - for p in workers: - queue.put(None) - - for p in workers: - p.join() - - def test_timeout(self): - q = multiprocessing.Queue() - start = time.time() - self.assertRaises(pyqueue.Empty, q.get, True, 0.2) - delta = time.time() - start - self.assertGreaterEqual(delta, 0.19) - -# -# -# - -class _TestLock(BaseTestCase): - - def test_lock(self): - lock = self.Lock() - self.assertEqual(lock.acquire(), True) - self.assertEqual(lock.acquire(False), False) - self.assertEqual(lock.release(), None) - self.assertRaises((ValueError, threading.ThreadError), lock.release) - - def test_rlock(self): - lock = self.RLock() - self.assertEqual(lock.acquire(), True) - self.assertEqual(lock.acquire(), True) - self.assertEqual(lock.acquire(), True) - self.assertEqual(lock.release(), None) - self.assertEqual(lock.release(), None) - self.assertEqual(lock.release(), None) - self.assertRaises((AssertionError, RuntimeError), lock.release) - - def test_lock_context(self): - with self.Lock(): - pass - - -class _TestSemaphore(BaseTestCase): - - def _test_semaphore(self, sem): - self.assertReturnsIfImplemented(2, get_value, sem) - self.assertEqual(sem.acquire(), True) - self.assertReturnsIfImplemented(1, get_value, sem) - self.assertEqual(sem.acquire(), True) - 
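The lock tests above pivot on the distinction between non-recursive and recursive locks. A compact, standalone sketch of the semantics _TestLock checks:

import multiprocessing

if __name__ == '__main__':
    lock = multiprocessing.Lock()
    print(lock.acquire())          # True
    print(lock.acquire(False))     # False: a plain Lock cannot be re-acquired
    lock.release()

    rlock = multiprocessing.RLock()
    print(rlock.acquire(), rlock.acquire())   # True True: recursive
    rlock.release()
    rlock.release()                # must be released once per acquire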
self.assertReturnsIfImplemented(0, get_value, sem) - self.assertEqual(sem.acquire(False), False) - self.assertReturnsIfImplemented(0, get_value, sem) - self.assertEqual(sem.release(), None) - self.assertReturnsIfImplemented(1, get_value, sem) - self.assertEqual(sem.release(), None) - self.assertReturnsIfImplemented(2, get_value, sem) - - def test_semaphore(self): - sem = self.Semaphore(2) - self._test_semaphore(sem) - self.assertEqual(sem.release(), None) - self.assertReturnsIfImplemented(3, get_value, sem) - self.assertEqual(sem.release(), None) - self.assertReturnsIfImplemented(4, get_value, sem) - - def test_bounded_semaphore(self): - sem = self.BoundedSemaphore(2) - self._test_semaphore(sem) - # Currently fails on OS/X - #if HAVE_GETVALUE: - # self.assertRaises(ValueError, sem.release) - # self.assertReturnsIfImplemented(2, get_value, sem) - - def test_timeout(self): - if self.TYPE != 'processes': - return - - sem = self.Semaphore(0) - acquire = TimingWrapper(sem.acquire) - - self.assertEqual(acquire(False), False) - self.assertTimingAlmostEqual(acquire.elapsed, 0.0) - - self.assertEqual(acquire(False, None), False) - self.assertTimingAlmostEqual(acquire.elapsed, 0.0) - - self.assertEqual(acquire(False, TIMEOUT1), False) - self.assertTimingAlmostEqual(acquire.elapsed, 0) - - self.assertEqual(acquire(True, TIMEOUT2), False) - self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2) - - self.assertEqual(acquire(timeout=TIMEOUT3), False) - self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) - - -class _TestCondition(BaseTestCase): - - @classmethod - def f(cls, cond, sleeping, woken, timeout=None): - cond.acquire() - sleeping.release() - cond.wait(timeout) - woken.release() - cond.release() - - def check_invariant(self, cond): - # this is only supposed to succeed when there are no sleepers - if self.TYPE == 'processes': - try: - sleepers = (cond._sleeping_count.get_value() - - cond._woken_count.get_value()) - self.assertEqual(sleepers, 0) - self.assertEqual(cond._wait_semaphore.get_value(), 0) - except NotImplementedError: - pass - - def test_notify(self): - cond = self.Condition() - sleeping = self.Semaphore(0) - woken = self.Semaphore(0) - - p = self.Process(target=self.f, args=(cond, sleeping, woken)) - p.daemon = True - p.start() - - p = threading.Thread(target=self.f, args=(cond, sleeping, woken)) - p.daemon = True - p.start() - - # wait for both children to start sleeping - sleeping.acquire() - sleeping.acquire() - - # check no process/thread has woken up - time.sleep(DELTA) - self.assertReturnsIfImplemented(0, get_value, woken) - - # wake up one process/thread - cond.acquire() - cond.notify() - cond.release() - - # check one process/thread has woken up - time.sleep(DELTA) - self.assertReturnsIfImplemented(1, get_value, woken) - - # wake up another - cond.acquire() - cond.notify() - cond.release() - - # check other has woken up - time.sleep(DELTA) - self.assertReturnsIfImplemented(2, get_value, woken) - - # check state is not mucked up - self.check_invariant(cond) - p.join() - - def test_notify_all(self): - cond = self.Condition() - sleeping = self.Semaphore(0) - woken = self.Semaphore(0) - - # start some threads/processes which will timeout - for i in range(3): - p = self.Process(target=self.f, - args=(cond, sleeping, woken, TIMEOUT1)) - p.daemon = True - p.start() - - t = threading.Thread(target=self.f, - args=(cond, sleeping, woken, TIMEOUT1)) - t.daemon = True - t.start() - - # wait for them all to sleep - for i in range(6): - sleeping.acquire() - - # check they have all timed 
out - for i in range(6): - woken.acquire() - self.assertReturnsIfImplemented(0, get_value, woken) - - # check state is not mucked up - self.check_invariant(cond) - - # start some more threads/processes - for i in range(3): - p = self.Process(target=self.f, args=(cond, sleeping, woken)) - p.daemon = True - p.start() - - t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) - t.daemon = True - t.start() - - # wait for them to all sleep - for i in range(6): - sleeping.acquire() - - # check no process/thread has woken up - time.sleep(DELTA) - self.assertReturnsIfImplemented(0, get_value, woken) - - # wake them all up - cond.acquire() - cond.notify_all() - cond.release() - - # check they have all woken - for i in range(10): - try: - if get_value(woken) == 6: - break - except NotImplementedError: - break - time.sleep(DELTA) - self.assertReturnsIfImplemented(6, get_value, woken) - - # check state is not mucked up - self.check_invariant(cond) - - def test_timeout(self): - cond = self.Condition() - wait = TimingWrapper(cond.wait) - cond.acquire() - res = wait(TIMEOUT1) - cond.release() - self.assertEqual(res, False) - self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) - - @classmethod - def _test_waitfor_f(cls, cond, state): - with cond: - state.value = 0 - cond.notify() - result = cond.wait_for(lambda : state.value==4) - if not result or state.value != 4: - sys.exit(1) - - @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') - def test_waitfor(self): - # based on test in test/lock_tests.py - cond = self.Condition() - state = self.Value('i', -1) - - p = self.Process(target=self._test_waitfor_f, args=(cond, state)) - p.daemon = True - p.start() - - with cond: - result = cond.wait_for(lambda : state.value==0) - self.assertTrue(result) - self.assertEqual(state.value, 0) - - for i in range(4): - time.sleep(0.01) - with cond: - state.value += 1 - cond.notify() - - p.join(5) - self.assertFalse(p.is_alive()) - self.assertEqual(p.exitcode, 0) - - @classmethod - def _test_waitfor_timeout_f(cls, cond, state, success, sem): - sem.release() - with cond: - expected = 0.1 - dt = time.time() - result = cond.wait_for(lambda : state.value==4, timeout=expected) - dt = time.time() - dt - # borrow logic in assertTimeout() from test/lock_tests.py - if not result and expected * 0.6 < dt < expected * 10.0: - success.value = True - - @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes') - def test_waitfor_timeout(self): - # based on test in test/lock_tests.py - cond = self.Condition() - state = self.Value('i', 0) - success = self.Value('i', False) - sem = self.Semaphore(0) - - p = self.Process(target=self._test_waitfor_timeout_f, - args=(cond, state, success, sem)) - p.daemon = True - p.start() - self.assertTrue(sem.acquire(timeout=10)) - - # Only increment 3 times, so state == 4 is never reached. 
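The wait_for() tests around this point depend on the predicate being re-evaluated each time the condition is notified, with the predicate's final value returned (False on timeout). A self-contained sketch of that round trip — bump is an illustrative helper, not part of the suite:

import multiprocessing

def bump(cond, state):
    with cond:
        state.value = 4
        cond.notify()

if __name__ == '__main__':
    cond = multiprocessing.Condition()
    state = multiprocessing.Value('i', 0)
    p = multiprocessing.Process(target=bump, args=(cond, state))
    with cond:
        p.start()
        # wait_for releases the lock while waiting, letting the child in
        ok = cond.wait_for(lambda: state.value == 4, timeout=10)
    p.join()
    print(ok)   # True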
- for i in range(3): - time.sleep(0.01) - with cond: - state.value += 1 - cond.notify() - - p.join(5) - self.assertTrue(success.value) - - @classmethod - def _test_wait_result(cls, c, pid): - with c: - c.notify() - time.sleep(1) - if pid is not None: - os.kill(pid, signal.SIGINT) - - def test_wait_result(self): - if isinstance(self, ProcessesMixin) and sys.platform != 'win32': - pid = os.getpid() - else: - pid = None - - c = self.Condition() - with c: - self.assertFalse(c.wait(0)) - self.assertFalse(c.wait(0.1)) - - p = self.Process(target=self._test_wait_result, args=(c, pid)) - p.start() - - self.assertTrue(c.wait(10)) - if pid is not None: - self.assertRaises(KeyboardInterrupt, c.wait, 10) - - p.join() - - -class _TestEvent(BaseTestCase): - - @classmethod - def _test_event(cls, event): - time.sleep(TIMEOUT2) - event.set() - - def test_event(self): - event = self.Event() - wait = TimingWrapper(event.wait) - - # Removed temporarily, due to API shear, this does not - # work with threading._Event objects. is_set == isSet - self.assertEqual(event.is_set(), False) - - # Removed, threading.Event.wait() will return the value of the __flag - # instead of None. API Shear with the semaphore backed mp.Event - self.assertEqual(wait(0.0), False) - self.assertTimingAlmostEqual(wait.elapsed, 0.0) - self.assertEqual(wait(TIMEOUT1), False) - self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) - - event.set() - - # See note above on the API differences - self.assertEqual(event.is_set(), True) - self.assertEqual(wait(), True) - self.assertTimingAlmostEqual(wait.elapsed, 0.0) - self.assertEqual(wait(TIMEOUT1), True) - self.assertTimingAlmostEqual(wait.elapsed, 0.0) - # self.assertEqual(event.is_set(), True) - - event.clear() - - #self.assertEqual(event.is_set(), False) - - p = self.Process(target=self._test_event, args=(event,)) - p.daemon = True - p.start() - self.assertEqual(wait(), True) - -# -# Tests for Barrier - adapted from tests in test/lock_tests.py -# - -# Many of the tests for threading.Barrier use a list as an atomic -# counter: a value is appended to increment the counter, and the -# length of the list gives the value. We use the class DummyList -# for the same purpose. - -class _DummyList(object): - - def __init__(self): - wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i')) - lock = multiprocessing.Lock() - self.__setstate__((wrapper, lock)) - self._lengthbuf[0] = 0 - - def __setstate__(self, state): - (self._wrapper, self._lock) = state - self._lengthbuf = self._wrapper.create_memoryview().cast('i') - - def __getstate__(self): - return (self._wrapper, self._lock) - - def append(self, _): - with self._lock: - self._lengthbuf[0] += 1 - - def __len__(self): - with self._lock: - return self._lengthbuf[0] - -def _wait(): - # A crude wait/yield function not relying on synchronization primitives. - time.sleep(0.01) - - -class Bunch(object): - """ - A bunch of threads. - """ - def __init__(self, namespace, f, args, n, wait_before_exit=False): - """ - Construct a bunch of `n` threads running the same function `f`. - If `wait_before_exit` is True, the threads won't terminate until - do_finish() is called. 
- """ - self.f = f - self.args = args - self.n = n - self.started = namespace.DummyList() - self.finished = namespace.DummyList() - self._can_exit = namespace.Event() - if not wait_before_exit: - self._can_exit.set() - for i in range(n): - p = namespace.Process(target=self.task) - p.daemon = True - p.start() - - def task(self): - pid = os.getpid() - self.started.append(pid) - try: - self.f(*self.args) - finally: - self.finished.append(pid) - self._can_exit.wait(30) - assert self._can_exit.is_set() - - def wait_for_started(self): - while len(self.started) < self.n: - _wait() - - def wait_for_finished(self): - while len(self.finished) < self.n: - _wait() - - def do_finish(self): - self._can_exit.set() - - -class AppendTrue(object): - def __init__(self, obj): - self.obj = obj - def __call__(self): - self.obj.append(True) - - -class _TestBarrier(BaseTestCase): - """ - Tests for Barrier objects. - """ - N = 5 - defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout - - def setUp(self): - self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout) - - def tearDown(self): - self.barrier.abort() - self.barrier = None - - def DummyList(self): - if self.TYPE == 'threads': - return [] - elif self.TYPE == 'manager': - return self.manager.list() - else: - return _DummyList() - - def run_threads(self, f, args): - b = Bunch(self, f, args, self.N-1) - f(*args) - b.wait_for_finished() - - @classmethod - def multipass(cls, barrier, results, n): - m = barrier.parties - assert m == cls.N - for i in range(n): - results[0].append(True) - assert len(results[1]) == i * m - barrier.wait() - results[1].append(True) - assert len(results[0]) == (i + 1) * m - barrier.wait() - try: - assert barrier.n_waiting == 0 - except NotImplementedError: - pass - assert not barrier.broken - - def test_barrier(self, passes=1): - """ - Test that a barrier is passed in lockstep - """ - results = [self.DummyList(), self.DummyList()] - self.run_threads(self.multipass, (self.barrier, results, passes)) - - def test_barrier_10(self): - """ - Test that a barrier works for 10 consecutive runs - """ - return self.test_barrier(10) - - @classmethod - def _test_wait_return_f(cls, barrier, queue): - res = barrier.wait() - queue.put(res) - - def test_wait_return(self): - """ - test the return value from barrier.wait - """ - queue = self.Queue() - self.run_threads(self._test_wait_return_f, (self.barrier, queue)) - results = [queue.get() for i in range(self.N)] - self.assertEqual(results.count(0), 1) - - @classmethod - def _test_action_f(cls, barrier, results): - barrier.wait() - if len(results) != 1: - raise RuntimeError - - def test_action(self): - """ - Test the 'action' callback - """ - results = self.DummyList() - barrier = self.Barrier(self.N, action=AppendTrue(results)) - self.run_threads(self._test_action_f, (barrier, results)) - self.assertEqual(len(results), 1) - - @classmethod - def _test_abort_f(cls, barrier, results1, results2): - try: - i = barrier.wait() - if i == cls.N//2: - raise RuntimeError - barrier.wait() - results1.append(True) - except threading.BrokenBarrierError: - results2.append(True) - except RuntimeError: - barrier.abort() - - def test_abort(self): - """ - Test that an abort will put the barrier in a broken state - """ - results1 = self.DummyList() - results2 = self.DummyList() - self.run_threads(self._test_abort_f, - (self.barrier, results1, results2)) - self.assertEqual(len(results1), 0) - self.assertEqual(len(results2), self.N-1) - self.assertTrue(self.barrier.broken) - - @classmethod - def 
_test_reset_f(cls, barrier, results1, results2, results3): - i = barrier.wait() - if i == cls.N//2: - # Wait until the other threads are all in the barrier. - while barrier.n_waiting < cls.N-1: - time.sleep(0.001) - barrier.reset() - else: - try: - barrier.wait() - results1.append(True) - except threading.BrokenBarrierError: - results2.append(True) - # Now, pass the barrier again - barrier.wait() - results3.append(True) - - def test_reset(self): - """ - Test that a 'reset' on a barrier frees the waiting threads - """ - results1 = self.DummyList() - results2 = self.DummyList() - results3 = self.DummyList() - self.run_threads(self._test_reset_f, - (self.barrier, results1, results2, results3)) - self.assertEqual(len(results1), 0) - self.assertEqual(len(results2), self.N-1) - self.assertEqual(len(results3), self.N) - - @classmethod - def _test_abort_and_reset_f(cls, barrier, barrier2, - results1, results2, results3): - try: - i = barrier.wait() - if i == cls.N//2: - raise RuntimeError - barrier.wait() - results1.append(True) - except threading.BrokenBarrierError: - results2.append(True) - except RuntimeError: - barrier.abort() - # Synchronize and reset the barrier. Must synchronize first so - # that everyone has left it when we reset, and after so that no - # one enters it before the reset. - if barrier2.wait() == cls.N//2: - barrier.reset() - barrier2.wait() - barrier.wait() - results3.append(True) - - def test_abort_and_reset(self): - """ - Test that a barrier can be reset after being broken. - """ - results1 = self.DummyList() - results2 = self.DummyList() - results3 = self.DummyList() - barrier2 = self.Barrier(self.N) - - self.run_threads(self._test_abort_and_reset_f, - (self.barrier, barrier2, results1, results2, results3)) - self.assertEqual(len(results1), 0) - self.assertEqual(len(results2), self.N-1) - self.assertEqual(len(results3), self.N) - - @classmethod - def _test_timeout_f(cls, barrier, results): - i = barrier.wait() - if i == cls.N//2: - # One thread is late! 
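The timeout cases here rest on one rule: if wait(timeout) expires, the barrier breaks, and every current and future waiter gets BrokenBarrierError until reset() is called. A minimal sketch of that behaviour (multiprocessing reuses threading's exception class):

import multiprocessing
import threading

if __name__ == '__main__':
    b = multiprocessing.Barrier(2)      # the second party never arrives
    try:
        b.wait(0.1)
    except threading.BrokenBarrierError:
        print('broken:', b.broken)      # broken: True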
- time.sleep(1.0) - try: - barrier.wait(0.5) - except threading.BrokenBarrierError: - results.append(True) - - def test_timeout(self): - """ - Test wait(timeout) - """ - results = self.DummyList() - self.run_threads(self._test_timeout_f, (self.barrier, results)) - self.assertEqual(len(results), self.barrier.parties) - - @classmethod - def _test_default_timeout_f(cls, barrier, results): - i = barrier.wait(cls.defaultTimeout) - if i == cls.N//2: - # One thread is later than the default timeout - time.sleep(1.0) - try: - barrier.wait() - except threading.BrokenBarrierError: - results.append(True) - - def test_default_timeout(self): - """ - Test the barrier's default timeout - """ - barrier = self.Barrier(self.N, timeout=0.5) - results = self.DummyList() - self.run_threads(self._test_default_timeout_f, (barrier, results)) - self.assertEqual(len(results), barrier.parties) - - def test_single_thread(self): - b = self.Barrier(1) - b.wait() - b.wait() - - @classmethod - def _test_thousand_f(cls, barrier, passes, conn, lock): - for i in range(passes): - barrier.wait() - with lock: - conn.send(i) - - def test_thousand(self): - if self.TYPE == 'manager': - return - passes = 1000 - lock = self.Lock() - conn, child_conn = self.Pipe(False) - for j in range(self.N): - p = self.Process(target=self._test_thousand_f, - args=(self.barrier, passes, child_conn, lock)) - p.start() - - for i in range(passes): - for j in range(self.N): - self.assertEqual(conn.recv(), i) - -# -# -# - -class _TestValue(BaseTestCase): - - ALLOWED_TYPES = ('processes',) - - codes_values = [ - ('i', 4343, 24234), - ('d', 3.625, -4.25), - ('h', -232, 234), - ('c', latin('x'), latin('y')) - ] - - def setUp(self): - if not HAS_SHAREDCTYPES: - self.skipTest("requires multiprocessing.sharedctypes") - - @classmethod - def _test(cls, values): - for sv, cv in zip(values, cls.codes_values): - sv.value = cv[2] - - - def test_value(self, raw=False): - if raw: - values = [self.RawValue(code, value) - for code, value, _ in self.codes_values] - else: - values = [self.Value(code, value) - for code, value, _ in self.codes_values] - - for sv, cv in zip(values, self.codes_values): - self.assertEqual(sv.value, cv[1]) - - proc = self.Process(target=self._test, args=(values,)) - proc.daemon = True - proc.start() - proc.join() - - for sv, cv in zip(values, self.codes_values): - self.assertEqual(sv.value, cv[2]) - - def test_rawvalue(self): - self.test_value(raw=True) - - def test_getobj_getlock(self): - val1 = self.Value('i', 5) - lock1 = val1.get_lock() - obj1 = val1.get_obj() - - val2 = self.Value('i', 5, lock=None) - lock2 = val2.get_lock() - obj2 = val2.get_obj() - - lock = self.Lock() - val3 = self.Value('i', 5, lock=lock) - lock3 = val3.get_lock() - obj3 = val3.get_obj() - self.assertEqual(lock, lock3) - - arr4 = self.Value('i', 5, lock=False) - self.assertFalse(hasattr(arr4, 'get_lock')) - self.assertFalse(hasattr(arr4, 'get_obj')) - - self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue') - - arr5 = self.RawValue('i', 5) - self.assertFalse(hasattr(arr5, 'get_lock')) - self.assertFalse(hasattr(arr5, 'get_obj')) - - -class _TestArray(BaseTestCase): - - ALLOWED_TYPES = ('processes',) - - @classmethod - def f(cls, seq): - for i in range(1, len(seq)): - seq[i] += seq[i-1] - - @unittest.skipIf(c_int is None, "requires _ctypes") - def test_array(self, raw=False): - seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831] - if raw: - arr = self.RawArray('i', seq) - else: - arr = self.Array('i', seq) - - self.assertEqual(len(arr), len(seq)) - 
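_TestValue and _TestArray below verify that writes a child makes through a shared ctypes object are visible to the parent. A minimal sketch of that round trip, with an illustrative double helper:

import multiprocessing

def double(v):
    v.value *= 2   # writes go to shared memory, not a per-process copy

if __name__ == '__main__':
    v = multiprocessing.Value('i', 21)
    p = multiprocessing.Process(target=double, args=(v,))
    p.start()
    p.join()
    print(v.value)   # 42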
self.assertEqual(arr[3], seq[3]) - self.assertEqual(list(arr[2:7]), list(seq[2:7])) - - arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4]) - - self.assertEqual(list(arr[:]), seq) - - self.f(seq) - - p = self.Process(target=self.f, args=(arr,)) - p.daemon = True - p.start() - p.join() - - self.assertEqual(list(arr[:]), seq) - - @unittest.skipIf(c_int is None, "requires _ctypes") - def test_array_from_size(self): - size = 10 - # Test for zeroing (see issue #11675). - # The repetition below strengthens the test by increasing the chances - # of previously allocated non-zero memory being used for the new array - # on the 2nd and 3rd loops. - for _ in range(3): - arr = self.Array('i', size) - self.assertEqual(len(arr), size) - self.assertEqual(list(arr), [0] * size) - arr[:] = range(10) - self.assertEqual(list(arr), list(range(10))) - del arr - - @unittest.skipIf(c_int is None, "requires _ctypes") - def test_rawarray(self): - self.test_array(raw=True) - - @unittest.skipIf(c_int is None, "requires _ctypes") - def test_getobj_getlock_obj(self): - arr1 = self.Array('i', list(range(10))) - lock1 = arr1.get_lock() - obj1 = arr1.get_obj() - - arr2 = self.Array('i', list(range(10)), lock=None) - lock2 = arr2.get_lock() - obj2 = arr2.get_obj() - - lock = self.Lock() - arr3 = self.Array('i', list(range(10)), lock=lock) - lock3 = arr3.get_lock() - obj3 = arr3.get_obj() - self.assertEqual(lock, lock3) - - arr4 = self.Array('i', range(10), lock=False) - self.assertFalse(hasattr(arr4, 'get_lock')) - self.assertFalse(hasattr(arr4, 'get_obj')) - self.assertRaises(AttributeError, - self.Array, 'i', range(10), lock='notalock') - - arr5 = self.RawArray('i', range(10)) - self.assertFalse(hasattr(arr5, 'get_lock')) - self.assertFalse(hasattr(arr5, 'get_obj')) - -# -# -# - -class _TestContainers(BaseTestCase): - - ALLOWED_TYPES = ('manager',) - - def test_list(self): - a = self.list(list(range(10))) - self.assertEqual(a[:], list(range(10))) - - b = self.list() - self.assertEqual(b[:], []) - - b.extend(list(range(5))) - self.assertEqual(b[:], list(range(5))) - - self.assertEqual(b[2], 2) - self.assertEqual(b[2:10], [2,3,4]) - - b *= 2 - self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]) - - self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6]) - - self.assertEqual(a[:], list(range(10))) - - d = [a, b] - e = self.list(d) - self.assertEqual( - e[:], - [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]] - ) - - f = self.list([a]) - a.append('hello') - self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']]) - - def test_dict(self): - d = self.dict() - indices = list(range(65, 70)) - for i in indices: - d[i] = chr(i) - self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices)) - self.assertEqual(sorted(d.keys()), indices) - self.assertEqual(sorted(d.values()), [chr(i) for i in indices]) - self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices]) - - def test_namespace(self): - n = self.Namespace() - n.name = 'Bob' - n.job = 'Builder' - n._hidden = 'hidden' - self.assertEqual((n.name, n.job), ('Bob', 'Builder')) - del n.job - self.assertEqual(str(n), "Namespace(name='Bob')") - self.assertTrue(hasattr(n, 'name')) - self.assertTrue(not hasattr(n, 'job')) - -# -# -# - -def sqr(x, wait=0.0): - time.sleep(wait) - return x*x - -def mul(x, y): - return x*y - -class _TestPool(BaseTestCase): - - @classmethod - def setUpClass(cls): - super().setUpClass() - cls.pool = cls.Pool(4) - - @classmethod - def tearDownClass(cls): - cls.pool.terminate() - cls.pool.join() - cls.pool = 
None - super().tearDownClass() - - def test_apply(self): - papply = self.pool.apply - self.assertEqual(papply(sqr, (5,)), sqr(5)) - self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3)) - - def test_map(self): - pmap = self.pool.map - self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10))))) - self.assertEqual(pmap(sqr, list(range(100)), chunksize=20), - list(map(sqr, list(range(100))))) - - def test_starmap(self): - psmap = self.pool.starmap - tuples = list(zip(range(10), range(9,-1, -1))) - self.assertEqual(psmap(mul, tuples), - list(itertools.starmap(mul, tuples))) - tuples = list(zip(range(100), range(99,-1, -1))) - self.assertEqual(psmap(mul, tuples, chunksize=20), - list(itertools.starmap(mul, tuples))) - - def test_starmap_async(self): - tuples = list(zip(range(100), range(99,-1, -1))) - self.assertEqual(self.pool.starmap_async(mul, tuples).get(), - list(itertools.starmap(mul, tuples))) - - def test_map_async(self): - self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(), - list(map(sqr, list(range(10))))) - - def test_map_async_callbacks(self): - call_args = self.manager.list() if self.TYPE == 'manager' else [] - self.pool.map_async(int, ['1'], - callback=call_args.append, - error_callback=call_args.append).wait() - self.assertEqual(1, len(call_args)) - self.assertEqual([1], call_args[0]) - self.pool.map_async(int, ['a'], - callback=call_args.append, - error_callback=call_args.append).wait() - self.assertEqual(2, len(call_args)) - self.assertIsInstance(call_args[1], ValueError) - - def test_map_chunksize(self): - try: - self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1) - except multiprocessing.TimeoutError: - self.fail("pool.map_async with chunksize stalled on null list") - - def test_async(self): - res = self.pool.apply_async(sqr, (7, TIMEOUT1,)) - get = TimingWrapper(res.get) - self.assertEqual(get(), 49) - self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) - - def test_async_timeout(self): - res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2)) - get = TimingWrapper(res.get) - self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2) - self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) - - def test_imap(self): - it = self.pool.imap(sqr, list(range(10))) - self.assertEqual(list(it), list(map(sqr, list(range(10))))) - - it = self.pool.imap(sqr, list(range(10))) - for i in range(10): - self.assertEqual(next(it), i*i) - self.assertRaises(StopIteration, it.__next__) - - it = self.pool.imap(sqr, list(range(1000)), chunksize=100) - for i in range(1000): - self.assertEqual(next(it), i*i) - self.assertRaises(StopIteration, it.__next__) - - def test_imap_unordered(self): - it = self.pool.imap_unordered(sqr, list(range(1000))) - self.assertEqual(sorted(it), list(map(sqr, list(range(1000))))) - - it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=53) - self.assertEqual(sorted(it), list(map(sqr, list(range(1000))))) - - def test_make_pool(self): - self.assertRaises(ValueError, multiprocessing.Pool, -1) - self.assertRaises(ValueError, multiprocessing.Pool, 0) - - p = multiprocessing.Pool(3) - self.assertEqual(3, len(p._pool)) - p.close() - p.join() - - def test_terminate(self): - result = self.pool.map_async( - time.sleep, [0.1 for i in range(10000)], chunksize=1 - ) - self.pool.terminate() - join = TimingWrapper(self.pool.join) - join() - self.assertLess(join.elapsed, 0.5) - - def test_empty_iterable(self): - # See Issue 12157 - p = self.Pool(1) - - self.assertEqual(p.map(sqr, []), []) - self.assertEqual(list(p.imap(sqr, 
[])), []) - self.assertEqual(list(p.imap_unordered(sqr, [])), []) - self.assertEqual(p.map_async(sqr, []).get(), []) - - p.close() - p.join() - - def test_context(self): - if self.TYPE == 'processes': - L = list(range(10)) - expected = [sqr(i) for i in L] - with multiprocessing.Pool(2) as p: - r = p.map_async(sqr, L) - self.assertEqual(r.get(), expected) - self.assertRaises(ValueError, p.map_async, sqr, L) - - @classmethod - def _test_traceback(cls): - raise RuntimeError(123) # some comment - - def test_traceback(self): - # We want to ensure that the traceback from the child process is - # contained in the traceback raised in the main process. - if self.TYPE == 'processes': - with self.Pool(1) as p: - try: - p.apply(self._test_traceback) - except Exception as e: - exc = e - else: - raise AssertionError('expected RuntimeError') - self.assertIs(type(exc), RuntimeError) - self.assertEqual(exc.args, (123,)) - cause = exc.__cause__ - self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback) - self.assertIn('raise RuntimeError(123) # some comment', cause.tb) - - with test.support.captured_stderr() as f1: - try: - raise exc - except RuntimeError: - sys.excepthook(*sys.exc_info()) - self.assertIn('raise RuntimeError(123) # some comment', - f1.getvalue()) - -def raising(): - raise KeyError("key") - -def unpickleable_result(): - return lambda: 42 - -class _TestPoolWorkerErrors(BaseTestCase): - ALLOWED_TYPES = ('processes', ) - - def test_async_error_callback(self): - p = multiprocessing.Pool(2) - - scratchpad = [None] - def errback(exc): - scratchpad[0] = exc - - res = p.apply_async(raising, error_callback=errback) - self.assertRaises(KeyError, res.get) - self.assertTrue(scratchpad[0]) - self.assertIsInstance(scratchpad[0], KeyError) - - p.close() - p.join() - - def test_unpickleable_result(self): - from multiprocessing.pool import MaybeEncodingError - p = multiprocessing.Pool(2) - - # Make sure we don't lose pool processes because of encoding errors. - for iteration in range(20): - - scratchpad = [None] - def errback(exc): - scratchpad[0] = exc - - res = p.apply_async(unpickleable_result, error_callback=errback) - self.assertRaises(MaybeEncodingError, res.get) - wrapped = scratchpad[0] - self.assertTrue(wrapped) - self.assertIsInstance(scratchpad[0], MaybeEncodingError) - self.assertIsNotNone(wrapped.exc) - self.assertIsNotNone(wrapped.value) - - p.close() - p.join() - -class _TestPoolWorkerLifetime(BaseTestCase): - ALLOWED_TYPES = ('processes', ) - - def test_pool_worker_lifetime(self): - p = multiprocessing.Pool(3, maxtasksperchild=10) - self.assertEqual(3, len(p._pool)) - origworkerpids = [w.pid for w in p._pool] - # Run many tasks so each worker gets replaced (hopefully) - results = [] - for i in range(100): - results.append(p.apply_async(sqr, (i, ))) - # Fetch the results and verify we got the right answers, - # also ensuring all the tasks have completed. - for (j, res) in enumerate(results): - self.assertEqual(res.get(), sqr(j)) - # Refill the pool - p._repopulate_pool() - # Wait until all workers are alive - # (countdown * DELTA = 5 seconds max startup process time) - countdown = 50 - while countdown and not all(w.is_alive() for w in p._pool): - countdown -= 1 - time.sleep(DELTA) - finalworkerpids = [w.pid for w in p._pool] - # All pids should be assigned. See issue #7805.
- self.assertNotIn(None, origworkerpids) - self.assertNotIn(None, finalworkerpids) - # Finally, check that the worker pids have changed - self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids)) - p.close() - p.join() - - def test_pool_worker_lifetime_early_close(self): - # Issue #10332: closing a pool whose workers have limited lifetimes - # before all the tasks completed would make join() hang. - p = multiprocessing.Pool(3, maxtasksperchild=1) - results = [] - for i in range(6): - results.append(p.apply_async(sqr, (i, 0.3))) - p.close() - p.join() - # check the results - for (j, res) in enumerate(results): - self.assertEqual(res.get(), sqr(j)) - -# -# Test of creating a customized manager class -# - -from multiprocessing.managers import BaseManager, BaseProxy, RemoteError - -class FooBar(object): - def f(self): - return 'f()' - def g(self): - raise ValueError - def _h(self): - return '_h()' - -def baz(): - for i in range(10): - yield i*i - -class IteratorProxy(BaseProxy): - _exposed_ = ('__next__',) - def __iter__(self): - return self - def __next__(self): - return self._callmethod('__next__') - -class MyManager(BaseManager): - pass - -MyManager.register('Foo', callable=FooBar) -MyManager.register('Bar', callable=FooBar, exposed=('f', '_h')) -MyManager.register('baz', callable=baz, proxytype=IteratorProxy) - - -class _TestMyManager(BaseTestCase): - - ALLOWED_TYPES = ('manager',) - - def test_mymanager(self): - manager = MyManager() - manager.start() - self.common(manager) - manager.shutdown() - - # If the manager process exited cleanly then the exitcode - # will be zero. Otherwise (after a short timeout) - # terminate() is used, resulting in an exitcode of -SIGTERM. - self.assertEqual(manager._process.exitcode, 0) - - def test_mymanager_context(self): - with MyManager() as manager: - self.common(manager) - self.assertEqual(manager._process.exitcode, 0) - - def test_mymanager_context_prestarted(self): - manager = MyManager() - manager.start() - with manager: - self.common(manager) - self.assertEqual(manager._process.exitcode, 0) - - def common(self, manager): - foo = manager.Foo() - bar = manager.Bar() - baz = manager.baz() - - foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)] - bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)] - - self.assertEqual(foo_methods, ['f', 'g']) - self.assertEqual(bar_methods, ['f', '_h']) - - self.assertEqual(foo.f(), 'f()') - self.assertRaises(ValueError, foo.g) - self.assertEqual(foo._callmethod('f'), 'f()') - self.assertRaises(RemoteError, foo._callmethod, '_h') - - self.assertEqual(bar.f(), 'f()') - self.assertEqual(bar._h(), '_h()') - self.assertEqual(bar._callmethod('f'), 'f()') - self.assertEqual(bar._callmethod('_h'), '_h()') - - self.assertEqual(list(baz), [i*i for i in range(10)]) - - -# -# Test of connecting to a remote server and using xmlrpclib for serialization -# - -_queue = pyqueue.Queue() -def get_queue(): - return _queue - -class QueueManager(BaseManager): - '''manager class used by server process''' -QueueManager.register('get_queue', callable=get_queue) - -class QueueManager2(BaseManager): - '''manager class which specifies the same interface as QueueManager''' -QueueManager2.register('get_queue') - - -SERIALIZER = 'xmlrpclib' - -class _TestRemoteManager(BaseTestCase): - - ALLOWED_TYPES = ('manager',) - - @classmethod - def _putter(cls, address, authkey): - manager = QueueManager2( - address=address, authkey=authkey, serializer=SERIALIZER - ) - manager.connect() - queue = 
manager.get_queue() - queue.put(('hello world', None, True, 2.25)) - - def test_remote(self): - authkey = os.urandom(32) - - manager = QueueManager( - address=('localhost', 0), authkey=authkey, serializer=SERIALIZER - ) - manager.start() - - p = self.Process(target=self._putter, args=(manager.address, authkey)) - p.daemon = True - p.start() - - manager2 = QueueManager2( - address=manager.address, authkey=authkey, serializer=SERIALIZER - ) - manager2.connect() - queue = manager2.get_queue() - - # Note that xmlrpclib will deserialize object as a list not a tuple - self.assertEqual(queue.get(), ['hello world', None, True, 2.25]) - - # Because we are using xmlrpclib for serialization instead of - # pickle this will cause a serialization error. - self.assertRaises(Exception, queue.put, time.sleep) - - # Make queue finalizer run before the server is stopped - del queue - manager.shutdown() - -class _TestManagerRestart(BaseTestCase): - - @classmethod - def _putter(cls, address, authkey): - manager = QueueManager( - address=address, authkey=authkey, serializer=SERIALIZER) - manager.connect() - queue = manager.get_queue() - queue.put('hello world') - - def test_rapid_restart(self): - authkey = os.urandom(32) - manager = QueueManager( - address=('localhost', 0), authkey=authkey, serializer=SERIALIZER) - srvr = manager.get_server() - addr = srvr.address - # Close the connection.Listener socket which gets opened as a part - # of manager.get_server(). It's not needed for the test. - srvr.listener.close() - manager.start() - - p = self.Process(target=self._putter, args=(manager.address, authkey)) - p.daemon = True - p.start() - queue = manager.get_queue() - self.assertEqual(queue.get(), 'hello world') - del queue - manager.shutdown() - manager = QueueManager( - address=addr, authkey=authkey, serializer=SERIALIZER) - try: - manager.start() - except OSError as e: - if e.errno != errno.EADDRINUSE: - raise - # Retry after some time, in case the old socket was lingering - # (sporadic failure on buildbots) - time.sleep(1.0) - manager = QueueManager( - address=addr, authkey=authkey, serializer=SERIALIZER) - manager.shutdown() - -# -# -# - -SENTINEL = latin('') - -class _TestConnection(BaseTestCase): - - ALLOWED_TYPES = ('processes', 'threads') - - @classmethod - def _echo(cls, conn): - for msg in iter(conn.recv_bytes, SENTINEL): - conn.send_bytes(msg) - conn.close() - - def test_connection(self): - conn, child_conn = self.Pipe() - - p = self.Process(target=self._echo, args=(child_conn,)) - p.daemon = True - p.start() - - seq = [1, 2.25, None] - msg = latin('hello world') - longmsg = msg * 10 - arr = array.array('i', list(range(4))) - - if self.TYPE == 'processes': - self.assertEqual(type(conn.fileno()), int) - - self.assertEqual(conn.send(seq), None) - self.assertEqual(conn.recv(), seq) - - self.assertEqual(conn.send_bytes(msg), None) - self.assertEqual(conn.recv_bytes(), msg) - - if self.TYPE == 'processes': - buffer = array.array('i', [0]*10) - expected = list(arr) + [0] * (10 - len(arr)) - self.assertEqual(conn.send_bytes(arr), None) - self.assertEqual(conn.recv_bytes_into(buffer), - len(arr) * buffer.itemsize) - self.assertEqual(list(buffer), expected) - - buffer = array.array('i', [0]*10) - expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr)) - self.assertEqual(conn.send_bytes(arr), None) - self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize), - len(arr) * buffer.itemsize) - self.assertEqual(list(buffer), expected) - - buffer = bytearray(latin(' ' * 40)) - 
self.assertEqual(conn.send_bytes(longmsg), None) - try: - res = conn.recv_bytes_into(buffer) - except multiprocessing.BufferTooShort as e: - self.assertEqual(e.args, (longmsg,)) - else: - self.fail('expected BufferTooShort, got %s' % res) - - poll = TimingWrapper(conn.poll) - - self.assertEqual(poll(), False) - self.assertTimingAlmostEqual(poll.elapsed, 0) - - self.assertEqual(poll(-1), False) - self.assertTimingAlmostEqual(poll.elapsed, 0) - - self.assertEqual(poll(TIMEOUT1), False) - self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1) - - conn.send(None) - time.sleep(.1) - - self.assertEqual(poll(TIMEOUT1), True) - self.assertTimingAlmostEqual(poll.elapsed, 0) - - self.assertEqual(conn.recv(), None) - - really_big_msg = latin('X') * (1024 * 1024 * 16) # 16 MiB - conn.send_bytes(really_big_msg) - self.assertEqual(conn.recv_bytes(), really_big_msg) - - conn.send_bytes(SENTINEL) # tell child to quit - child_conn.close() - - if self.TYPE == 'processes': - self.assertEqual(conn.readable, True) - self.assertEqual(conn.writable, True) - self.assertRaises(EOFError, conn.recv) - self.assertRaises(EOFError, conn.recv_bytes) - - p.join() - - def test_duplex_false(self): - reader, writer = self.Pipe(duplex=False) - self.assertEqual(writer.send(1), None) - self.assertEqual(reader.recv(), 1) - if self.TYPE == 'processes': - self.assertEqual(reader.readable, True) - self.assertEqual(reader.writable, False) - self.assertEqual(writer.readable, False) - self.assertEqual(writer.writable, True) - self.assertRaises(OSError, reader.send, 2) - self.assertRaises(OSError, writer.recv) - self.assertRaises(OSError, writer.poll) - - def test_spawn_close(self): - # We test that a pipe connection can be closed by the parent - # process immediately after the child is spawned. On Windows this - # would have sometimes failed on old versions because - # child_conn would be closed before the child got a chance to - # duplicate it.
- conn, child_conn = self.Pipe() - - p = self.Process(target=self._echo, args=(child_conn,)) - p.daemon = True - p.start() - child_conn.close() # this might complete before child initializes - - msg = latin('hello') - conn.send_bytes(msg) - self.assertEqual(conn.recv_bytes(), msg) - - conn.send_bytes(SENTINEL) - conn.close() - p.join() - - def test_sendbytes(self): - if self.TYPE != 'processes': - return - - msg = latin('abcdefghijklmnopqrstuvwxyz') - a, b = self.Pipe() - - a.send_bytes(msg) - self.assertEqual(b.recv_bytes(), msg) - - a.send_bytes(msg, 5) - self.assertEqual(b.recv_bytes(), msg[5:]) - - a.send_bytes(msg, 7, 8) - self.assertEqual(b.recv_bytes(), msg[7:7+8]) - - a.send_bytes(msg, 26) - self.assertEqual(b.recv_bytes(), latin('')) - - a.send_bytes(msg, 26, 0) - self.assertEqual(b.recv_bytes(), latin('')) - - self.assertRaises(ValueError, a.send_bytes, msg, 27) - - self.assertRaises(ValueError, a.send_bytes, msg, 22, 5) - - self.assertRaises(ValueError, a.send_bytes, msg, 26, 1) - - self.assertRaises(ValueError, a.send_bytes, msg, -1) - - self.assertRaises(ValueError, a.send_bytes, msg, 4, -1) - - @classmethod - def _is_fd_assigned(cls, fd): - try: - os.fstat(fd) - except OSError as e: - if e.errno == errno.EBADF: - return False - raise - else: - return True - - @classmethod - def _writefd(cls, conn, data, create_dummy_fds=False): - if create_dummy_fds: - for i in range(0, 256): - if not cls._is_fd_assigned(i): - os.dup2(conn.fileno(), i) - fd = reduction.recv_handle(conn) - if msvcrt: - fd = msvcrt.open_osfhandle(fd, os.O_WRONLY) - os.write(fd, data) - os.close(fd) - - @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") - def test_fd_transfer(self): - if self.TYPE != 'processes': - self.skipTest("only makes sense with processes") - conn, child_conn = self.Pipe(duplex=True) - - p = self.Process(target=self._writefd, args=(child_conn, b"foo")) - p.daemon = True - p.start() - self.addCleanup(test.support.unlink, test.support.TESTFN) - with open(test.support.TESTFN, "wb") as f: - fd = f.fileno() - if msvcrt: - fd = msvcrt.get_osfhandle(fd) - reduction.send_handle(conn, fd, p.pid) - p.join() - with open(test.support.TESTFN, "rb") as f: - self.assertEqual(f.read(), b"foo") - - @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") - @unittest.skipIf(sys.platform == "win32", - "test semantics don't make sense on Windows") - @unittest.skipIf(MAXFD <= 256, - "largest assignable fd number is too small") - @unittest.skipUnless(hasattr(os, "dup2"), - "test needs os.dup2()") - def test_large_fd_transfer(self): - # With fd > 256 (issue #11657) - if self.TYPE != 'processes': - self.skipTest("only makes sense with processes") - conn, child_conn = self.Pipe(duplex=True) - - p = self.Process(target=self._writefd, args=(child_conn, b"bar", True)) - p.daemon = True - p.start() - self.addCleanup(test.support.unlink, test.support.TESTFN) - with open(test.support.TESTFN, "wb") as f: - fd = f.fileno() - for newfd in range(256, MAXFD): - if not self._is_fd_assigned(newfd): - break - else: - self.fail("could not find an unassigned large file descriptor") - os.dup2(fd, newfd) - try: - reduction.send_handle(conn, newfd, p.pid) - finally: - os.close(newfd) - p.join() - with open(test.support.TESTFN, "rb") as f: - self.assertEqual(f.read(), b"bar") - - @classmethod - def _send_data_without_fd(self, conn): - os.write(conn.fileno(), b"\0") - - @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") - @unittest.skipIf(sys.platform == "win32", 
"doesn't make sense on Windows") - def test_missing_fd_transfer(self): - # Check that exception is raised when received data is not - # accompanied by a file descriptor in ancillary data. - if self.TYPE != 'processes': - self.skipTest("only makes sense with processes") - conn, child_conn = self.Pipe(duplex=True) - - p = self.Process(target=self._send_data_without_fd, args=(child_conn,)) - p.daemon = True - p.start() - self.assertRaises(RuntimeError, reduction.recv_handle, conn) - p.join() - - def test_context(self): - a, b = self.Pipe() - - with a, b: - a.send(1729) - self.assertEqual(b.recv(), 1729) - if self.TYPE == 'processes': - self.assertFalse(a.closed) - self.assertFalse(b.closed) - - if self.TYPE == 'processes': - self.assertTrue(a.closed) - self.assertTrue(b.closed) - self.assertRaises(OSError, a.recv) - self.assertRaises(OSError, b.recv) - -class _TestListener(BaseTestCase): - - ALLOWED_TYPES = ('processes',) - - def test_multiple_bind(self): - for family in self.connection.families: - l = self.connection.Listener(family=family) - self.addCleanup(l.close) - self.assertRaises(OSError, self.connection.Listener, - l.address, family) - - def test_context(self): - with self.connection.Listener() as l: - with self.connection.Client(l.address) as c: - with l.accept() as d: - c.send(1729) - self.assertEqual(d.recv(), 1729) - - if self.TYPE == 'processes': - self.assertRaises(OSError, l.accept) - -class _TestListenerClient(BaseTestCase): - - ALLOWED_TYPES = ('processes', 'threads') - - @classmethod - def _test(cls, address): - conn = cls.connection.Client(address) - conn.send('hello') - conn.close() - - def test_listener_client(self): - for family in self.connection.families: - l = self.connection.Listener(family=family) - p = self.Process(target=self._test, args=(l.address,)) - p.daemon = True - p.start() - conn = l.accept() - self.assertEqual(conn.recv(), 'hello') - p.join() - l.close() - - def test_issue14725(self): - l = self.connection.Listener() - p = self.Process(target=self._test, args=(l.address,)) - p.daemon = True - p.start() - time.sleep(1) - # On Windows the client process should by now have connected, - # written data and closed the pipe handle by now. This causes - # ConnectNamdedPipe() to fail with ERROR_NO_DATA. See Issue - # 14725. 
- conn = l.accept() - self.assertEqual(conn.recv(), 'hello') - conn.close() - p.join() - l.close() - - def test_issue16955(self): - for fam in self.connection.families: - l = self.connection.Listener(family=fam) - c = self.connection.Client(l.address) - a = l.accept() - a.send_bytes(b"hello") - self.assertTrue(c.poll(1)) - a.close() - c.close() - l.close() - -class _TestPoll(BaseTestCase): - - ALLOWED_TYPES = ('processes', 'threads') - - def test_empty_string(self): - a, b = self.Pipe() - self.assertEqual(a.poll(), False) - b.send_bytes(b'') - self.assertEqual(a.poll(), True) - self.assertEqual(a.poll(), True) - - @classmethod - def _child_strings(cls, conn, strings): - for s in strings: - time.sleep(0.1) - conn.send_bytes(s) - conn.close() - - def test_strings(self): - strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop') - a, b = self.Pipe() - p = self.Process(target=self._child_strings, args=(b, strings)) - p.start() - - for s in strings: - for i in range(200): - if a.poll(0.01): - break - x = a.recv_bytes() - self.assertEqual(s, x) - - p.join() - - @classmethod - def _child_boundaries(cls, r): - # Polling may "pull" a message into the child process, but we - # don't want it to pull only part of a message, as that would - # corrupt the pipe for any other processes which might later - # read from it. - r.poll(5) - - def test_boundaries(self): - r, w = self.Pipe(False) - p = self.Process(target=self._child_boundaries, args=(r,)) - p.start() - time.sleep(2) - L = [b"first", b"second"] - for obj in L: - w.send_bytes(obj) - w.close() - p.join() - self.assertIn(r.recv_bytes(), L) - - @classmethod - def _child_dont_merge(cls, b): - b.send_bytes(b'a') - b.send_bytes(b'b') - b.send_bytes(b'cd') - - def test_dont_merge(self): - a, b = self.Pipe() - self.assertEqual(a.poll(0.0), False) - self.assertEqual(a.poll(0.1), False) - - p = self.Process(target=self._child_dont_merge, args=(b,)) - p.start() - - self.assertEqual(a.recv_bytes(), b'a') - self.assertEqual(a.poll(1.0), True) - self.assertEqual(a.poll(1.0), True) - self.assertEqual(a.recv_bytes(), b'b') - self.assertEqual(a.poll(1.0), True) - self.assertEqual(a.poll(1.0), True) - self.assertEqual(a.poll(0.0), True) - self.assertEqual(a.recv_bytes(), b'cd') - - p.join() - -# -# Test of sending connection and socket objects between processes -# - -@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction") -class _TestPicklingConnections(BaseTestCase): - - ALLOWED_TYPES = ('processes',) - - @classmethod - def tearDownClass(cls): - from multiprocessing.reduction import resource_sharer - resource_sharer.stop(timeout=5) - - @classmethod - def _listener(cls, conn, families): - for fam in families: - l = cls.connection.Listener(family=fam) - conn.send(l.address) - new_conn = l.accept() - conn.send(new_conn) - new_conn.close() - l.close() - - l = socket.socket() - l.bind(('localhost', 0)) - l.listen(1) - conn.send(l.getsockname()) - new_conn, addr = l.accept() - conn.send(new_conn) - new_conn.close() - l.close() - - conn.recv() - - @classmethod - def _remote(cls, conn): - for (address, msg) in iter(conn.recv, None): - client = cls.connection.Client(address) - client.send(msg.upper()) - client.close() - - address, msg = conn.recv() - client = socket.socket() - client.connect(address) - client.sendall(msg.upper()) - client.close() - - conn.close() - - def test_pickling(self): - families = self.connection.families - - lconn, lconn0 = self.Pipe() - lp = self.Process(target=self._listener, args=(lconn0, families)) - lp.daemon = True
- lp.start() - lconn0.close() - - rconn, rconn0 = self.Pipe() - rp = self.Process(target=self._remote, args=(rconn0,)) - rp.daemon = True - rp.start() - rconn0.close() - - for fam in families: - msg = ('This connection uses family %s' % fam).encode('ascii') - address = lconn.recv() - rconn.send((address, msg)) - new_conn = lconn.recv() - self.assertEqual(new_conn.recv(), msg.upper()) - - rconn.send(None) - - msg = latin('This connection uses a normal socket') - address = lconn.recv() - rconn.send((address, msg)) - new_conn = lconn.recv() - buf = [] - while True: - s = new_conn.recv(100) - if not s: - break - buf.append(s) - buf = b''.join(buf) - self.assertEqual(buf, msg.upper()) - new_conn.close() - - lconn.send(None) - - rconn.close() - lconn.close() - - lp.join() - rp.join() - - @classmethod - def child_access(cls, conn): - w = conn.recv() - w.send('all is well') - w.close() - - r = conn.recv() - msg = r.recv() - conn.send(msg*2) - - conn.close() - - def test_access(self): - # On Windows, if we do not specify a destination pid when - # using DupHandle then we need to be careful to use the - # correct access flags for DuplicateHandle(), or else - # DupHandle.detach() will raise PermissionError. For example, - # for a read only pipe handle we should use - # access=FILE_GENERIC_READ. (Unfortunately - # DUPLICATE_SAME_ACCESS does not work.) - conn, child_conn = self.Pipe() - p = self.Process(target=self.child_access, args=(child_conn,)) - p.daemon = True - p.start() - child_conn.close() - - r, w = self.Pipe(duplex=False) - conn.send(w) - w.close() - self.assertEqual(r.recv(), 'all is well') - r.close() - - r, w = self.Pipe(duplex=False) - conn.send(r) - r.close() - w.send('foobar') - w.close() - self.assertEqual(conn.recv(), 'foobar'*2) - -# -# -# - -class _TestHeap(BaseTestCase): - - ALLOWED_TYPES = ('processes',) - - def test_heap(self): - iterations = 5000 - maxblocks = 50 - blocks = [] - - # create and destroy lots of blocks of different sizes - for i in range(iterations): - size = int(random.lognormvariate(0, 1) * 1000) - b = multiprocessing.heap.BufferWrapper(size) - blocks.append(b) - if len(blocks) > maxblocks: - i = random.randrange(maxblocks) - del blocks[i] - - # get the heap object - heap = multiprocessing.heap.BufferWrapper._heap - - # verify the state of the heap - all = [] - occupied = 0 - heap._lock.acquire() - self.addCleanup(heap._lock.release) - for L in list(heap._len_to_seq.values()): - for arena, start, stop in L: - all.append((heap._arenas.index(arena), start, stop, - stop-start, 'free')) - for arena, start, stop in heap._allocated_blocks: - all.append((heap._arenas.index(arena), start, stop, - stop-start, 'occupied')) - occupied += (stop-start) - - all.sort() - - for i in range(len(all)-1): - (arena, start, stop) = all[i][:3] - (narena, nstart, nstop) = all[i+1][:3] - self.assertTrue((arena != narena and nstart == 0) or - (stop == nstart)) - - def test_free_from_gc(self): - # Check that freeing of blocks by the garbage collector doesn't deadlock - # (issue #12352). - # Make sure the GC is enabled, and set lower collection thresholds to - # make collections more frequent (and increase the probability of - # deadlock). 
- if not gc.isenabled(): - gc.enable() - self.addCleanup(gc.disable) - thresholds = gc.get_threshold() - self.addCleanup(gc.set_threshold, *thresholds) - gc.set_threshold(10) - - # perform numerous block allocations, with cyclic references to make - # sure objects are collected asynchronously by the gc - for i in range(5000): - a = multiprocessing.heap.BufferWrapper(1) - b = multiprocessing.heap.BufferWrapper(1) - # circular references - a.buddy = b - b.buddy = a - -# -# -# - -class _Foo(Structure): - _fields_ = [ - ('x', c_int), - ('y', c_double) - ] - -class _TestSharedCTypes(BaseTestCase): - - ALLOWED_TYPES = ('processes',) - - def setUp(self): - if not HAS_SHAREDCTYPES: - self.skipTest("requires multiprocessing.sharedctypes") - - @classmethod - def _double(cls, x, y, foo, arr, string): - x.value *= 2 - y.value *= 2 - foo.x *= 2 - foo.y *= 2 - string.value *= 2 - for i in range(len(arr)): - arr[i] *= 2 - - def test_sharedctypes(self, lock=False): - x = Value('i', 7, lock=lock) - y = Value(c_double, 1.0/3.0, lock=lock) - foo = Value(_Foo, 3, 2, lock=lock) - arr = self.Array('d', list(range(10)), lock=lock) - string = self.Array('c', 20, lock=lock) - string.value = latin('hello') - - p = self.Process(target=self._double, args=(x, y, foo, arr, string)) - p.daemon = True - p.start() - p.join() - - self.assertEqual(x.value, 14) - self.assertAlmostEqual(y.value, 2.0/3.0) - self.assertEqual(foo.x, 6) - self.assertAlmostEqual(foo.y, 4.0) - for i in range(10): - self.assertAlmostEqual(arr[i], i*2) - self.assertEqual(string.value, latin('hellohello')) - - def test_synchronize(self): - self.test_sharedctypes(lock=True) - - def test_copy(self): - foo = _Foo(2, 5.0) - bar = copy(foo) - foo.x = 0 - foo.y = 0 - self.assertEqual(bar.x, 2) - self.assertAlmostEqual(bar.y, 5.0) - -# -# -# - -class _TestFinalize(BaseTestCase): - - ALLOWED_TYPES = ('processes',) - - @classmethod - def _test_finalize(cls, conn): - class Foo(object): - pass - - a = Foo() - util.Finalize(a, conn.send, args=('a',)) - del a # triggers callback for a - - b = Foo() - close_b = util.Finalize(b, conn.send, args=('b',)) - close_b() # triggers callback for b - close_b() # does nothing because callback has already been called - del b # does nothing because callback has already been called - - c = Foo() - util.Finalize(c, conn.send, args=('c',)) - - d10 = Foo() - util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) - - d01 = Foo() - util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) - d02 = Foo() - util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) - d03 = Foo() - util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) - - util.Finalize(None, conn.send, args=('e',), exitpriority=-10) - - util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) - - # call multiprocessing's cleanup function then exit process without - # garbage collecting locals - util._exit_function() - conn.close() - os._exit(0) - - def test_finalize(self): - conn, child_conn = self.Pipe() - - p = self.Process(target=self._test_finalize, args=(child_conn,)) - p.daemon = True - p.start() - p.join() - - result = [obj for obj in iter(conn.recv, 'STOP')] - self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) - -# -# Test that from ... 
import * works for each module -# - -class _TestImportStar(BaseTestCase): - - ALLOWED_TYPES = ('processes',) - - def test_import(self): - modules = [ - 'multiprocessing', 'multiprocessing.connection', - 'multiprocessing.heap', 'multiprocessing.managers', - 'multiprocessing.pool', 'multiprocessing.process', - 'multiprocessing.synchronize', 'multiprocessing.util' - ] - - if HAS_REDUCTION: - modules.append('multiprocessing.reduction') - - if c_int is not None: - # This module requires _ctypes - modules.append('multiprocessing.sharedctypes') - - for name in modules: - __import__(name) - mod = sys.modules[name] - - for attr in getattr(mod, '__all__', ()): - self.assertTrue( - hasattr(mod, attr), - '%r does not have attribute %r' % (mod, attr) - ) - -# -# Quick test that logging works -- does not test logging output -# - -class _TestLogging(BaseTestCase): - - ALLOWED_TYPES = ('processes',) - - def test_enable_logging(self): - logger = multiprocessing.get_logger() - logger.setLevel(util.SUBWARNING) - self.assertTrue(logger is not None) - logger.debug('this will not be printed') - logger.info('nor will this') - logger.setLevel(LOG_LEVEL) - - @classmethod - def _test_level(cls, conn): - logger = multiprocessing.get_logger() - conn.send(logger.getEffectiveLevel()) - - def test_level(self): - LEVEL1 = 32 - LEVEL2 = 37 - - logger = multiprocessing.get_logger() - root_logger = logging.getLogger() - root_level = root_logger.level - - reader, writer = multiprocessing.Pipe(duplex=False) - - logger.setLevel(LEVEL1) - p = self.Process(target=self._test_level, args=(writer,)) - p.daemon = True - p.start() - self.assertEqual(LEVEL1, reader.recv()) - - logger.setLevel(logging.NOTSET) - root_logger.setLevel(LEVEL2) - p = self.Process(target=self._test_level, args=(writer,)) - p.daemon = True - p.start() - self.assertEqual(LEVEL2, reader.recv()) - - root_logger.setLevel(root_level) - logger.setLevel(level=LOG_LEVEL) - - -# class _TestLoggingProcessName(BaseTestCase): -# -# def handle(self, record): -# assert record.processName == multiprocessing.current_process().name -# self.__handled = True -# -# def test_logging(self): -# handler = logging.Handler() -# handler.handle = self.handle -# self.__handled = False -# # Bypass getLogger() and side-effects -# logger = logging.getLoggerClass()( -# 'multiprocessing.test.TestLoggingProcessName') -# logger.addHandler(handler) -# logger.propagate = False -# -# logger.warn('foo') -# assert self.__handled - -# -# Check that Process.join() retries if os.waitpid() fails with EINTR -# - -class _TestPollEintr(BaseTestCase): - - ALLOWED_TYPES = ('processes',) - - @classmethod - def _killer(cls, pid): - time.sleep(0.5) - os.kill(pid, signal.SIGUSR1) - - @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') - def test_poll_eintr(self): - got_signal = [False] - def record(*args): - got_signal[0] = True - pid = os.getpid() - oldhandler = signal.signal(signal.SIGUSR1, record) - try: - killer = self.Process(target=self._killer, args=(pid,)) - killer.start() - p = self.Process(target=time.sleep, args=(1,)) - p.start() - p.join() - self.assertTrue(got_signal[0]) - self.assertEqual(p.exitcode, 0) - killer.join() - finally: - signal.signal(signal.SIGUSR1, oldhandler) - -# -# Test to verify handle verification, see issue 3321 -# - -class TestInvalidHandle(unittest.TestCase): - - @unittest.skipIf(WIN32, "skipped on Windows") - def test_invalid_handles(self): - conn = multiprocessing.connection.Connection(44977608) - try: - self.assertRaises((ValueError, OSError), conn.poll) - 
finally: - # Hack private attribute _handle to avoid printing an error - # in conn.__del__ - conn._handle = None - self.assertRaises((ValueError, OSError), - multiprocessing.connection.Connection, -1) - -# -# Functions used to create test cases from the base ones in this module -# - -def create_test_cases(Mixin, type): - result = {} - glob = globals() - Type = type.capitalize() - ALL_TYPES = {'processes', 'threads', 'manager'} - - for name in list(glob.keys()): - if name.startswith('_Test'): - base = glob[name] - assert set(base.ALLOWED_TYPES) <= ALL_TYPES, set(base.ALLOWED_TYPES) - if type in base.ALLOWED_TYPES: - newname = 'With' + Type + name[1:] - class Temp(base, Mixin, unittest.TestCase): - pass - result[newname] = Temp - Temp.__name__ = Temp.__qualname__ = newname - Temp.__module__ = Mixin.__module__ - return result - -# -# Create test cases -# - -class ProcessesMixin(object): - TYPE = 'processes' - Process = multiprocessing.Process - connection = multiprocessing.connection - current_process = staticmethod(multiprocessing.current_process) - active_children = staticmethod(multiprocessing.active_children) - Pool = staticmethod(multiprocessing.Pool) - Pipe = staticmethod(multiprocessing.Pipe) - Queue = staticmethod(multiprocessing.Queue) - JoinableQueue = staticmethod(multiprocessing.JoinableQueue) - Lock = staticmethod(multiprocessing.Lock) - RLock = staticmethod(multiprocessing.RLock) - Semaphore = staticmethod(multiprocessing.Semaphore) - BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore) - Condition = staticmethod(multiprocessing.Condition) - Event = staticmethod(multiprocessing.Event) - Barrier = staticmethod(multiprocessing.Barrier) - Value = staticmethod(multiprocessing.Value) - Array = staticmethod(multiprocessing.Array) - RawValue = staticmethod(multiprocessing.RawValue) - RawArray = staticmethod(multiprocessing.RawArray) - -testcases_processes = create_test_cases(ProcessesMixin, type='processes') -globals().update(testcases_processes) - - -class ManagerMixin(object): - TYPE = 'manager' - Process = multiprocessing.Process - Queue = property(operator.attrgetter('manager.Queue')) - JoinableQueue = property(operator.attrgetter('manager.JoinableQueue')) - Lock = property(operator.attrgetter('manager.Lock')) - RLock = property(operator.attrgetter('manager.RLock')) - Semaphore = property(operator.attrgetter('manager.Semaphore')) - BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore')) - Condition = property(operator.attrgetter('manager.Condition')) - Event = property(operator.attrgetter('manager.Event')) - Barrier = property(operator.attrgetter('manager.Barrier')) - Value = property(operator.attrgetter('manager.Value')) - Array = property(operator.attrgetter('manager.Array')) - list = property(operator.attrgetter('manager.list')) - dict = property(operator.attrgetter('manager.dict')) - Namespace = property(operator.attrgetter('manager.Namespace')) - - @classmethod - def Pool(cls, *args, **kwds): - return cls.manager.Pool(*args, **kwds) - - @classmethod - def setUpClass(cls): - cls.manager = multiprocessing.Manager() - - @classmethod - def tearDownClass(cls): - # only the manager process should be returned by active_children() - # but this can take a bit on slow machines, so wait a few seconds - # if there are other children too (see #17395) - t = 0.01 - while len(multiprocessing.active_children()) > 1 and t < 5: - time.sleep(t) - t *= 2 - gc.collect() # do garbage collection - if cls.manager._number_of_objects() != 0: - # This is not really an 
error since some tests do not - # ensure that all processes which hold a reference to a - # managed object have been joined. - print('Shared objects which still exist at manager shutdown:') - print(cls.manager._debug_info()) - cls.manager.shutdown() - cls.manager.join() - cls.manager = None - -testcases_manager = create_test_cases(ManagerMixin, type='manager') -globals().update(testcases_manager) - - -class ThreadsMixin(object): - TYPE = 'threads' - Process = multiprocessing.dummy.Process - connection = multiprocessing.dummy.connection - current_process = staticmethod(multiprocessing.dummy.current_process) - active_children = staticmethod(multiprocessing.dummy.active_children) - Pool = staticmethod(multiprocessing.Pool) - Pipe = staticmethod(multiprocessing.dummy.Pipe) - Queue = staticmethod(multiprocessing.dummy.Queue) - JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue) - Lock = staticmethod(multiprocessing.dummy.Lock) - RLock = staticmethod(multiprocessing.dummy.RLock) - Semaphore = staticmethod(multiprocessing.dummy.Semaphore) - BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore) - Condition = staticmethod(multiprocessing.dummy.Condition) - Event = staticmethod(multiprocessing.dummy.Event) - Barrier = staticmethod(multiprocessing.dummy.Barrier) - Value = staticmethod(multiprocessing.dummy.Value) - Array = staticmethod(multiprocessing.dummy.Array) - -testcases_threads = create_test_cases(ThreadsMixin, type='threads') -globals().update(testcases_threads) - - -class OtherTest(unittest.TestCase): - # TODO: add more tests for deliver/answer challenge. - def test_deliver_challenge_auth_failure(self): - class _FakeConnection(object): - def recv_bytes(self, size): - return b'something bogus' - def send_bytes(self, data): - pass - self.assertRaises(multiprocessing.AuthenticationError, - multiprocessing.connection.deliver_challenge, - _FakeConnection(), b'abc') - - def test_answer_challenge_auth_failure(self): - class _FakeConnection(object): - def __init__(self): - self.count = 0 - def recv_bytes(self, size): - self.count += 1 - if self.count == 1: - return multiprocessing.connection.CHALLENGE - elif self.count == 2: - return b'something bogus' - return b'' - def send_bytes(self, data): - pass - self.assertRaises(multiprocessing.AuthenticationError, - multiprocessing.connection.answer_challenge, - _FakeConnection(), b'abc') - -# -# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585 -# - -def initializer(ns): - ns.test += 1 - -class TestInitializers(unittest.TestCase): - def setUp(self): - self.mgr = multiprocessing.Manager() - self.ns = self.mgr.Namespace() - self.ns.test = 0 - - def tearDown(self): - self.mgr.shutdown() - self.mgr.join() - - def test_manager_initializer(self): - m = multiprocessing.managers.SyncManager() - self.assertRaises(TypeError, m.start, 1) - m.start(initializer, (self.ns,)) - self.assertEqual(self.ns.test, 1) - m.shutdown() - m.join() - - def test_pool_initializer(self): - self.assertRaises(TypeError, multiprocessing.Pool, initializer=1) - p = multiprocessing.Pool(1, initializer, (self.ns,)) - p.close() - p.join() - self.assertEqual(self.ns.test, 1) - -# -# Issue 5155, 5313, 5331: Test process in processes -# Verifies os.close(sys.stdin.fileno) vs. 
sys.stdin.close() behavior -# - -def _ThisSubProcess(q): - try: - item = q.get(block=False) - except pyqueue.Empty: - pass - -def _TestProcess(q): - queue = multiprocessing.Queue() - subProc = multiprocessing.Process(target=_ThisSubProcess, args=(queue,)) - subProc.daemon = True - subProc.start() - subProc.join() - -def _afunc(x): - return x*x - -def pool_in_process(): - pool = multiprocessing.Pool(processes=4) - x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7]) - pool.close() - pool.join() - -class _file_like(object): - def __init__(self, delegate): - self._delegate = delegate - self._pid = None - - @property - def cache(self): - pid = os.getpid() - # There are no race conditions since fork keeps only the running thread - if pid != self._pid: - self._pid = pid - self._cache = [] - return self._cache - - def write(self, data): - self.cache.append(data) - - def flush(self): - self._delegate.write(''.join(self.cache)) - self._cache = [] - -class TestStdinBadfiledescriptor(unittest.TestCase): - - def test_queue_in_process(self): - queue = multiprocessing.Queue() - proc = multiprocessing.Process(target=_TestProcess, args=(queue,)) - proc.start() - proc.join() - - def test_pool_in_process(self): - p = multiprocessing.Process(target=pool_in_process) - p.start() - p.join() - - def test_flushing(self): - sio = io.StringIO() - flike = _file_like(sio) - flike.write('foo') - proc = multiprocessing.Process(target=lambda: flike.flush()) - flike.flush() - assert sio.getvalue() == 'foo' - - -class TestWait(unittest.TestCase): - - @classmethod - def _child_test_wait(cls, w, slow): - for i in range(10): - if slow: - time.sleep(random.random()*0.1) - w.send((i, os.getpid())) - w.close() - - def test_wait(self, slow=False): - from multiprocessing.connection import wait - readers = [] - procs = [] - messages = [] - - for i in range(4): - r, w = multiprocessing.Pipe(duplex=False) - p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow)) - p.daemon = True - p.start() - w.close() - readers.append(r) - procs.append(p) - self.addCleanup(p.join) - - while readers: - for r in wait(readers): - try: - msg = r.recv() - except EOFError: - readers.remove(r) - r.close() - else: - messages.append(msg) - - messages.sort() - expected = sorted((i, p.pid) for i in range(10) for p in procs) - self.assertEqual(messages, expected) - - @classmethod - def _child_test_wait_socket(cls, address, slow): - s = socket.socket() - s.connect(address) - for i in range(10): - if slow: - time.sleep(random.random()*0.1) - s.sendall(('%s\n' % i).encode('ascii')) - s.close() - - def test_wait_socket(self, slow=False): - from multiprocessing.connection import wait - l = socket.socket() - l.bind(('', 0)) - l.listen(4) - addr = ('localhost', l.getsockname()[1]) - readers = [] - procs = [] - dic = {} - - for i in range(4): - p = multiprocessing.Process(target=self._child_test_wait_socket, - args=(addr, slow)) - p.daemon = True - p.start() - procs.append(p) - self.addCleanup(p.join) - - for i in range(4): - r, _ = l.accept() - readers.append(r) - dic[r] = [] - l.close() - - while readers: - for r in wait(readers): - msg = r.recv(32) - if not msg: - readers.remove(r) - r.close() - else: - dic[r].append(msg) - - expected = ''.join('%s\n' % i for i in range(10)).encode('ascii') - for v in dic.values(): - self.assertEqual(b''.join(v), expected) - - def test_wait_slow(self): - self.test_wait(True) - - def test_wait_socket_slow(self): - self.test_wait_socket(True) - - def test_wait_timeout(self): - from multiprocessing.connection import wait - - 
expected = 5 - a, b = multiprocessing.Pipe() - - start = time.time() - res = wait([a, b], expected) - delta = time.time() - start - - self.assertEqual(res, []) - self.assertLess(delta, expected * 2) - self.assertGreater(delta, expected * 0.5) - - b.send(None) - - start = time.time() - res = wait([a, b], 20) - delta = time.time() - start - - self.assertEqual(res, [a]) - self.assertLess(delta, 0.4) - - @classmethod - def signal_and_sleep(cls, sem, period): - sem.release() - time.sleep(period) - - def test_wait_integer(self): - from multiprocessing.connection import wait - - expected = 3 - sorted_ = lambda l: sorted(l, key=lambda x: id(x)) - sem = multiprocessing.Semaphore(0) - a, b = multiprocessing.Pipe() - p = multiprocessing.Process(target=self.signal_and_sleep, - args=(sem, expected)) - - p.start() - self.assertIsInstance(p.sentinel, int) - self.assertTrue(sem.acquire(timeout=20)) - - start = time.time() - res = wait([a, p.sentinel, b], expected + 20) - delta = time.time() - start - - self.assertEqual(res, [p.sentinel]) - self.assertLess(delta, expected + 2) - self.assertGreater(delta, expected - 2) - - a.send(None) - - start = time.time() - res = wait([a, p.sentinel, b], 20) - delta = time.time() - start - - self.assertEqual(sorted_(res), sorted_([p.sentinel, b])) - self.assertLess(delta, 0.4) - - b.send(None) - - start = time.time() - res = wait([a, p.sentinel, b], 20) - delta = time.time() - start - - self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b])) - self.assertLess(delta, 0.4) - - p.terminate() - p.join() - - def test_neg_timeout(self): - from multiprocessing.connection import wait - a, b = multiprocessing.Pipe() - t = time.time() - res = wait([a], timeout=-1) - t = time.time() - t - self.assertEqual(res, []) - self.assertLess(t, 1) - a.close() - b.close() - -# -# Issue 14151: Test invalid family on invalid environment -# - -class TestInvalidFamily(unittest.TestCase): - - @unittest.skipIf(WIN32, "skipped on Windows") - def test_invalid_family(self): - with self.assertRaises(ValueError): - multiprocessing.connection.Listener(r'\\.\test') - - @unittest.skipUnless(WIN32, "skipped on non-Windows platforms") - def test_invalid_family_win32(self): - with self.assertRaises(ValueError): - multiprocessing.connection.Listener('/var/test.pipe') - -# -# Issue 12098: check sys.flags of child matches that for parent -# - -class TestFlags(unittest.TestCase): - @classmethod - def run_in_grandchild(cls, conn): - conn.send(tuple(sys.flags)) - - @classmethod - def run_in_child(cls): - import json - r, w = multiprocessing.Pipe(duplex=False) - p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,)) - p.start() - grandchild_flags = r.recv() - p.join() - r.close() - w.close() - flags = (tuple(sys.flags), grandchild_flags) - print(json.dumps(flags)) - - def test_flags(self): - import json, subprocess - # start child process using unusual flags - prog = ('from test.test_multiprocessing import TestFlags; ' + - 'TestFlags.run_in_child()') - data = subprocess.check_output( - [sys.executable, '-E', '-S', '-O', '-c', prog]) - child_flags, grandchild_flags = json.loads(data.decode('ascii')) - self.assertEqual(child_flags, grandchild_flags) - -# -# Test interaction with socket timeouts - see Issue #6056 -# - -class TestTimeouts(unittest.TestCase): - @classmethod - def _test_timeout(cls, child, address): - time.sleep(1) - child.send(123) - child.close() - conn = multiprocessing.connection.Client(address) - conn.send(456) - conn.close() - - def test_timeout(self): - old_timeout = 
socket.getdefaulttimeout() - try: - socket.setdefaulttimeout(0.1) - parent, child = multiprocessing.Pipe(duplex=True) - l = multiprocessing.connection.Listener(family='AF_INET') - p = multiprocessing.Process(target=self._test_timeout, - args=(child, l.address)) - p.start() - child.close() - self.assertEqual(parent.recv(), 123) - parent.close() - conn = l.accept() - self.assertEqual(conn.recv(), 456) - conn.close() - l.close() - p.join(10) - finally: - socket.setdefaulttimeout(old_timeout) - -# -# Test what happens with no "if __name__ == '__main__'" -# - -class TestNoForkBomb(unittest.TestCase): - def test_noforkbomb(self): - name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py') - if WIN32: - rc, out, err = test.script_helper.assert_python_failure(name) - self.assertEqual('', out.decode('ascii')) - self.assertIn('RuntimeError', err.decode('ascii')) - else: - rc, out, err = test.script_helper.assert_python_ok(name) - self.assertEqual('123', out.decode('ascii').rstrip()) - self.assertEqual('', err.decode('ascii')) - -# -# Issue #17555: ForkAwareThreadLock -# - -class TestForkAwareThreadLock(unittest.TestCase): - # We recursively start processes. Issue #17555 meant that the - # after-fork registry would get duplicate entries for the same - # lock. The size of the registry at generation n was ~2**n. - - @classmethod - def child(cls, n, conn): - if n > 1: - p = multiprocessing.Process(target=cls.child, args=(n-1, conn)) - p.start() - p.join() - else: - conn.send(len(util._afterfork_registry)) - conn.close() - - def test_lock(self): - r, w = multiprocessing.Pipe(False) - l = util.ForkAwareThreadLock() - old_size = len(util._afterfork_registry) - p = multiprocessing.Process(target=self.child, args=(5, w)) - p.start() - new_size = r.recv() - p.join() - self.assertLessEqual(new_size, old_size) - -# -# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc -# - -class TestIgnoreEINTR(unittest.TestCase): - - @classmethod - def _test_ignore(cls, conn): - def handler(signum, frame): - pass - signal.signal(signal.SIGUSR1, handler) - conn.send('ready') - x = conn.recv() - conn.send(x) - conn.send_bytes(b'x'*(1024*1024)) # sending 1 MiB should block - - @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') - def test_ignore(self): - conn, child_conn = multiprocessing.Pipe() - try: - p = multiprocessing.Process(target=self._test_ignore, - args=(child_conn,)) - p.daemon = True - p.start() - child_conn.close() - self.assertEqual(conn.recv(), 'ready') - time.sleep(0.1) - os.kill(p.pid, signal.SIGUSR1) - time.sleep(0.1) - conn.send(1234) - self.assertEqual(conn.recv(), 1234) - time.sleep(0.1) - os.kill(p.pid, signal.SIGUSR1) - self.assertEqual(conn.recv_bytes(), b'x'*(1024*1024)) - time.sleep(0.1) - p.join() - finally: - -# -# -# - -def setUpModule(): - if sys.platform.startswith("linux"): - try: - lock = multiprocessing.RLock() - except OSError: - raise unittest.SkipTest("OSError is raised on RLock creation, " - "see issue 3111!") - check_enough_semaphores() - util.get_temp_dir() # creates temp directory for use by all processes - multiprocessing.get_logger().setLevel(LOG_LEVEL) - - -def tearDownModule(): - # pause a bit so we don't get warnings about dangling threads/processes - time.sleep(0.5) - - -if __name__ == '__main__': - unittest.main() diff -r 9877c25d9556 -r b3620777f54c Lib/test/test_multiprocessing_fork.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Lib/test/test_multiprocessing_fork.py Wed Aug 07 22:43:26 2013 +0100 @@ -0,0 +1,7 @@ +import unittest +import test._test_multiprocessing + +test._test_multiprocessing.install_tests_in_module_dict(globals(), 'fork') + +if __name__ == '__main__': + unittest.main() diff -r 9877c25d9556 -r b3620777f54c Lib/test/test_multiprocessing_forkserver.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Lib/test/test_multiprocessing_forkserver.py Wed Aug 07 22:43:26 2013 +0100 @@ -0,0 +1,11 @@ +import sys +import test._test_multiprocessing +import unittest + +if sys.platform == 'darwin': + raise unittest.SkipTest('forkserver not supported on OSX') + +test._test_multiprocessing.install_tests_in_module_dict(globals(), 'forkserver') + +if __name__ == '__main__': + unittest.main() diff -r 9877c25d9556 -r b3620777f54c Lib/test/test_multiprocessing_spawn.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Lib/test/test_multiprocessing_spawn.py Wed Aug 07 22:43:26 2013 +0100 @@ -0,0 +1,7 @@ +import unittest +import test._test_multiprocessing + +test._test_multiprocessing.install_tests_in_module_dict(globals(), 'spawn') + +if __name__ == '__main__': + unittest.main() diff -r 9877c25d9556 -r b3620777f54c Makefile.pre.in --- a/Makefile.pre.in Wed Aug 07 05:54:28 2013 -0700 +++ b/Makefile.pre.in Wed Aug 07 22:43:26 2013 +0100 @@ -934,7 +934,10 @@ -@if which pybuildbot.identify >/dev/null 2>&1; then \ pybuildbot.identify "CC='$(CC)'" "CXX='$(CXX)'"; \ fi - $(TESTRUNNER) -j 1 -u all -W --timeout=$(TESTTIMEOUT) $(TESTOPTS) + $(TESTRUNNER) -j 1 -u all -v --timeout=1000 \ + test_multiprocessing_fork \ + test_multiprocessing_spawn \ + test_multiprocessing_forkserver QUICKTESTOPTS= $(TESTOPTS) -x test_subprocess test_io test_lib2to3 \ test_multibytecodec test_urllib2_localnet test_itertools \ diff -r 9877c25d9556 -r b3620777f54c Modules/_multiprocessing/multiprocessing.c --- a/Modules/_multiprocessing/multiprocessing.c Wed Aug 07 05:54:28 2013 -0700 +++ b/Modules/_multiprocessing/multiprocessing.c Wed Aug 07 22:43:26 2013 +0100 @@ -126,6 +126,7 @@ {"recv", multiprocessing_recv, METH_VARARGS, ""}, {"send", multiprocessing_send, METH_VARARGS, ""}, #endif + {"sem_unlink", _PyMp_sem_unlink, METH_VARARGS, ""}, {NULL} }; diff -r 9877c25d9556 -r b3620777f54c Modules/_multiprocessing/multiprocessing.h --- a/Modules/_multiprocessing/multiprocessing.h Wed Aug 07 05:54:28 2013 -0700 +++ b/Modules/_multiprocessing/multiprocessing.h Wed Aug 07 22:43:26 2013 +0100 @@ -98,5 +98,6 @@ */ extern PyTypeObject _PyMp_SemLockType; +extern PyObject *_PyMp_sem_unlink(PyObject *ignore, PyObject *args); #endif /* MULTIPROCESSING_H */ diff -r 9877c25d9556 -r b3620777f54c Modules/_multiprocessing/semaphore.c --- a/Modules/_multiprocessing/semaphore.c Wed Aug 07 05:54:28 2013 -0700 +++ b/Modules/_multiprocessing/semaphore.c Wed Aug 07 22:43:26 2013 +0100 @@ -18,6 +18,7 @@ int count; int maxvalue; int kind; + char *name;
conn.close() - -# -# -# - -def setUpModule(): - if sys.platform.startswith("linux"): - try: - lock = multiprocessing.RLock() - except OSError: - raise unittest.SkipTest("OSError raises on RLock creation, " - "see issue 3111!") - check_enough_semaphores() - util.get_temp_dir() # creates temp directory for use by all processes - multiprocessing.get_logger().setLevel(LOG_LEVEL) - - -def tearDownModule(): - # pause a bit so we don't get warning about dangling threads/processes - time.sleep(0.5) - - -if __name__ == '__main__': - unittest.main() diff -r 9877c25d9556 -r b3620777f54c Lib/test/test_multiprocessing_fork.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Lib/test/test_multiprocessing_fork.py Wed Aug 07 22:43:26 2013 +0100 @@ -0,0 +1,6 @@ +import test._test_multiprocessing + +test._test_multiprocessing.install_tests_in_module_dict(globals(), 'fork') + +if __name__ == '__main__': + unittest.main() diff -r 9877c25d9556 -r b3620777f54c Lib/test/test_multiprocessing_forkserver.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Lib/test/test_multiprocessing_forkserver.py Wed Aug 07 22:43:26 2013 +0100 @@ -0,0 +1,11 @@ +import sys +import test._test_multiprocessing +import unittest + +if sys.platform == 'darwin': + raise unittest.SkipTest('forkserver no supported on OSX') + +test._test_multiprocessing.install_tests_in_module_dict(globals(), 'forkserver') + +if __name__ == '__main__': + unittest.main() diff -r 9877c25d9556 -r b3620777f54c Lib/test/test_multiprocessing_spawn.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Lib/test/test_multiprocessing_spawn.py Wed Aug 07 22:43:26 2013 +0100 @@ -0,0 +1,6 @@ +import test._test_multiprocessing + +test._test_multiprocessing.install_tests_in_module_dict(globals(), 'spawn') + +if __name__ == '__main__': + unittest.main() diff -r 9877c25d9556 -r b3620777f54c Makefile.pre.in --- a/Makefile.pre.in Wed Aug 07 05:54:28 2013 -0700 +++ b/Makefile.pre.in Wed Aug 07 22:43:26 2013 +0100 @@ -934,7 +934,10 @@ -@if which pybuildbot.identify >/dev/null 2>&1; then \ pybuildbot.identify "CC='$(CC)'" "CXX='$(CXX)'"; \ fi - $(TESTRUNNER) -j 1 -u all -W --timeout=$(TESTTIMEOUT) $(TESTOPTS) + $(TESTRUNNER) -j 1 -u all -v --timeout=1000 \ + test_multiprocessing_fork \ + test_multiprocessing_spawn \ + test_multiprocessing_forkserver QUICKTESTOPTS= $(TESTOPTS) -x test_subprocess test_io test_lib2to3 \ test_multibytecodec test_urllib2_localnet test_itertools \ diff -r 9877c25d9556 -r b3620777f54c Modules/_multiprocessing/multiprocessing.c --- a/Modules/_multiprocessing/multiprocessing.c Wed Aug 07 05:54:28 2013 -0700 +++ b/Modules/_multiprocessing/multiprocessing.c Wed Aug 07 22:43:26 2013 +0100 @@ -126,6 +126,7 @@ {"recv", multiprocessing_recv, METH_VARARGS, ""}, {"send", multiprocessing_send, METH_VARARGS, ""}, #endif + {"sem_unlink", _PyMp_sem_unlink, METH_VARARGS, ""}, {NULL} }; diff -r 9877c25d9556 -r b3620777f54c Modules/_multiprocessing/multiprocessing.h --- a/Modules/_multiprocessing/multiprocessing.h Wed Aug 07 05:54:28 2013 -0700 +++ b/Modules/_multiprocessing/multiprocessing.h Wed Aug 07 22:43:26 2013 +0100 @@ -98,5 +98,6 @@ */ extern PyTypeObject _PyMp_SemLockType; +extern PyObject *_PyMp_sem_unlink(PyObject *ignore, PyObject *args); #endif /* MULTIPROCESSING_H */ diff -r 9877c25d9556 -r b3620777f54c Modules/_multiprocessing/semaphore.c --- a/Modules/_multiprocessing/semaphore.c Wed Aug 07 05:54:28 2013 -0700 +++ b/Modules/_multiprocessing/semaphore.c Wed Aug 07 22:43:26 2013 +0100 @@ -18,6 +18,7 @@ int count; int maxvalue; int kind; + char *name; 
} SemLockObject; #define ISMINE(o) (o->count > 0 && PyThread_get_thread_ident() == o->last_tid) @@ -397,7 +398,8 @@ */ static PyObject * -newsemlockobject(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue) +newsemlockobject(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, + char *name) { SemLockObject *self; @@ -409,21 +411,22 @@ self->count = 0; self->last_tid = 0; self->maxvalue = maxvalue; + self->name = name; return (PyObject*)self; } static PyObject * semlock_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { - char buffer[256]; SEM_HANDLE handle = SEM_FAILED; - int kind, maxvalue, value; + int kind, maxvalue, value, unlink; PyObject *result; - static char *kwlist[] = {"kind", "value", "maxvalue", NULL}; - static int counter = 0; + char *name, *name_copy = NULL; + static char *kwlist[] = {"kind", "value", "maxvalue", "name", "unlink", + NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "iii", kwlist, - &kind, &value, &maxvalue)) + if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiisi", kwlist, + &kind, &value, &maxvalue, &name, &unlink)) return NULL; if (kind != RECURSIVE_MUTEX && kind != SEMAPHORE) { @@ -431,18 +434,23 @@ return NULL; } - PyOS_snprintf(buffer, sizeof(buffer), "/mp%ld-%d", (long)getpid(), counter++); + if (!unlink) { + name_copy = PyMem_Malloc(strlen(name) + 1); + if (name_copy == NULL) + goto failure; + strcpy(name_copy, name); + } SEM_CLEAR_ERROR(); - handle = SEM_CREATE(buffer, value, maxvalue); + handle = SEM_CREATE(name, value, maxvalue); /* On Windows we should fail if GetLastError()==ERROR_ALREADY_EXISTS */ if (handle == SEM_FAILED || SEM_GET_LAST_ERROR() != 0) goto failure; - if (SEM_UNLINK(buffer) < 0) + if (unlink && SEM_UNLINK(name) < 0) goto failure; - result = newsemlockobject(type, handle, kind, maxvalue); + result = newsemlockobject(type, handle, kind, maxvalue, name_copy); if (!result) goto failure; @@ -451,6 +459,7 @@ failure: if (handle != SEM_FAILED) SEM_CLOSE(handle); + PyMem_Free(name_copy); _PyMp_SetError(NULL, MP_STANDARD_ERROR); return NULL; } @@ -460,12 +469,30 @@ { SEM_HANDLE handle; int kind, maxvalue; + char *name, *name_copy = NULL; - if (!PyArg_ParseTuple(args, F_SEM_HANDLE "ii", - &handle, &kind, &maxvalue)) + if (!PyArg_ParseTuple(args, F_SEM_HANDLE "iiz", + &handle, &kind, &maxvalue, &name)) return NULL; - return newsemlockobject(type, handle, kind, maxvalue); + if (name != NULL) { + name_copy = PyMem_Malloc(strlen(name) + 1); + if (name_copy == NULL) + return PyErr_NoMemory(); + strcpy(name_copy, name); + } + +#ifndef MS_WINDOWS + if (name != NULL) { + handle = sem_open(name, 0); + if (handle == SEM_FAILED) { + PyMem_Free(name_copy); + return PyErr_SetFromErrno(PyExc_OSError); + } + } +#endif + + return newsemlockobject(type, handle, kind, maxvalue, name_copy); } static void @@ -473,6 +500,7 @@ { if (self->handle != SEM_FAILED) SEM_CLOSE(self->handle); + PyMem_Free(self->name); PyObject_Del(self); } @@ -574,6 +602,8 @@ ""}, {"maxvalue", T_INT, offsetof(SemLockObject, maxvalue), READONLY, ""}, + {"name", T_STRING, offsetof(SemLockObject, name), READONLY, + ""}, {NULL} }; @@ -621,3 +651,23 @@ /* tp_alloc */ 0, /* tp_new */ semlock_new, }; + +/* + * Function to unlink semaphore names + */ + +PyObject * +_PyMp_sem_unlink(PyObject *ignore, PyObject *args) +{ + char *name; + + if (!PyArg_ParseTuple(args, "s", &name)) + return NULL; + + if (SEM_UNLINK(name) < 0) { + _PyMp_SetError(NULL, MP_STANDARD_ERROR); + return NULL; + } + + Py_RETURN_NONE; +}
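The semaphore.c hunks above replace the old behaviour, where semlock_new() generated a "/mp<pid>-<counter>" name and unlinked it immediately, with a caller-supplied name and an explicit unlink flag: when unlink is false, a copy of the name is kept in self->name (exposed through the new read-only .name member) so that an unrelated process can reopen the same POSIX semaphore by name via the sem_open() call in semlock_rebuild(), and the new module-level sem_unlink() removes the name later. A minimal Python-level sketch of the resulting low-level API on a POSIX system follows; the SEMAPHORE constant mirrors Lib/multiprocessing/synchronize.py and the semaphore name is hypothetical, not taken from the patch:

    import _multiprocessing

    SEMAPHORE = 1            # "kind" constant, as in Lib/multiprocessing/synchronize.py
    name = '/mp-demo-sem'    # hypothetical POSIX semaphore name

    # unlink=False keeps the name registered with the OS so an unrelated
    # process could attach to the same semaphore; the object records the
    # name and exposes it through the new read-only .name member.
    sl = _multiprocessing.SemLock(SEMAPHORE, 1, 1, name, False)
    assert sl.name == name

    sl.acquire()
    sl.release()

    # With unlink=False the caller is responsible for removing the name,
    # using the sem_unlink() function this patch adds to the C module.
    _multiprocessing.sem_unlink(name)

Note that on Windows the SEM_UNLINK() macro in this file is a no-op, so the name/unlink plumbing only changes behaviour on POSIX; it exists so that the new spawn and forkserver start methods can hand semaphores to children that do not inherit them via fork().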