# HG changeset patch
# Parent e9b60eaf8f5012ee1c4dfacf7182539c626f683d

diff -r e9b60eaf8f50 Doc/library/multiprocessing.rst
--- a/Doc/library/multiprocessing.rst	Thu Sep 08 12:40:21 2011 +0100
+++ b/Doc/library/multiprocessing.rst	Tue Sep 13 14:48:21 2011 +0100
@@ -711,10 +711,73 @@
    (By default :data:`sys.executable` is used).  Embedders will probably need to
    do some thing like ::

-      setExecutable(os.path.join(sys.exec_prefix, 'pythonw.exe'))
-
-   before they can create child processes.  (Windows only)
-
+      set_executable(os.path.join(sys.exec_prefix, 'pythonw.exe'))
+
+   before they can create child processes.  This is used on
+   Windows or on Unix when forking is disabled.
+
+.. function:: forking_is_enabled()
+
+   Returns a boolean value indicating whether :mod:`multiprocessing`
+   is currently set to create child processes by forking the current
+   python process rather than by starting new instances of python.
+
+   On Windows this always returns ``False``.  On Unix it returns
+   ``True`` by default.
+
+.. function:: forking_enable()
+
+   Enables the use of forking.  This means that child processes will
+   use a forked copy of the current python interpreter.  On Windows
+   forking is not supported, while on Unix forking is the default.
+
+.. function:: forking_disable()
+
+   Disables the use of forking.  This means that child processes will
+   each get their own newly created python interpreter.  On Windows
+   forking is not supported, while on Unix forking is the default.
+
+   On Unix one would normally disable forking by calling this
+   function at the beginning of the program, before using anything
+   else from :mod:`multiprocessing`.  This is because it modifies the
+   way some objects are created, to ensure that they can be inherited
+   through pickling.
+
+   Disabling forking on Unix will make :mod:`multiprocessing` work
+   on Unix in much the same way as it does on Windows, so the
+   programming guidelines which normally only apply to Windows must
+   also be followed.  Creating processes will also be slower if
+   forking is disabled.
+
+   Forking a process with multiple threads can cause difficult to
+   avoid problems because the new process must fix up any data or
+   resources which other threads may have been messing with at the
+   time the fork occurred.  (See :issue:`6721`.)  Disabling forking
+   makes safely mixing threads and processes much easier on Unix.
+
+   Note that on Unix disabling the use of fork will mean that named
+   semaphores (used for implementing locks, etc) may not be unlinked
+   from the file system if the process which created them does not
+   exit cleanly (for instance, if the process is killed by a signal).
+   If a named semaphore is not unlinked then it will survive even if
+   all the processes which use it have terminated.  On Linux this
+   will usually mean that files with names like
+   ``/dev/shm/sem.mp-a9b00dd3-5133-1`` will be left over representing
+   "leaked" semaphores.  If you do not remove these manually then
+   they will not be removed until the computer reboots, which is
+   problematic since Unix systems can have quite low limits on the
+   number of named semaphores which can exist at any one time.
+
+.. function:: get_semaphore_prefix()
+
+   Returns the prefix used for generating semaphore names on Unix.
+   This is inherited by descendant processes.  The initial value
+   contains a randomly generated hexadecimal number.
+
+.. function:: set_semaphore_prefix(prefix)
+
+   Sets the prefix used for generating semaphore names on Unix.
+   *prefix* should be an ASCII string not containing forward slashes.

 .. note::

@@ -2191,10 +2254,12 @@
 For more information, see :issue:`5155`, :issue:`5313` and :issue:`5331`

-Windows
-~~~~~~~
-
-Since Windows lacks :func:`os.fork` it has a few extra restrictions:
+Windows and Unix with forking disabled
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Since Windows lacks :func:`os.fork` it has a few extra restrictions.
+These also apply on Unix if :func:`multiprocessing.forking_disable`
+has been used to disable forking.

 More picklability

diff -r e9b60eaf8f50 Lib/multiprocessing/__init__.py
--- a/Lib/multiprocessing/__init__.py	Thu Sep 08 12:40:21 2011 +0100
+++ b/Lib/multiprocessing/__init__.py	Tue Sep 13 14:48:21 2011 +0100
@@ -49,7 +49,9 @@
     'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
     'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
     'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array',
-    'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING',
+    'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING', 'set_executable',
+    'forking_disable', 'forking_enable', 'forking_is_enabled',
+    'set_semaphore_prefix', 'get_semaphore_prefix'
     ]

 __author__ = 'R. Oudkerk (r.m.oudkerk@gmail.com)'
@@ -262,15 +264,61 @@
 #
 #

-if sys.platform == 'win32':
+def set_executable(executable):
+    '''
+    Sets the path to a python.exe or pythonw.exe binary used to run
+    child processes on Windows instead of sys.executable.
+    Useful for people embedding Python.
+    '''
+    from multiprocessing.forking import set_executable
+    set_executable(executable)

-    def set_executable(executable):
-        '''
-        Sets the path to a python.exe or pythonw.exe binary used to run
-        child processes on Windows instead of sys.executable.
-        Useful for people embedding Python.
-        '''
-        from multiprocessing.forking import set_executable
-        set_executable(executable)
+def forking_is_enabled():
+    '''
+    Returns a boolean value indicating whether multiprocessing is
+    currently set to create child processes by forking the current
+    python process rather than by starting new instances of python.

-    __all__ += ['set_executable']
+    On Windows this always returns `False`.  On Unix it returns `True` by
+    default.
+    '''
+    from . import forking
+    return forking._forking_is_enabled
+
+def forking_enable():
+    '''
+    Enables creation of child processes by forking the current process.
+
+    On Windows this raises an error.  On Unix it is enabled by default.
+    '''
+    from . import forking
+    if sys.platform == 'win32':
+        raise ValueError('Forking not supported on Windows')
+    forking._forking_is_enabled = True
+
+def forking_disable():
+    '''
+    Disables creation of child processes by forking the current process.
+    Instead new instances of python are started.
+
+    On Windows this has no effect.  On Unix this makes
+    `multiprocessing` behave more like it does on Windows.
+    '''
+    from . import forking
+    forking._forking_is_enabled = False
+
+def set_semaphore_prefix(prefix):
+    '''
+    Sets the prefix used for the names of semaphores under Unix.
+
+    This is inherited by child processes.
+    '''
+    current_process()._semprefix = prefix.encode('ascii')
+
+def get_semaphore_prefix():
+    '''
+    Returns the prefix used for the names of semaphores under Unix.
+
+    This is inherited by child processes.
+    '''
+    return current_process()._semprefix.decode('ascii')
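The functions added to ``Lib/multiprocessing/__init__.py`` above are the whole
user-facing API of this feature.  As an illustration only (this sketch is not
part of the patch and simply assumes the patched package is importable as
``multiprocessing``), a program that wants fork-free child processes on Unix
disables forking before doing anything else and then follows the usual Windows
guidelines::

    import multiprocessing

    def worker(queue):
        # Runs in a freshly started python interpreter, so it must be
        # importable from the main module instead of being inherited
        # through fork().
        queue.put('hello from child')

    if __name__ == '__main__':
        # Must come before any other use of multiprocessing, because it
        # changes how shared objects (locks, queues, arenas) are created.
        multiprocessing.forking_disable()      # no effect on Windows

        queue = multiprocessing.Queue()
        child = multiprocessing.Process(target=worker, args=(queue,))
        child.start()
        print(queue.get())
        child.join()

Because the child is a new interpreter rather than a forked copy, the
``if __name__ == '__main__'`` guard is required on Unix too, exactly as
described in the documentation changes above.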
diff -r e9b60eaf8f50 Lib/multiprocessing/forking.py
--- a/Lib/multiprocessing/forking.py	Thu Sep 08 12:40:21 2011 +0100
+++ b/Lib/multiprocessing/forking.py	Tue Sep 13 14:48:21 2011 +0100
@@ -37,11 +37,19 @@
 import signal
 import select

+from pickle import dump, load, HIGHEST_PROTOCOL
 from multiprocessing import util, process

 __all__ = ['Popen', 'assert_spawning', 'exit', 'duplicate', 'close',
            'ForkingPickler']

 #
+# Choose whether to do a fork or spawn (fork+exec) on Unix.
+# This affects how some shared resources should be created.
+#
+
+_forking_is_enabled = sys.platform != 'win32'
+
+#
 # Check that the current thread is spawning a child process
 #

@@ -96,15 +104,22 @@
     return partial(func, *args, **keywords)
 ForkingPickler.register(partial, _reduce_partial)

+def dump(obj, file, protocol=None):
+    ForkingPickler(file, protocol).dump(obj)
+
 #
 # Unix
 #

 if sys.platform != 'win32':
     import time
+    import _thread
     import select

-    exit = os._exit
+    WINEXE = False
+    WINSERVICE = False
+
+    exit = os._exit     # XXX only used with NoFork
     duplicate = os.dup
     close = os.close
     _select = util._eintr_retry(select.select)
@@ -116,6 +131,8 @@

     class Popen(object):

+        _tls = _thread._local()
+
         def __init__(self, process_obj):
             sys.stdout.flush()
             sys.stderr.flush()
@@ -124,17 +141,40 @@
             r, w = os.pipe()
             self.sentinel = r

-            self.pid = os.fork()
-            if self.pid == 0:
-                os.close(r)
-                if 'random' in sys.modules:
-                    import random
-                    random.seed()
-                code = process_obj._bootstrap()
-                sys.stdout.flush()
-                sys.stderr.flush()
-                os._exit(code)
+            if _forking_is_enabled:
+                self.pid = os.fork()
+                if self.pid == 0:
+                    os.close(r)
+                    if 'random' in sys.modules:
+                        import random
+                        random.seed()
+                    code = process_obj._bootstrap()
+                    sys.stdout.flush()
+                    sys.stderr.flush()
+                    os._exit(code)
+            else:
+                from_parent_fd, to_child_fd = os.pipe()
+                cmd = get_command_line() + [str(from_parent_fd)]
+
+                self.pid = os.fork()
+                if self.pid == 0:
+                    os.close(r)
+                    os.close(to_child_fd)
+                    os.execv(sys.executable, cmd)
+
+                # send information to child
+                prep_data = get_preparation_data(process_obj._name)
+                os.close(from_parent_fd)
+                to_child = os.fdopen(to_child_fd, 'wb')
+                Popen._tls.process_handle = self.pid
+                try:
+                    dump(prep_data, to_child, HIGHEST_PROTOCOL)
+                    dump(process_obj, to_child, HIGHEST_PROTOCOL)
+                finally:
+                    del Popen._tls.process_handle
+                    to_child.close()
+
             # `w` will be closed when the child exits, at which point `r`
             # will become ready for reading (using e.g. select()).
             os.close(w)
@@ -176,7 +216,14 @@

         @staticmethod
         def thread_is_spawning():
-            return False
+            if _forking_is_enabled:
+                return False
+            else:
+                return getattr(Popen._tls, 'process_handle', None) is not None
+
+        @staticmethod
+        def duplicate_for_child(handle):
+            return handle

 #
 # Windows
@@ -188,12 +235,7 @@
     import _subprocess
     import time

-    from pickle import dump, load, HIGHEST_PROTOCOL
     from _multiprocessing import win32
-    from .util import Finalize
-
-    def dump(obj, file, protocol=None):
-        ForkingPickler(file, protocol).dump(obj)

     #
     #
@@ -207,20 +249,6 @@
     close = win32.CloseHandle

     #
-    # _python_exe is the assumed path to the python executable.
-    # People embedding Python want to modify it.
-    #
-
-    if WINSERVICE:
-        _python_exe = os.path.join(sys.exec_prefix, 'python.exe')
-    else:
-        _python_exe = sys.executable
-
-    def set_executable(exe):
-        global _python_exe
-        _python_exe = exe
-
-    #
     #
     #

@@ -312,126 +340,125 @@
         if self.wait(timeout=0.1) is None:
             raise

-    #
-    #
-    #
+#
+# _python_exe is the assumed path to the python executable.
+# People embedding Python want to modify it.
+#

-    def is_forking(argv):
-        '''
-        Return whether commandline indicates we are forking
-        '''
-        if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':
-            assert len(argv) == 3
-            return True
-        else:
-            return False
+if WINSERVICE:
+    _python_exe = os.path.join(sys.exec_prefix, 'python.exe')
+else:
+    _python_exe = sys.executable
+
+def set_executable(exe):
+    global _python_exe
+    _python_exe = exe

-    def freeze_support():
-        '''
-        Run code for process object if this in not the main process
-        '''
-        if is_forking(sys.argv):
-            main()
-            sys.exit()
+#
+#
+#
+
+def is_forking(argv):
+    '''
+    Return whether commandline indicates we are forking
+    '''
+    if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':
+        assert len(argv) == 3
+        return True
+    else:
+        return False

-    def get_command_line():
-        '''
-        Returns prefix of command line used for spawning a child process
-        '''
-        if process.current_process()._identity==() and is_forking(sys.argv):
-            raise RuntimeError('''
-            Attempt to start a new process before the current process
-            has finished its bootstrapping phase.
-
-            This probably means that you are on Windows and you have
-            forgotten to use the proper idiom in the main module:
+def freeze_support():
+    '''
+    Run code for process object if this is not the main process
+    '''
+    if is_forking(sys.argv):
+        main()
+        sys.exit()

-                if __name__ == '__main__':
-                    freeze_support()
-                    ...
-            The "freeze_support()" line can be omitted if the program
-            is not going to be frozen to produce a Windows executable.''')
+#
+#
+#
+
+def get_command_line():
+    '''
+    Returns prefix of command line used for spawning a child process
+    '''
+    if process.current_process()._identity==() and is_forking(sys.argv):
+        raise RuntimeError('''
+        Attempt to start a new process before the current process
+        has finished its bootstrapping phase.

-        if getattr(sys, 'frozen', False):
-            return [sys.executable, '--multiprocessing-fork']
-        else:
-            prog = 'from multiprocessing.forking import main; main()'
-            return [_python_exe, '-c', prog, '--multiprocessing-fork']
+        This probably means that you are on Windows and you have
+        forgotten to use the proper idiom in the main module:
+
+            if __name__ == '__main__':
+                freeze_support()
+                ...
-    def main():
-        '''
-        Run code specifed by data received over pipe
-        '''
-        assert is_forking(sys.argv)
+        The "freeze_support()" line can be omitted if the program
+        is not going to be frozen to produce a Windows executable.''')

-        handle = int(sys.argv[-1])
+    if getattr(sys, 'frozen', False):
+        return [sys.executable, '--multiprocessing-fork']
+    else:
+        prog = 'from multiprocessing.forking import main; main()'
+        return [_python_exe, '-c', prog, '--multiprocessing-fork']
+
+
+def main():
+    '''
+    Run code specified by data received over pipe
+    '''
+    global _forking_is_enabled
+
+    assert is_forking(sys.argv)
+    _forking_is_enabled = False
+
+    handle = int(sys.argv[-1])
+    if sys.platform == 'win32':
         fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
-        from_parent = os.fdopen(fd, 'rb')
+    else:
+        fd = handle
+    from_parent = os.fdopen(fd, 'rb')

-        process.current_process()._inheriting = True
-        preparation_data = load(from_parent)
-        prepare(preparation_data)
-        self = load(from_parent)
-        process.current_process()._inheriting = False
+    process.current_process()._inheriting = True
+    preparation_data = load(from_parent)
+    prepare(preparation_data)
+    self = load(from_parent)
+    process.current_process()._inheriting = False

-        from_parent.close()
+    from_parent.close()

-        exitcode = self._bootstrap()
-        exit(exitcode)
+    exitcode = self._bootstrap()
+    exit(exitcode)

-    def get_preparation_data(name):
-        '''
-        Return info about parent needed by child to unpickle process object
-        '''
-        from .util import _logger, _log_to_stderr
+def get_preparation_data(name):
+    '''
+    Return info about parent needed by child to unpickle process object
+    '''
+    from .util import _logger, _log_to_stderr

-        d = dict(
-            name=name,
-            sys_path=sys.path,
-            sys_argv=sys.argv,
-            log_to_stderr=_log_to_stderr,
-            orig_dir=process.ORIGINAL_DIR,
-            authkey=process.current_process().authkey,
-            )
+    d = dict(
+        name=name,
+        sys_path=sys.path,
+        sys_argv=sys.argv,
+        log_to_stderr=_log_to_stderr,
+        orig_dir=process.ORIGINAL_DIR,
+        authkey=process.current_process().authkey,
+        )

-        if _logger is not None:
-            d['log_level'] = _logger.getEffectiveLevel()
+    if _logger is not None:
+        d['log_level'] = _logger.getEffectiveLevel()

-        if not WINEXE and not WINSERVICE:
-            main_path = getattr(sys.modules['__main__'], '__file__', None)
-            if not main_path and sys.argv[0] not in ('', '-c'):
-                main_path = sys.argv[0]
-            if main_path is not None:
-                if not os.path.isabs(main_path) and \
-                                          process.ORIGINAL_DIR is not None:
-                    main_path = os.path.join(process.ORIGINAL_DIR, main_path)
-                d['main_path'] = os.path.normpath(main_path)
+    if sys.platform != 'win32' or (not WINEXE and not WINSERVICE):
+        main_path = getattr(sys.modules['__main__'], '__file__', None)
+        if not main_path and sys.argv[0] not in ('', '-c'):
+            main_path = sys.argv[0]
+        if main_path is not None:
+            if not os.path.isabs(main_path) and \
+                                      process.ORIGINAL_DIR is not None:
+                main_path = os.path.join(process.ORIGINAL_DIR, main_path)
+            d['main_path'] = os.path.normpath(main_path)

-        return d
-
-    #
-    # Make (Pipe)Connection picklable
-    #
-
-    # Late import because of circular import
-    from .connection import Connection, PipeConnection
-
-    def reduce_connection(conn):
-        if not Popen.thread_is_spawning():
-            raise RuntimeError(
-                'By default %s objects can only be shared between processes\n'
-                'using inheritance' % type(conn).__name__
-                )
-        return type(conn), (Popen.duplicate_for_child(conn.fileno()),
-                            conn.readable, conn.writable)
-
-    ForkingPickler.register(Connection, reduce_connection)
-    ForkingPickler.register(PipeConnection, reduce_connection)
+    return d

 #
 # Prepare current process
@@ -518,3 +545,29 @@
                     obj.__module__ = '__main__'
             except Exception:
                 pass
+
+#
+# Make (Pipe)Connection picklable
+#
+
+# Late import because of circular import
+from .connection import Connection
+
+def reduce_connection(conn):
+    # XXX check not necessary since only registered with ForkingPickler
+    if not Popen.thread_is_spawning():
+        raise RuntimeError(
+            'By default %s objects can only be shared between processes\n'
+            'using inheritance' % type(conn).__name__
+            )
+    return type(conn), (Popen.duplicate_for_child(conn.fileno()),
+                        conn.readable, conn.writable)
+
+ForkingPickler.register(Connection, reduce_connection)
+
+try:
+    from .connection import PipeConnection
+except ImportError:
+    pass
+else:
+    ForkingPickler.register(PipeConnection, reduce_connection)
diff -r e9b60eaf8f50 Lib/multiprocessing/heap.py
--- a/Lib/multiprocessing/heap.py	Thu Sep 08 12:40:21 2011 +0100
+++ b/Lib/multiprocessing/heap.py	Tue Sep 13 14:48:21 2011 +0100
@@ -41,8 +41,8 @@
 import itertools
 import _multiprocessing

-from multiprocessing.util import Finalize, info
-from multiprocessing.forking import assert_spawning
+from .util import Finalize, info, get_temp_dir
+from .forking import assert_spawning, ForkingPickler

 __all__ = ['BufferWrapper']

@@ -78,10 +78,29 @@

     class Arena(object):

-        def __init__(self, size):
-            self.buffer = mmap.mmap(-1, size)
+        _counter = itertools.count()
+
+        def __init__(self, size, fileno=-1):
+            from .forking import _forking_is_enabled
             self.size = size
-            self.name = None
+            self.fileno = fileno
+            if fileno == -1 and not _forking_is_enabled:
+                name = os.path.join(
+                    get_temp_dir(),
+                    'pym-%d-%d' % (os.getpid(), next(self._counter)))
+                self.fileno = os.open(
+                    name, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0o600)
+                os.unlink(name)
+                os.ftruncate(self.fileno, size)
+            self.buffer = mmap.mmap(self.fileno, self.size)
+
+    def reduce_arena(a):
+        if a.fileno == -1:
+            raise ValueError('Arena is unpicklable because '
+                             'forking_is_enabled() was True when it was created')
+        return Arena, (a.size, a.fileno)
+
+    ForkingPickler.register(Arena, reduce_arena)

 #
 # Class allowing allocation of chunks of memory from arenas
diff -r e9b60eaf8f50 Lib/multiprocessing/process.py
--- a/Lib/multiprocessing/process.py	Thu Sep 08 12:40:21 2011 +0100
+++ b/Lib/multiprocessing/process.py	Tue Sep 13 14:48:21 2011 +0100
@@ -42,6 +42,7 @@
 import sys
 import signal
 import itertools
+import binascii
 from _weakrefset import WeakSet

 #
@@ -103,6 +104,7 @@
         else:
             self._daemonic = _current_process._daemonic
             self._tempdir = _current_process._tempdir
+        self._semprefix = _current_process._semprefix
         self._parent_pid = os.getpid()
         self._popen = None
         self._target = target
@@ -333,6 +335,7 @@
         self._children = set()
         self._authkey = AuthenticationString(os.urandom(32))
         self._tempdir = None
+        self._semprefix = b'mp-' + binascii.hexlify(os.urandom(4))

 _current_process = _MainProcess()
 del _MainProcess
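To make the semaphore naming scheme concrete before the ``synchronize.py``
changes below: ``process.py`` (above) gives the main process a random
``_semprefix`` such as ``b'mp-a9b00dd3'``, and ``SemLock._make_name()``
(below) appends the pid and a counter.  The following sketch is illustrative
only and not part of the patch; the prefix ``mp-myapp`` is an arbitrary
example value::

    import multiprocessing

    if __name__ == '__main__':
        multiprocessing.forking_disable()
        # An ASCII prefix without forward slashes, as required by
        # set_semaphore_prefix().
        multiprocessing.set_semaphore_prefix('mp-myapp')

        lock = multiprocessing.Lock()
        # The underlying POSIX semaphore is now named something like
        # '/mp-myapp-<pid>-0', which shows up on Linux as
        # /dev/shm/sem.mp-myapp-<pid>-0 until it is unlinked.
        print(multiprocessing.get_semaphore_prefix())   # 'mp-myapp'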
diff -r e9b60eaf8f50 Lib/multiprocessing/synchronize.py
--- a/Lib/multiprocessing/synchronize.py	Thu Sep 08 12:40:21 2011 +0100
+++ b/Lib/multiprocessing/synchronize.py	Tue Sep 13 14:48:21 2011 +0100
@@ -39,6 +39,7 @@
 import threading
 import os
 import sys
+import itertools

 from time import time as _time, sleep as _sleep

@@ -65,14 +66,22 @@
 RECURSIVE_MUTEX, SEMAPHORE = list(range(2))
 SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX

+sem_unlink = _multiprocessing.SemLock.sem_unlink
+
 #
 # Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
 #

 class SemLock(object):

+    _counter = itertools.count()
+
     def __init__(self, kind, value, maxvalue):
-        sl = self._semlock = _multiprocessing.SemLock(kind, value, maxvalue)
+        from .forking import _forking_is_enabled
+        unlink_immediately = _forking_is_enabled or sys.platform == 'win32'
+        sl = self._semlock = _multiprocessing.SemLock(
+            kind, value, maxvalue, self._make_name(), unlink_immediately)
+
         debug('created semlock with handle %s' % sl.handle)
         self._make_methods()

@@ -81,6 +90,12 @@
                 obj._semlock._after_fork()
             register_after_fork(self, _after_fork)

+        if self._semlock.name is not None:
+            # On Unix with forking disabled, when the object is
+            # garbage collected or the process shuts down we must
+            # unlink the semaphore name
+            Finalize(self, sem_unlink, (self._semlock.name,), exitpriority=0)
+
     def _make_methods(self):
         self.acquire = self._semlock.acquire
         self.release = self._semlock.release
@@ -94,13 +109,19 @@
     def __getstate__(self):
         assert_spawning(self)
         sl = self._semlock
-        return (Popen.duplicate_for_child(sl.handle), sl.kind, sl.maxvalue)
+        return (Popen.duplicate_for_child(sl.handle), sl.kind, sl.maxvalue,
+                sl.name)

     def __setstate__(self, state):
         self._semlock = _multiprocessing.SemLock._rebuild(*state)
         debug('recreated blocker with handle %r' % state[0])
         self._make_methods()

+    @staticmethod
+    def _make_name():
+        return '/%s-%s-%s' % (current_process()._semprefix.decode('ascii'),
+                              os.getpid(), next(SemLock._counter))
+
 #
 # Semaphore
 #
diff -r e9b60eaf8f50 Modules/_multiprocessing/semaphore.c
--- a/Modules/_multiprocessing/semaphore.c	Thu Sep 08 12:40:21 2011 +0100
+++ b/Modules/_multiprocessing/semaphore.c	Tue Sep 13 14:48:21 2011 +0100
@@ -17,6 +17,7 @@
     int count;
     int maxvalue;
     int kind;
+    char *name;
 } SemLockObject;

 #define ISMINE(o) (o->count > 0 && PyThread_get_thread_ident() == o->last_tid)
@@ -399,7 +400,8 @@
  */

 static PyObject *
-newsemlockobject(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue)
+newsemlockobject(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue,
+                 char *name)
 {
     SemLockObject *self;

@@ -411,21 +413,22 @@
     self->count = 0;
     self->last_tid = 0;
     self->maxvalue = maxvalue;
+    self->name = name;
     return (PyObject*)self;
 }

 static PyObject *
 semlock_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
 {
-    char buffer[256];
     SEM_HANDLE handle = SEM_FAILED;
-    int kind, maxvalue, value;
+    int kind, maxvalue, value, unlink;
     PyObject *result;
-    static char *kwlist[] = {"kind", "value", "maxvalue", NULL};
-    static int counter = 0;
+    char *name, *name_copy = NULL;
+    static char *kwlist[] = {"kind", "value", "maxvalue", "name", "unlink",
+                             NULL};

-    if (!PyArg_ParseTupleAndKeywords(args, kwds, "iii", kwlist,
-                                     &kind, &value, &maxvalue))
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiisi", kwlist,
+                                     &kind, &value, &maxvalue, &name, &unlink))
         return NULL;

     if (kind != RECURSIVE_MUTEX && kind != SEMAPHORE) {
@@ -433,18 +436,23 @@
         return NULL;
     }

-    PyOS_snprintf(buffer, sizeof(buffer), "/mp%ld-%d", (long)getpid(), counter++);
+    if (!unlink) {
+        name_copy = PyMem_Malloc(strlen(name) + 1);
+        if (name_copy == NULL)
+            goto failure;
+        strcpy(name_copy, name);
+    }

     SEM_CLEAR_ERROR();
-    handle = SEM_CREATE(buffer, value, maxvalue);
+    handle = SEM_CREATE(name, value, maxvalue);
     /* On Windows we should fail if GetLastError()==ERROR_ALREADY_EXISTS */
     if (handle == SEM_FAILED || SEM_GET_LAST_ERROR() != 0)
         goto failure;

-    if (SEM_UNLINK(buffer) < 0)
+    if (unlink && SEM_UNLINK(name) < 0)
         goto failure;

-    result = newsemlockobject(type, handle, kind, maxvalue);
+    result = newsemlockobject(type, handle, kind, maxvalue, name_copy);
     if (!result)
         goto failure;
@@ -453,6 +461,7 @@
   failure:
     if (handle != SEM_FAILED)
         SEM_CLOSE(handle);
+    PyMem_Free(name_copy);
     mp_SetError(NULL, MP_STANDARD_ERROR);
     return NULL;
 }
@@ -462,12 +471,21 @@
 {
     SEM_HANDLE handle;
     int kind, maxvalue;
+    char *name;

-    if (!PyArg_ParseTuple(args, F_SEM_HANDLE "ii",
-                          &handle, &kind, &maxvalue))
+    if (!PyArg_ParseTuple(args, F_SEM_HANDLE "iiz",
+                          &handle, &kind, &maxvalue, &name))
         return NULL;

-    return newsemlockobject(type, handle, kind, maxvalue);
+#ifndef MS_WINDOWS
+    if (name != NULL) {
+        handle = sem_open(name, 0);
+        if (handle == SEM_FAILED) {
+            mp_SetError(NULL, MP_STANDARD_ERROR);
+            return NULL;
+        }
+    }
+#endif
+
+    return newsemlockobject(type, handle, kind, maxvalue, name);
 }

 static void
@@ -475,6 +493,7 @@
 {
     if (self->handle != SEM_FAILED)
         SEM_CLOSE(self->handle);
+    PyMem_Free(self->name);
     PyObject_Del(self);
 }

@@ -537,6 +556,22 @@
     Py_RETURN_NONE;
 }

+static PyObject *
+semlock_unlink(PyObject *ignore, PyObject *args)
+{
+    char *name;
+
+    if (!PyArg_ParseTuple(args, "s", &name))
+        return NULL;
+
+    if (SEM_UNLINK(name) < 0) {
+        mp_SetError(NULL, MP_STANDARD_ERROR);
+        return NULL;
+    }
+
+    Py_RETURN_NONE;
+}
+
 /*
  * Semaphore methods
  */
@@ -562,6 +597,9 @@
      ""},
     {"_after_fork", (PyCFunction)semlock_afterfork, METH_NOARGS,
      "rezero the net acquisition count after fork()"},
+    {"sem_unlink", (PyCFunction)semlock_unlink, METH_VARARGS | METH_STATIC,
+     "unlink the named semaphore using sem_unlink()"},
+
     {NULL}
 };

@@ -576,6 +614,8 @@
      ""},
     {"maxvalue", T_INT, offsetof(SemLockObject, maxvalue), READONLY,
      ""},
+    {"name", T_STRING, offsetof(SemLockObject, name), READONLY,
+     ""},
     {NULL}
 };
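Finally, an illustrative cleanup sketch (not part of the patch): the
``sem_unlink`` static method added to the C extension above can be used to
remove a "leaked" named semaphore of the kind the documentation changes warn
about.  The name ``/mp-a9b00dd3-5133-1`` is the example from the docs; on
Linux it corresponds to the file ``/dev/shm/sem.mp-a9b00dd3-5133-1``::

    import _multiprocessing

    def remove_leaked_semaphore(posix_name):
        # posix_name has a leading slash and no 'sem.' prefix, e.g.
        # '/mp-a9b00dd3-5133-1' for /dev/shm/sem.mp-a9b00dd3-5133-1.
        try:
            _multiprocessing.SemLock.sem_unlink(posix_name)
        except OSError:
            pass    # already unlinked, or never existed

    if __name__ == '__main__':
        remove_leaked_semaphore('/mp-a9b00dd3-5133-1')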