diff -r 13b6cfb4e141 Python/ceval_gil.h
--- a/Python/ceval_gil.h Fri Mar 18 08:55:14 2011 -0400
+++ b/Python/ceval_gil.h Mon Mar 21 11:59:28 2011 +0000
@@ -144,122 +144,10 @@
  * Windows (2000 and later, as well as (hopefully) CE) support
  */
 
-#include <windows.h>
+#define COND_T_ONLY
+#include "thread_nt.h"
+#undef COND_T_ONLY
 
-#define MUTEX_T CRITICAL_SECTION
-#define MUTEX_INIT(mut) do { \
-    if (!(InitializeCriticalSectionAndSpinCount(&(mut), 4000))) \
-        Py_FatalError("CreateMutex(" #mut ") failed"); \
-} while (0)
-#define MUTEX_FINI(mut) \
-    DeleteCriticalSection(&(mut))
-#define MUTEX_LOCK(mut) \
-    EnterCriticalSection(&(mut))
-#define MUTEX_UNLOCK(mut) \
-    LeaveCriticalSection(&(mut))
-
-/* We emulate condition variables with a semaphore.
-   We use a Semaphore rather than an auto-reset event, because although
-   an auto-resent event might appear to solve the lost-wakeup bug (race
-   condition between releasing the outer lock and waiting) because it
-   maintains state even though a wait hasn't happened, there is still
-   a lost wakeup problem if more than one thread are interrupted in the
-   critical place. A semaphore solves that.
-   Because it is ok to signal a condition variable with no one
-   waiting, we need to keep track of the number of
-   waiting threads. Otherwise, the semaphore's state could rise
-   without bound.
-
-   Generic emulations of the pthread_cond_* API using
-   Win32 functions can be found on the Web.
-   The following read can be edificating (or not):
-   http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
-*/
-typedef struct COND_T
-{
-    HANDLE sem;    /* the semaphore */
-    int n_waiting; /* how many are unreleased */
-} COND_T;
-
-__inline static void _cond_init(COND_T *cond)
-{
-    /* A semaphore with a large max value, The positive value
-     * is only needed to catch those "lost wakeup" events and
-     * race conditions when a timed wait elapses.
-     */
-    if (!(cond->sem = CreateSemaphore(NULL, 0, 1000, NULL)))
-        Py_FatalError("CreateSemaphore() failed");
-    cond->n_waiting = 0;
-}
-
-__inline static void _cond_fini(COND_T *cond)
-{
-    BOOL ok = CloseHandle(cond->sem);
-    if (!ok)
-        Py_FatalError("CloseHandle() failed");
-}
-
-__inline static void _cond_wait(COND_T *cond, MUTEX_T *mut)
-{
-    ++cond->n_waiting;
-    MUTEX_UNLOCK(*mut);
-    /* "lost wakeup bug" would occur if the caller were interrupted here,
-     * but we are safe because we are using a semaphore wich has an internal
-     * count.
-     */
-    if (WaitForSingleObject(cond->sem, INFINITE) == WAIT_FAILED)
-        Py_FatalError("WaitForSingleObject() failed");
-    MUTEX_LOCK(*mut);
-}
-
-__inline static int _cond_timed_wait(COND_T *cond, MUTEX_T *mut,
-                                     int us)
-{
-    DWORD r;
-    ++cond->n_waiting;
-    MUTEX_UNLOCK(*mut);
-    r = WaitForSingleObject(cond->sem, us / 1000);
-    if (r == WAIT_FAILED)
-        Py_FatalError("WaitForSingleObject() failed");
-    MUTEX_LOCK(*mut);
-    if (r == WAIT_TIMEOUT)
-        --cond->n_waiting;
-    /* Here we have a benign race condition with _cond_signal. If the
-     * wait operation has timed out, but before we can acquire the
-     * mutex again to decrement n_waiting, a thread holding the mutex
-     * still sees a positive n_waiting value and may call
-     * ReleaseSemaphore and decrement n_waiting.
-     * This will cause n_waiting to be decremented twice.
-     * This is benign, though, because ReleaseSemaphore will also have
-     * been called, leaving the semaphore state positive. We may
-     * thus end up with semaphore in state 1, and n_waiting == -1, and
-     * the next time someone calls _cond_wait(), that thread will
-     * pass right through, decrementing the semaphore state and
-     * incrementing n_waiting, thus correcting the extra _cond_signal.
-     */
-    return r == WAIT_TIMEOUT;
-}
-
-__inline static void _cond_signal(COND_T *cond) {
-    /* NOTE: This must be called with the mutex held */
-    if (cond->n_waiting > 0) {
-        if (!ReleaseSemaphore(cond->sem, 1, NULL))
-            Py_FatalError("ReleaseSemaphore() failed");
-        --cond->n_waiting;
-    }
-}
-
-#define COND_INIT(cond) \
-    _cond_init(&(cond))
-#define COND_FINI(cond) \
-    _cond_fini(&(cond))
-#define COND_SIGNAL(cond) \
-    _cond_signal(&(cond))
-#define COND_WAIT(cond, mut) \
-    _cond_wait(&(cond), &(mut))
-#define COND_TIMED_WAIT(cond, mut, us, timeout_result) do { \
-    (timeout_result) = _cond_timed_wait(&(cond), &(mut), us); \
-} while (0)
 
 #else
diff -r 13b6cfb4e141 Python/thread_nt.h
--- a/Python/thread_nt.h Fri Mar 18 08:55:14 2011 -0400
+++ b/Python/thread_nt.h Mon Mar 21 11:59:28 2011 +0000
@@ -9,60 +9,189 @@
 #include <process.h>
 #endif
 
+/* Mutex and condition variable support for NT */
+
+#define MUTEX_T CRITICAL_SECTION
+#define MUTEX_INIT(mut) do { \
+    if (!(InitializeCriticalSectionAndSpinCount(&(mut), 4000))) \
+        Py_FatalError("CreateMutex(" #mut ") failed"); \
+} while (0)
+#define MUTEX_FINI(mut) \
+    DeleteCriticalSection(&(mut))
+#define MUTEX_LOCK(mut) \
+    EnterCriticalSection(&(mut))
+#define MUTEX_UNLOCK(mut) \
+    LeaveCriticalSection(&(mut))
+
+/* We emulate condition variables with a semaphore.
+   We use a Semaphore rather than an auto-reset event, because although
+   an auto-resent event might appear to solve the lost-wakeup bug (race
+   condition between releasing the outer lock and waiting) because it
+   maintains state even though a wait hasn't happened, there is still
+   a lost wakeup problem if more than one thread are interrupted in the
+   critical place. A semaphore solves that.
+   Because it is ok to signal a condition variable with no one
+   waiting, we need to keep track of the number of
+   waiting threads. Otherwise, the semaphore's state could rise
+   without bound.
+
+   Generic emulations of the pthread_cond_* API using
+   Win32 functions can be found on the Web.
+   The following read can be edificating (or not):
+   http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
+*/
+
+typedef struct COND_T
+{
+    HANDLE sem;    /* the semaphore */
+    int n_waiting; /* how many are unreleased */
+} COND_T;
+
+__inline static void _cond_init(COND_T *cond)
+{
+    /* A semaphore with a large max value, The positive value
+     * is only needed to catch those "lost wakeup" events and
+     * race conditions when a timed wait elapses.
+     */
+    if (!(cond->sem = CreateSemaphore(NULL, 0, 1000, NULL)))
+        Py_FatalError("CreateSemaphore() failed");
+    cond->n_waiting = 0;
+}
+
+__inline static void _cond_fini(COND_T *cond)
+{
+    BOOL ok = CloseHandle(cond->sem);
+    if (!ok)
+        Py_FatalError("CloseHandle() failed");
+}
+
+__inline static void _cond_wait(COND_T *cond, MUTEX_T *mut)
+{
+    ++cond->n_waiting;
+    MUTEX_UNLOCK(*mut);
+    /* "lost wakeup bug" would occur if the caller were interrupted here,
+     * but we are safe because we are using a semaphore wich has an internal
+     * count.
+     */
+    if (WaitForSingleObject(cond->sem, INFINITE) == WAIT_FAILED)
+        Py_FatalError("WaitForSingleObject() failed");
+    MUTEX_LOCK(*mut);
+}
+
+__inline static int _cond_timed_wait(COND_T *cond, MUTEX_T *mut,
+                                     int us)
+{
+    DWORD r;
+    ++cond->n_waiting;
+    MUTEX_UNLOCK(*mut);
+    r = WaitForSingleObject(cond->sem, us / 1000);
+    if (r == WAIT_FAILED)
+        Py_FatalError("WaitForSingleObject() failed");
+    MUTEX_LOCK(*mut);
+    if (r == WAIT_TIMEOUT)
+        --cond->n_waiting;
+    /* Here we have a benign race condition with _cond_signal. If the
+     * wait operation has timed out, but before we can acquire the
+     * mutex again to decrement n_waiting, a thread holding the mutex
+     * still sees a positive n_waiting value and may call
+     * ReleaseSemaphore and decrement n_waiting.
+     * This will cause n_waiting to be decremented twice.
+     * This is benign, though, because ReleaseSemaphore will also have
+     * been called, leaving the semaphore state positive. We may
+     * thus end up with semaphore in state 1, and n_waiting == -1, and
+     * the next time someone calls _cond_wait(), that thread will
+     * pass right through, decrementing the semaphore state and
+     * incrementing n_waiting, thus correcting the extra _cond_signal.
+     */
+    return r == WAIT_TIMEOUT;
+}
+
+__inline static void _cond_signal(COND_T *cond) {
+    /* NOTE: This must be called with the mutex held */
+    if (cond->n_waiting > 0) {
+        if (!ReleaseSemaphore(cond->sem, 1, NULL))
+            Py_FatalError("ReleaseSemaphore() failed");
+        --cond->n_waiting;
+    }
+}
+
+
+#define COND_INIT(cond) \
+    _cond_init(&(cond))
+#define COND_FINI(cond) \
+    _cond_fini(&(cond))
+#define COND_SIGNAL(cond) \
+    _cond_signal(&(cond))
+#define COND_WAIT(cond, mut) \
+    _cond_wait(&(cond), &(mut))
+#define COND_TIMED_WAIT(cond, mut, us, timeout_result) do { \
+    (timeout_result) = _cond_timed_wait(&(cond), &(mut), us); \
+} while (0)
+
+#ifndef COND_T_ONLY
+
+/* an augmented semaphore, where part of the countint is done in user space */
 typedef struct NRMUTEX {
-    LONG   owned ;
-    DWORD  thread_id ;
-    HANDLE hevent ;
+    COND_T cond;
+    MUTEX_T mutex;
+    int locked;
 } NRMUTEX, *PNRMUTEX ;
 
 BOOL
 InitializeNonRecursiveMutex(PNRMUTEX mutex)
 {
-    mutex->owned = -1 ;  /* No threads have entered NonRecursiveMutex */
-    mutex->thread_id = 0 ;
-    mutex->hevent = CreateEvent(NULL, FALSE, FALSE, NULL) ;
-    return mutex->hevent != NULL ;  /* TRUE if the mutex is created */
+    COND_INIT(mutex->cond);
+    MUTEX_INIT(mutex->mutex);
+    mutex->locked = 0;
+    return TRUE;
 }
 
 VOID
 DeleteNonRecursiveMutex(PNRMUTEX mutex)
 {
     /* No in-use check */
-    CloseHandle(mutex->hevent) ;
-    mutex->hevent = NULL ; /* Just in case */
+    COND_FINI(mutex->cond);
+    MUTEX_FINI(mutex->mutex);
 }
 
 DWORD
 EnterNonRecursiveMutex(PNRMUTEX mutex, DWORD milliseconds)
 {
-    /* Assume that the thread waits successfully */
-    DWORD ret ;
-
-    /* InterlockedIncrement(&mutex->owned) == 0 means that no thread currently owns the mutex */
-    if (milliseconds == 0)
+    DWORD result;
+    MUTEX_LOCK(mutex->mutex);
+    while(mutex->locked)
     {
-        if (InterlockedCompareExchange(&mutex->owned, 0, -1) != -1)
-            return WAIT_TIMEOUT ;
-        ret = WAIT_OBJECT_0 ;
+        if (milliseconds != INFINITE) {
+            /* can't loop here without adjusting the timeout.
+             * just try once and possibly timeout early
+             */
+            int dummy;
+            COND_TIMED_WAIT(mutex->cond, mutex->mutex,
+                            milliseconds*1000,
+                            dummy);
+            break;
+        } else
+            COND_WAIT(mutex->cond, mutex->mutex);
     }
-    else
-        ret = InterlockedIncrement(&mutex->owned) ?
-            /* Some thread owns the mutex, let's wait... */
-            WaitForSingleObject(mutex->hevent, milliseconds) : WAIT_OBJECT_0 ;
-
-    mutex->thread_id = GetCurrentThreadId() ;  /* We own it */
-    return ret ;
+    if (!mutex->locked)
+    {
+        mutex->locked = 1;
+        result = WAIT_OBJECT_0;
+    } else
+        result = WAIT_TIMEOUT;
+    MUTEX_UNLOCK(mutex->mutex);
+    return result;
 }
 
 BOOL
 LeaveNonRecursiveMutex(PNRMUTEX mutex)
 {
-    /* We don't own the mutex */
-    mutex->thread_id = 0 ;
-    return
-        InterlockedDecrement(&mutex->owned) < 0 ||
-        SetEvent(mutex->hevent) ; /* Other threads are waiting, wake one on them up */
+    MUTEX_LOCK(mutex->mutex);
+    mutex->locked = 0;
+    COND_SIGNAL(mutex->cond);
+    MUTEX_UNLOCK(mutex->mutex);
+    return TRUE;
 }
 
 PNRMUTEX
@@ -386,5 +515,5 @@
 void
 PyThread_ReInitTLS(void)
 {}
-
+#endif /* defined COND_T_ONLY */
 #endif
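
A note on the timed branch of the new EnterNonRecursiveMutex(): as its in-code comment says, it cannot simply loop around COND_TIMED_WAIT() without adjusting the timeout, so it waits once and may return WAIT_TIMEOUT earlier than requested if the wakeup loses the race for the locked flag. For discussion only, here is a minimal sketch of a deadline-based variant built on the same MUTEX_T/COND_T macros; the function name EnterNonRecursiveMutexTimed and the use of GetTickCount() for the deadline are illustrative assumptions, not part of the patch.

/* Sketch only, not part of the patch: recompute the remaining time after
 * every wakeup instead of waiting once.  Assumes the MUTEX_T/COND_T macros
 * and NRMUTEX layout from the thread_nt.h hunk above.  GetTickCount()
 * wraps after ~49.7 days; the unsigned subtraction keeps one wrap harmless.
 */
DWORD
EnterNonRecursiveMutexTimed(PNRMUTEX mutex, DWORD milliseconds)
{
    DWORD result = WAIT_TIMEOUT;
    DWORD deadline = GetTickCount() + milliseconds;

    MUTEX_LOCK(mutex->mutex);
    while (mutex->locked) {
        LONG remaining = (LONG)(deadline - GetTickCount());
        int timed_out;
        if (milliseconds == INFINITE) {
            COND_WAIT(mutex->cond, mutex->mutex);
            continue;
        }
        if (remaining <= 0)
            break;                  /* deadline passed while we were waiting */
        COND_TIMED_WAIT(mutex->cond, mutex->mutex,
                        remaining * 1000, timed_out);
        (void)timed_out;            /* the loop re-checks 'locked' and the deadline */
    }
    if (!mutex->locked) {
        mutex->locked = 1;          /* free (or just released): take it */
        result = WAIT_OBJECT_0;
    }
    MUTEX_UNLOCK(mutex->mutex);
    return result;
}

This keeps the WAIT_OBJECT_0/WAIT_TIMEOUT return convention of the patch while removing the early-timeout case; the cost is one extra GetTickCount() call per wakeup.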