# HG changeset patch # Parent 74d3dc78f0db8b03aef694462aaff419b2c01a21 Make lock objects support timeouts properly diff -r 74d3dc78f0db Python/thread_nt.h --- a/Python/thread_nt.h Mon Mar 21 13:26:24 2011 +0100 +++ b/Python/thread_nt.h Tue Mar 22 12:39:27 2011 +0000 @@ -11,7 +11,7 @@ typedef struct NRMUTEX { LONG owned ; - DWORD thread_id ; + LONG timeouts ; HANDLE hevent ; } NRMUTEX, *PNRMUTEX ; @@ -20,7 +20,7 @@ InitializeNonRecursiveMutex(PNRMUTEX mutex) { mutex->owned = -1 ; /* No threads have entered NonRecursiveMutex */ - mutex->thread_id = 0 ; + mutex->timeouts = 0; mutex->hevent = CreateEvent(NULL, FALSE, FALSE, NULL) ; return mutex->hevent != NULL ; /* TRUE if the mutex is created */ } @@ -36,30 +36,64 @@ DWORD EnterNonRecursiveMutex(PNRMUTEX mutex, DWORD milliseconds) { - /* Assume that the thread waits successfully */ DWORD ret ; - /* InterlockedIncrement(&mutex->owned) == 0 means that no thread currently owns the mutex */ + /* mutex->owned - mutex->timeouts is an estimate of the number of + threads currently waiting for the mutex. -1 means unlocked and + 0 means locked but no waiters. */ if (milliseconds == 0) { - if (InterlockedCompareExchange(&mutex->owned, 0, -1) != -1) + if (InterlockedCompareExchange(&mutex->owned, 0, -1) == -1) { + return WAIT_OBJECT_0 ; + } else if (mutex->timeouts == 0) { /* harmless race */ + /* If the test above fails because of a race then we fall + through to the safe slow path. If it succeeds because + of a race then there must be contention (or at least + very recent contention) for the lock, so timing out is + reasonable. */ return WAIT_TIMEOUT ; - ret = WAIT_OBJECT_0 ; + } + /* fall through to slow path */ } - else - ret = InterlockedIncrement(&mutex->owned) ? - /* Some thread owns the mutex, let's wait... 
*/ - WaitForSingleObject(mutex->hevent, milliseconds) : WAIT_OBJECT_0 ; - mutex->thread_id = GetCurrentThreadId() ; /* We own it */ - return ret ; + if (InterlockedIncrement(&mutex->owned) == 0) + return WAIT_OBJECT_0 ; + + /* slow path */ + ret = WaitForSingleObject(mutex->hevent, milliseconds) ; + if (ret != WAIT_OBJECT_0) + { + /* We increment mutex->timeouts but leave mutex->owned + unchanged. This has the effect of blocking the fast + path. + + XXX We should really worry about mutex->owned and + mutex->timeouts overflowing -- might happen if a thread + holds the lock for a *very* long time */ + InterlockedIncrement(&mutex->timeouts) ; + return ret ; + } + + /* unblock the fast path */ + if (mutex->timeouts != 0) /* harmless race */ + { + /* If mutex->timeouts is non-zero then it will prevent use of the + fast path. We attempt to rezero it (adjusting mutex->owned + accordingly) using atomic operations. Since we own the lock the + worst interference we might get from other threads is that they + might increment mutex->timeouts without us noticing: but that is + harmless. */ + LONG timeouts = InterlockedExchangeAdd(&mutex->timeouts, 0) ; + InterlockedExchangeAdd(&mutex->timeouts, -timeouts) ; + InterlockedExchangeAdd(&mutex->owned, -timeouts) ; + } + + return WAIT_OBJECT_0 ; } BOOL LeaveNonRecursiveMutex(PNRMUTEX mutex) { - /* We don't own the mutex */ - mutex->thread_id = 0 ; return InterlockedDecrement(&mutex->owned) < 0 || SetEvent(mutex->hevent) ; /* Other threads are waiting, wake one on them up */