diff --git a/Include/pyatomic.h b/Include/pyatomic.h
--- a/Include/pyatomic.h
+++ b/Include/pyatomic.h
@@ -234,9 +234,9 @@ static __inline__ void
 #endif

 /* Standardized shortcuts. */
-#define _Py_atomic_store(ATOMIC_VAL, NEW_VAL) \
+#define _Py_atomic_store_strict(ATOMIC_VAL, NEW_VAL) \
     _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_seq_cst)
-#define _Py_atomic_load(ATOMIC_VAL) \
+#define _Py_atomic_load_strict(ATOMIC_VAL) \
     _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_seq_cst)

 /* Python-local extensions */
diff --git a/Python/ceval.c b/Python/ceval.c
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -223,7 +223,7 @@ PyEval_GetCallStats(PyObject *self)

 #ifdef WITH_THREAD
-#define GIL_REQUEST _Py_atomic_load_relaxed(&gil_drop_request)
+#define GIL_REQUEST _Py_atomic_load_strict(&gil_drop_request)
 #else
 #define GIL_REQUEST 0
 #endif
@@ -232,23 +232,23 @@ PyEval_GetCallStats(PyObject *self)
    1.  We believe this is all right because the eval loop will release
    the GIL eventually anyway. */
 #define COMPUTE_EVAL_BREAKER() \
-    _Py_atomic_store_relaxed( \
+    _Py_atomic_store_strict( \
         &eval_breaker, \
         GIL_REQUEST | \
-        _Py_atomic_load_relaxed(&pendingcalls_to_do) | \
+        _Py_atomic_load_strict(&pendingcalls_to_do) | \
         pending_async_exc)

 #ifdef WITH_THREAD

 #define SET_GIL_DROP_REQUEST() \
     do { \
-        _Py_atomic_store_relaxed(&gil_drop_request, 1); \
-        _Py_atomic_store_relaxed(&eval_breaker, 1); \
+        _Py_atomic_store_strict(&gil_drop_request, 1); \
+        _Py_atomic_store_strict(&eval_breaker, 1); \
     } while (0)

 #define RESET_GIL_DROP_REQUEST() \
     do { \
-        _Py_atomic_store_relaxed(&gil_drop_request, 0); \
+        _Py_atomic_store_strict(&gil_drop_request, 0); \
         COMPUTE_EVAL_BREAKER(); \
     } while (0)
@@ -257,20 +257,20 @@ PyEval_GetCallStats(PyObject *self)

 /* Pending calls are only modified under pending_lock */
 #define SIGNAL_PENDING_CALLS() \
     do { \
-        _Py_atomic_store_relaxed(&pendingcalls_to_do, 1); \
-        _Py_atomic_store_relaxed(&eval_breaker, 1); \
+        _Py_atomic_store_strict(&pendingcalls_to_do, 1); \
+        _Py_atomic_store_strict(&eval_breaker, 1); \
     } while (0)

 #define UNSIGNAL_PENDING_CALLS() \
     do { \
-        _Py_atomic_store_relaxed(&pendingcalls_to_do, 0); \
+        _Py_atomic_store_strict(&pendingcalls_to_do, 0); \
         COMPUTE_EVAL_BREAKER(); \
     } while (0)

 #define SIGNAL_ASYNC_EXC() \
     do { \
         pending_async_exc = 1; \
-        _Py_atomic_store_relaxed(&eval_breaker, 1); \
+        _Py_atomic_store_strict(&eval_breaker, 1); \
     } while (0)

 #define UNSIGNAL_ASYNC_EXC() \
@@ -342,7 +342,7 @@ PyEval_ReleaseLock(void)
        We therefore avoid PyThreadState_GET() which dumps a fatal error
        in debug mode. */
-    drop_gil((PyThreadState*)_Py_atomic_load_relaxed(
+    drop_gil((PyThreadState*)_Py_atomic_load_strict(
         &_PyThreadState_Current));
 }
@@ -906,7 +906,7 @@ PyEval_EvalFrameEx(PyFrameObject *f, int
 #define DISPATCH() \
     { \
-        if (!_Py_atomic_load_relaxed(&eval_breaker)) { \
+        if (!_Py_atomic_load_strict(&eval_breaker)) { \
             FAST_DISPATCH(); \
         } \
         continue; \
     }
@@ -1248,7 +1248,7 @@ PyEval_EvalFrameEx(PyFrameObject *f, int
            async I/O handler); see Py_AddPendingCall() and
            Py_MakePendingCalls() above. */

-        if (_Py_atomic_load_relaxed(&eval_breaker)) {
+        if (_Py_atomic_load_strict(&eval_breaker)) {
             if (*next_instr == SETUP_FINALLY) {
                 /* Make the last opcode before
                    a try: finally: block uninterruptible. */
@@ -1257,12 +1257,12 @@ PyEval_EvalFrameEx(PyFrameObject *f, int
 #ifdef WITH_TSC
             ticked = 1;
 #endif
-            if (_Py_atomic_load_relaxed(&pendingcalls_to_do)) {
+            if (_Py_atomic_load_strict(&pendingcalls_to_do)) {
                 if (Py_MakePendingCalls() < 0)
                     goto error;
             }
 #ifdef WITH_THREAD
-            if (_Py_atomic_load_relaxed(&gil_drop_request)) {
+            if (_Py_atomic_load_strict(&gil_drop_request)) {
                 /* Give another thread a chance */
                 if (PyThreadState_Swap(NULL) != tstate)
                     Py_FatalError("ceval: tstate mix-up");
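
Note on what the patch does: the pyatomic.h hunk only renames the existing seq_cst
shortcuts so that their ordering is visible in the name, while the ceval.c hunks switch
every eval-breaker access from the _relaxed variants to the strict ones. Below is a
minimal illustrative sketch, not part of the patch, of what the renamed macros stand
for after it is applied; demo_breaker and demo_poll are made-up names standing in for
ceval.c's eval_breaker and its polling sites, and the file is assumed to be compiled
inside the core so that Python.h makes pyatomic.h visible.

/* Illustrative sketch only, not part of the patch. */
#include "Python.h"   /* assumed to pull in pyatomic.h for in-core code */

static _Py_atomic_int demo_breaker;   /* stand-in for ceval.c's eval_breaker */

static void
demo_poll(void)
{
    /* _Py_atomic_store_strict(&demo_breaker, 1) is shorthand for: */
    _Py_atomic_store_explicit(&demo_breaker, 1, _Py_memory_order_seq_cst);

    /* _Py_atomic_load_strict(&demo_breaker) is shorthand for: */
    if (!_Py_atomic_load_explicit(&demo_breaker, _Py_memory_order_seq_cst)) {
        /* fast path, as in the DISPATCH() hunk above */
    }
}

The only behavioural difference from the replaced _relaxed call sites is the memory
ordering: relaxed operations guarantee atomicity but no cross-thread ordering, whereas
the seq_cst operations used by the strict shortcuts participate in a single total order
of sequentially consistent accesses.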