mirror of https://github.com/python/cpython.git
	bpo-33608: Simplify ceval's DISPATCH by hoisting eval_breaker ahead of time. (gh-12062)
This includes fixes to various _Py_atomic_* macros.
commit bda918bf65
parent b05b711a2c
author Eric Snow

2 changed files with 27 additions and 26 deletions
diff --git a/Include/internal/pycore_atomic.h b/Include/internal/pycore_atomic.h
@@ -58,10 +58,10 @@ typedef struct _Py_atomic_int {
     atomic_thread_fence(ORDER)
 
 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
-    atomic_store_explicit(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER)
+    atomic_store_explicit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER)
 
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
-    atomic_load_explicit(&(ATOMIC_VAL)->_value, ORDER)
+    atomic_load_explicit(&((ATOMIC_VAL)->_value), ORDER)
 
 /* Use builtin atomic operations in GCC >= 4.7 */
 #elif defined(HAVE_BUILTIN_ATOMIC)
@@ -92,14 +92,14 @@ typedef struct _Py_atomic_int {
     (assert((ORDER) == __ATOMIC_RELAXED                       \
             || (ORDER) == __ATOMIC_SEQ_CST                    \
             || (ORDER) == __ATOMIC_RELEASE),                  \
-     __atomic_store_n(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER))
+     __atomic_store_n(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER))
 
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER)           \
     (assert((ORDER) == __ATOMIC_RELAXED                       \
             || (ORDER) == __ATOMIC_SEQ_CST                    \
             || (ORDER) == __ATOMIC_ACQUIRE                    \
             || (ORDER) == __ATOMIC_CONSUME),                  \
-     __atomic_load_n(&(ATOMIC_VAL)->_value, ORDER))
+     __atomic_load_n(&((ATOMIC_VAL)->_value), ORDER))
 
 /* Only support GCC (for expression statements) and x86 (for simple
  * atomic semantics) and MSVC x86/x64/ARM */
@@ -324,7 +324,7 @@ inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
 }
 
 #else
-#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL
+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)
 #endif
 
 inline int _Py_atomic_load_32bit(volatile int* value, int order) {
@@ -359,15 +359,15 @@ inline int _Py_atomic_load_32bit(volatile int* value, int order) {
 }
 
 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
-  if (sizeof(*ATOMIC_VAL._value) == 8) { \
-    _Py_atomic_store_64bit((volatile long long*)ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \
-    _Py_atomic_store_32bit((volatile long*)ATOMIC_VAL._value, NEW_VAL, ORDER) }
+  if (sizeof((ATOMIC_VAL)->_value) == 8) { \
+    _Py_atomic_store_64bit((volatile long long*)&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) } else { \
+    _Py_atomic_store_32bit((volatile long*)&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) }
 
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
   ( \
-    sizeof(*(ATOMIC_VAL._value)) == 8 ? \
-    _Py_atomic_load_64bit((volatile long long*)ATOMIC_VAL._value, ORDER) : \
-    _Py_atomic_load_32bit((volatile long*)ATOMIC_VAL._value, ORDER) \
+    sizeof((ATOMIC_VAL)->_value) == 8 ? \
+    _Py_atomic_load_64bit((volatile long long*)&((ATOMIC_VAL)->_value), ORDER) : \
+    _Py_atomic_load_32bit((volatile long*)&((ATOMIC_VAL)->_value), ORDER) \
   )
 #elif defined(_M_ARM) || defined(_M_ARM64)
 typedef enum _Py_memory_order {
@@ -391,13 +391,13 @@ typedef struct _Py_atomic_int {
 #define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \
     switch (ORDER) { \
     case _Py_memory_order_acquire: \
-      _InterlockedExchange64_acq((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+      _InterlockedExchange64_acq((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
       break; \
     case _Py_memory_order_release: \
-      _InterlockedExchange64_rel((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+      _InterlockedExchange64_rel((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
       break; \
     default: \
-      _InterlockedExchange64((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+      _InterlockedExchange64((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
       break; \
   }
 #else
@@ -407,13 +407,13 @@ typedef struct _Py_atomic_int {
 #define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \
   switch (ORDER) { \
   case _Py_memory_order_acquire: \
-    _InterlockedExchange_acq((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+    _InterlockedExchange_acq((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
    break; \
   case _Py_memory_order_release: \
-    _InterlockedExchange_rel((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+    _InterlockedExchange_rel((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
    break; \
   default: \
-    _InterlockedExchange((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+    _InterlockedExchange((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
    break; \
   }
 
@@ -454,7 +454,7 @@ inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
 }
 
 #else
-#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL
+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)
 #endif
 
 inline int _Py_atomic_load_32bit(volatile int* value, int order) {
@@ -489,15 +489,15 @@ inline int _Py_atomic_load_32bit(volatile int* value, int order) {
 }
 
 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
-  if (sizeof(*ATOMIC_VAL._value) == 8) { \
-    _Py_atomic_store_64bit(ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \
-    _Py_atomic_store_32bit(ATOMIC_VAL._value, NEW_VAL, ORDER) }
+  if (sizeof((ATOMIC_VAL)->_value) == 8) { \
+    _Py_atomic_store_64bit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) } else { \
+    _Py_atomic_store_32bit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) }
 
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
   ( \
-    sizeof(*(ATOMIC_VAL._value)) == 8 ? \
-    _Py_atomic_load_64bit(ATOMIC_VAL._value, ORDER) : \
-    _Py_atomic_load_32bit(ATOMIC_VAL._value, ORDER) \
+    sizeof((ATOMIC_VAL)->_value) == 8 ? \
+    _Py_atomic_load_64bit(&((ATOMIC_VAL)->_value), ORDER) : \
+    _Py_atomic_load_32bit(&((ATOMIC_VAL)->_value), ORDER) \
  )
 #endif
 #else  /* !gcc x86  !_msc_ver */
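
Note on the header change above: it settles on a single calling convention for the _Py_atomic_* macros, where the argument is always a pointer to the atomic struct and is always parenthesized. Previously the MSVC branches mixed conventions, taking the struct by value (ATOMIC_VAL._value) in some macros and a raw pointer in others. A minimal sketch of why the parenthesized pointer contract matters; my_atomic_int and MY_ATOMIC_LOAD are stand-ins for illustration, not CPython code:

    /* Stand-in for CPython's _Py_atomic_int: a struct wrapping the value. */
    #include <stdatomic.h>

    typedef struct { atomic_int _value; } my_atomic_int;

    /* The macro parenthesizes ATOMIC_VAL, so callers may pass arbitrary
     * pointer expressions; without those parentheses, the expansion of
     * MY_ATOMIC_LOAD(p + 1, ...) would be the ill-formed &p + 1->_value. */
    #define MY_ATOMIC_LOAD(ATOMIC_VAL, ORDER) \
        atomic_load_explicit(&((ATOMIC_VAL)->_value), ORDER)

    int main(void)
    {
        my_atomic_int flags[2] = {{0}, {1}};
        my_atomic_int *p = flags;
        /* Loads flags[1]._value (== 1), so the program exits with status 0. */
        return MY_ATOMIC_LOAD(p + 1, memory_order_relaxed) - 1;
    }
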
diff --git a/Python/ceval.c b/Python/ceval.c
@@ -637,6 +637,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
     PyObject **fastlocals, **freevars;
     PyObject *retval = NULL;            /* Return value */
     PyThreadState *tstate = _PyThreadState_GET();
+    _Py_atomic_int *eval_breaker = &tstate->interp->ceval.eval_breaker;
     PyCodeObject *co;
 
     /* when tracing we set things up so that
@@ -722,7 +723,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
 
 #define DISPATCH() \
     { \
-        if (!_Py_atomic_load_relaxed(&tstate->interp->ceval.eval_breaker)) { \
+        if (!_Py_atomic_load_relaxed(eval_breaker)) { \
                     FAST_DISPATCH(); \
         } \
         continue; \
@@ -1024,7 +1025,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
            async I/O handler); see Py_AddPendingCall() and
            Py_MakePendingCalls() above. */
 
-        if (_Py_atomic_load_relaxed(&(tstate->interp->ceval.eval_breaker))) {
+        if (_Py_atomic_load_relaxed(eval_breaker)) {
             opcode = _Py_OPCODE(*next_instr);
             if (opcode == SETUP_FINALLY ||
                 opcode == SETUP_WITH ||
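
The ceval.c change caches the address of the interpreter's eval_breaker flag in a local before entering the dispatch loop, so each DISPATCH() performs one relaxed atomic load through a saved pointer instead of re-chasing tstate->interp->ceval on every instruction. A minimal sketch of the pattern; the struct names and loop body are simplified stand-ins, not CPython's actual code:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Simplified stand-ins for PyThreadState / PyInterpreterState. */
    typedef struct { atomic_int eval_breaker; } ceval_state;
    typedef struct { ceval_state ceval; } interp_state;
    typedef struct { interp_state *interp; } thread_state;

    static void eval_loop(thread_state *tstate)
    {
        /* Hoisted once, as in the commit: two pointer dereferences here
         * rather than on every dispatch. */
        atomic_int *eval_breaker = &tstate->interp->ceval.eval_breaker;

        for (int i = 0; i < 3; i++) {
            /* DISPATCH(): a single relaxed load of the cached pointer. */
            if (!atomic_load_explicit(eval_breaker, memory_order_relaxed)) {
                printf("FAST_DISPATCH at instruction %d\n", i);
                continue;
            }
            printf("eval_breaker set: handle signals/pending calls\n");
        }
    }

    int main(void)
    {
        interp_state interp = {0};
        thread_state tstate = { .interp = &interp };
        eval_loop(&tstate);
        return 0;
    }

With the address in a local, the compiler can also keep it in a register across the opcode switch, rather than reloading the interpreter-state pointer chain each time the eval_breaker test runs.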