	GH-93678: refactor compiler so that optimizer does not need the assembler and compiler structs (GH-93842)
parent 77c839c98f
commit 889772fb56

2 changed files with 171 additions and 161 deletions
NEWS entry (new file)
@@ -0,0 +1 @@
+Refactor compiler optimisation code so that it no longer needs the ``struct assembler`` and ``struct compiler`` passed around. Instead, each function takes the CFG and other data that it actually needs. This will make it possible to test this code directly.
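To make the shape of the change easier to scan, here is a minimal sketch (not part of the commit) of the signature pattern applied throughout Python/compile.c below. The _before/_after suffixes and the opaque declarations are added only for this illustration; the real functions are static and keep their original names.

/* Illustrative sketch only -- not code from the commit. Opaque declarations
 * stand in for the real CPython types so the prototypes can be shown alone. */
typedef struct _object PyObject;
typedef struct basicblock_ basicblock;
struct compiler;
struct assembler;

/* Before: optimisation passes received the whole compiler/assembler state
 * and reached into it for the CFG entry block, the const cache, flags, etc. */
int optimize_cfg_before(struct compiler *c, struct assembler *a, PyObject *consts);
int stackdepth_before(struct compiler *c, basicblock *entry);

/* After: each pass takes the CFG entry block plus exactly the data it uses,
 * so it no longer depends on compiler or assembler internals and can, in
 * principle, be driven directly by a test harness. */
int optimize_cfg_after(basicblock *entryblock, PyObject *consts, PyObject *const_cache);
int stackdepth_after(basicblock *entryblock, int code_flags);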
Python/compile.c (331 changed lines)
							|  | @ -854,20 +854,26 @@ compiler_set_qualname(struct compiler *c) | ||||||
| /* Allocate a new block and return a pointer to it.
 | /* Allocate a new block and return a pointer to it.
 | ||||||
|    Returns NULL on error. |    Returns NULL on error. | ||||||
| */ | */ | ||||||
| 
 |  | ||||||
| static basicblock * | static basicblock * | ||||||
| compiler_new_block(struct compiler *c) | new_basicblock() | ||||||
| { | { | ||||||
|     basicblock *b; |     basicblock *b = (basicblock *)PyObject_Calloc(1, sizeof(basicblock)); | ||||||
|     struct compiler_unit *u; |  | ||||||
| 
 |  | ||||||
|     u = c->u; |  | ||||||
|     b = (basicblock *)PyObject_Calloc(1, sizeof(basicblock)); |  | ||||||
|     if (b == NULL) { |     if (b == NULL) { | ||||||
|         PyErr_NoMemory(); |         PyErr_NoMemory(); | ||||||
|         return NULL; |         return NULL; | ||||||
|     } |     } | ||||||
|  |     return b; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | static basicblock * | ||||||
|  | compiler_new_block(struct compiler *c) | ||||||
|  | { | ||||||
|  |     basicblock *b = new_basicblock(); | ||||||
|  |     if (b == NULL) { | ||||||
|  |         return NULL; | ||||||
|  |     } | ||||||
|     /* Extend the singly linked list of blocks with new block. */ |     /* Extend the singly linked list of blocks with new block. */ | ||||||
|  |     struct compiler_unit *u = c->u; | ||||||
|     b->b_list = u->u_blocks; |     b->b_list = u->u_blocks; | ||||||
|     u->u_blocks = b; |     u->u_blocks = b; | ||||||
|     return b; |     return b; | ||||||
|  | @ -883,13 +889,25 @@ compiler_use_next_block(struct compiler *c, basicblock *block) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static basicblock * | static basicblock * | ||||||
| compiler_copy_block(struct compiler *c, basicblock *block) | basicblock_new_b_list_successor(basicblock *prev) | ||||||
|  | { | ||||||
|  |     basicblock *result = new_basicblock(); | ||||||
|  |     if (result == NULL) { | ||||||
|  |         return NULL; | ||||||
|  |     } | ||||||
|  |     result->b_list = prev->b_list; | ||||||
|  |     prev->b_list = result; | ||||||
|  |     return result; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | static basicblock * | ||||||
|  | copy_basicblock(basicblock *block) | ||||||
| { | { | ||||||
|     /* Cannot copy a block if it has a fallthrough, since
 |     /* Cannot copy a block if it has a fallthrough, since
 | ||||||
|      * a block can only have one fallthrough predecessor. |      * a block can only have one fallthrough predecessor. | ||||||
|      */ |      */ | ||||||
|     assert(BB_NO_FALLTHROUGH(block)); |     assert(BB_NO_FALLTHROUGH(block)); | ||||||
|     basicblock *result = compiler_new_block(c); |     basicblock *result = basicblock_new_b_list_successor(block); | ||||||
|     if (result == NULL) { |     if (result == NULL) { | ||||||
|         return NULL; |         return NULL; | ||||||
|     } |     } | ||||||
|  | @ -1340,8 +1358,9 @@ compiler_add_o(PyObject *dict, PyObject *o) | ||||||
| 
 | 
 | ||||||
| // Merge const *o* recursively and return constant key object.
 | // Merge const *o* recursively and return constant key object.
 | ||||||
| static PyObject* | static PyObject* | ||||||
| merge_consts_recursive(struct compiler *c, PyObject *o) | merge_consts_recursive(PyObject *const_cache, PyObject *o) | ||||||
| { | { | ||||||
|  |     assert(PyDict_CheckExact(const_cache)); | ||||||
|     // None and Ellipsis are singleton, and key is the singleton.
 |     // None and Ellipsis are singleton, and key is the singleton.
 | ||||||
|     // No need to merge object and key.
 |     // No need to merge object and key.
 | ||||||
|     if (o == Py_None || o == Py_Ellipsis) { |     if (o == Py_None || o == Py_Ellipsis) { | ||||||
|  | @ -1355,22 +1374,22 @@ merge_consts_recursive(struct compiler *c, PyObject *o) | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     // t is borrowed reference
 |     // t is borrowed reference
 | ||||||
|     PyObject *t = PyDict_SetDefault(c->c_const_cache, key, key); |     PyObject *t = PyDict_SetDefault(const_cache, key, key); | ||||||
|     if (t != key) { |     if (t != key) { | ||||||
|         // o is registered in c_const_cache.  Just use it.
 |         // o is registered in const_cache.  Just use it.
 | ||||||
|         Py_XINCREF(t); |         Py_XINCREF(t); | ||||||
|         Py_DECREF(key); |         Py_DECREF(key); | ||||||
|         return t; |         return t; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     // We registered o in c_const_cache.
 |     // We registered o in const_cache.
 | ||||||
|     // When o is a tuple or frozenset, we want to merge its
 |     // When o is a tuple or frozenset, we want to merge its
 | ||||||
|     // items too.
 |     // items too.
 | ||||||
|     if (PyTuple_CheckExact(o)) { |     if (PyTuple_CheckExact(o)) { | ||||||
|         Py_ssize_t len = PyTuple_GET_SIZE(o); |         Py_ssize_t len = PyTuple_GET_SIZE(o); | ||||||
|         for (Py_ssize_t i = 0; i < len; i++) { |         for (Py_ssize_t i = 0; i < len; i++) { | ||||||
|             PyObject *item = PyTuple_GET_ITEM(o, i); |             PyObject *item = PyTuple_GET_ITEM(o, i); | ||||||
|             PyObject *u = merge_consts_recursive(c, item); |             PyObject *u = merge_consts_recursive(const_cache, item); | ||||||
|             if (u == NULL) { |             if (u == NULL) { | ||||||
|                 Py_DECREF(key); |                 Py_DECREF(key); | ||||||
|                 return NULL; |                 return NULL; | ||||||
|  | @ -1413,7 +1432,7 @@ merge_consts_recursive(struct compiler *c, PyObject *o) | ||||||
|         PyObject *item; |         PyObject *item; | ||||||
|         Py_hash_t hash; |         Py_hash_t hash; | ||||||
|         while (_PySet_NextEntry(o, &pos, &item, &hash)) { |         while (_PySet_NextEntry(o, &pos, &item, &hash)) { | ||||||
|             PyObject *k = merge_consts_recursive(c, item); |             PyObject *k = merge_consts_recursive(const_cache, item); | ||||||
|             if (k == NULL) { |             if (k == NULL) { | ||||||
|                 Py_DECREF(tuple); |                 Py_DECREF(tuple); | ||||||
|                 Py_DECREF(key); |                 Py_DECREF(key); | ||||||
|  | @ -1451,7 +1470,7 @@ merge_consts_recursive(struct compiler *c, PyObject *o) | ||||||
| static Py_ssize_t | static Py_ssize_t | ||||||
| compiler_add_const(struct compiler *c, PyObject *o) | compiler_add_const(struct compiler *c, PyObject *o) | ||||||
| { | { | ||||||
|     PyObject *key = merge_consts_recursive(c, o); |     PyObject *key = merge_consts_recursive(c->c_const_cache, o); | ||||||
|     if (key == NULL) { |     if (key == NULL) { | ||||||
|         return -1; |         return -1; | ||||||
|     } |     } | ||||||
|  | @ -6994,34 +7013,25 @@ compiler_match(struct compiler *c, stmt_ty s) | ||||||
| #undef WILDCARD_CHECK | #undef WILDCARD_CHECK | ||||||
| #undef WILDCARD_STAR_CHECK | #undef WILDCARD_STAR_CHECK | ||||||
| 
 | 
 | ||||||
|  | 
 | ||||||
| /* End of the compiler section, beginning of the assembler section */ | /* End of the compiler section, beginning of the assembler section */ | ||||||
| 
 | 
 | ||||||
| /* do depth-first search of basic block graph, starting with block.
 |  | ||||||
|    post records the block indices in post-order. |  | ||||||
| 
 |  | ||||||
|    XXX must handle implicit jumps from one block to next |  | ||||||
| */ |  | ||||||
| 
 |  | ||||||
| 
 | 
 | ||||||
| struct assembler { | struct assembler { | ||||||
|     PyObject *a_bytecode;  /* bytes containing bytecode */ |     PyObject *a_bytecode;  /* bytes containing bytecode */ | ||||||
|     PyObject *a_except_table;  /* bytes containing exception table */ |  | ||||||
|     basicblock *a_entry; |  | ||||||
|     int a_offset;              /* offset into bytecode */ |     int a_offset;              /* offset into bytecode */ | ||||||
|  |     PyObject *a_except_table;  /* bytes containing exception table */ | ||||||
|     int a_except_table_off;    /* offset into exception table */ |     int a_except_table_off;    /* offset into exception table */ | ||||||
|     int a_lineno;          /* lineno of last emitted instruction */ |  | ||||||
|     int a_end_lineno;      /* end_lineno of last emitted instruction */ |  | ||||||
|     int a_lineno_start;    /* bytecode start offset of current lineno */ |  | ||||||
|     int a_end_lineno_start; /* bytecode start offset of current end_lineno */ |  | ||||||
|     /* Location Info */ |     /* Location Info */ | ||||||
|  |     int a_lineno;          /* lineno of last emitted instruction */ | ||||||
|     PyObject* a_linetable; /* bytes containing location info */ |     PyObject* a_linetable; /* bytes containing location info */ | ||||||
|     int a_location_off;    /* offset of last written location info frame */ |     int a_location_off;    /* offset of last written location info frame */ | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| static basicblock** | static basicblock** | ||||||
| make_cfg_traversal_stack(basicblock *entry) { | make_cfg_traversal_stack(basicblock *entryblock) { | ||||||
|     int nblocks = 0; |     int nblocks = 0; | ||||||
|     for (basicblock *b = entry; b != NULL; b = b->b_next) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         b->b_visited = 0; |         b->b_visited = 0; | ||||||
|         nblocks++; |         nblocks++; | ||||||
|     } |     } | ||||||
|  | @ -7047,22 +7057,22 @@ stackdepth_push(basicblock ***sp, basicblock *b, int depth) | ||||||
|  * cycles in the flow graph have no net effect on the stack depth. |  * cycles in the flow graph have no net effect on the stack depth. | ||||||
|  */ |  */ | ||||||
| static int | static int | ||||||
| stackdepth(struct compiler *c, basicblock *entry) | stackdepth(basicblock *entryblock, int code_flags) | ||||||
| { | { | ||||||
|     for (basicblock *b = entry; b != NULL; b = b->b_next) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         b->b_startdepth = INT_MIN; |         b->b_startdepth = INT_MIN; | ||||||
|     } |     } | ||||||
|     basicblock **stack = make_cfg_traversal_stack(entry); |     basicblock **stack = make_cfg_traversal_stack(entryblock); | ||||||
|     if (!stack) { |     if (!stack) { | ||||||
|         return -1; |         return -1; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     int maxdepth = 0; |     int maxdepth = 0; | ||||||
|     basicblock **sp = stack; |     basicblock **sp = stack; | ||||||
|     if (c->u->u_ste->ste_generator || c->u->u_ste->ste_coroutine) { |     if (code_flags & (CO_GENERATOR | CO_COROUTINE | CO_ASYNC_GENERATOR)) { | ||||||
|         stackdepth_push(&sp, entry, 1); |         stackdepth_push(&sp, entryblock, 1); | ||||||
|     } else { |     } else { | ||||||
|         stackdepth_push(&sp, entry, 0); |         stackdepth_push(&sp, entryblock, 0); | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     while (sp != stack) { |     while (sp != stack) { | ||||||
|  | @ -7117,11 +7127,10 @@ stackdepth(struct compiler *c, basicblock *entry) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static int | static int | ||||||
| assemble_init(struct assembler *a, int nblocks, int firstlineno) | assemble_init(struct assembler *a, int firstlineno) | ||||||
| { | { | ||||||
|     memset(a, 0, sizeof(struct assembler)); |     memset(a, 0, sizeof(struct assembler)); | ||||||
|     a->a_lineno = firstlineno; |     a->a_lineno = firstlineno; | ||||||
|     a->a_end_lineno = firstlineno; |  | ||||||
|     a->a_linetable = NULL; |     a->a_linetable = NULL; | ||||||
|     a->a_location_off = 0; |     a->a_location_off = 0; | ||||||
|     a->a_except_table = NULL; |     a->a_except_table = NULL; | ||||||
|  | @ -7137,10 +7146,6 @@ assemble_init(struct assembler *a, int nblocks, int firstlineno) | ||||||
|     if (a->a_except_table == NULL) { |     if (a->a_except_table == NULL) { | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
|     if ((size_t)nblocks > SIZE_MAX / sizeof(basicblock *)) { |  | ||||||
|         PyErr_NoMemory(); |  | ||||||
|         goto error; |  | ||||||
|     } |  | ||||||
|     return 1; |     return 1; | ||||||
| error: | error: | ||||||
|     Py_XDECREF(a->a_bytecode); |     Py_XDECREF(a->a_bytecode); | ||||||
|  | @ -7216,8 +7221,8 @@ copy_except_stack(ExceptStack *stack) { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static int | static int | ||||||
| label_exception_targets(basicblock *entry) { | label_exception_targets(basicblock *entryblock) { | ||||||
|     basicblock **todo_stack = make_cfg_traversal_stack(entry); |     basicblock **todo_stack = make_cfg_traversal_stack(entryblock); | ||||||
|     if (todo_stack == NULL) { |     if (todo_stack == NULL) { | ||||||
|         return -1; |         return -1; | ||||||
|     } |     } | ||||||
|  | @ -7228,9 +7233,9 @@ label_exception_targets(basicblock *entry) { | ||||||
|         return -1; |         return -1; | ||||||
|     } |     } | ||||||
|     except_stack->depth = 0; |     except_stack->depth = 0; | ||||||
|     todo_stack[0] = entry; |     todo_stack[0] = entryblock; | ||||||
|     entry->b_visited = 1; |     entryblock->b_visited = 1; | ||||||
|     entry->b_exceptstack = except_stack; |     entryblock->b_exceptstack = except_stack; | ||||||
|     basicblock **todo = &todo_stack[1]; |     basicblock **todo = &todo_stack[1]; | ||||||
|     basicblock *handler = NULL; |     basicblock *handler = NULL; | ||||||
|     while (todo > todo_stack) { |     while (todo > todo_stack) { | ||||||
|  | @ -7295,7 +7300,7 @@ label_exception_targets(basicblock *entry) { | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
| #ifdef Py_DEBUG | #ifdef Py_DEBUG | ||||||
|     for (basicblock *b = entry; b != NULL; b = b->b_next) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         assert(b->b_exceptstack == NULL); |         assert(b->b_exceptstack == NULL); | ||||||
|     } |     } | ||||||
| #endif | #endif | ||||||
|  | @ -7308,15 +7313,15 @@ label_exception_targets(basicblock *entry) { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static int | static int | ||||||
| mark_warm(basicblock *entry) { | mark_warm(basicblock *entryblock) { | ||||||
|     basicblock **stack = make_cfg_traversal_stack(entry); |     basicblock **stack = make_cfg_traversal_stack(entryblock); | ||||||
|     if (stack == NULL) { |     if (stack == NULL) { | ||||||
|         return -1; |         return -1; | ||||||
|     } |     } | ||||||
|     basicblock **sp = stack; |     basicblock **sp = stack; | ||||||
| 
 | 
 | ||||||
|     *sp++ = entry; |     *sp++ = entryblock; | ||||||
|     entry->b_visited = 1; |     entryblock->b_visited = 1; | ||||||
|     while (sp > stack) { |     while (sp > stack) { | ||||||
|         basicblock *b = *(--sp); |         basicblock *b = *(--sp); | ||||||
|         assert(!b->b_except_predecessors); |         assert(!b->b_except_predecessors); | ||||||
|  | @ -7339,21 +7344,21 @@ mark_warm(basicblock *entry) { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static int | static int | ||||||
| mark_cold(basicblock *entry) { | mark_cold(basicblock *entryblock) { | ||||||
|     for (basicblock *b = entry; b != NULL; b = b->b_next) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         assert(!b->b_cold && !b->b_warm); |         assert(!b->b_cold && !b->b_warm); | ||||||
|     } |     } | ||||||
|     if (mark_warm(entry) < 0) { |     if (mark_warm(entryblock) < 0) { | ||||||
|         return -1; |         return -1; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     basicblock **stack = make_cfg_traversal_stack(entry); |     basicblock **stack = make_cfg_traversal_stack(entryblock); | ||||||
|     if (stack == NULL) { |     if (stack == NULL) { | ||||||
|         return -1; |         return -1; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     basicblock **sp = stack; |     basicblock **sp = stack; | ||||||
|     for (basicblock *b = entry; b != NULL; b = b->b_next) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         if (b->b_except_predecessors) { |         if (b->b_except_predecessors) { | ||||||
|             assert(b->b_except_predecessors == b->b_predecessors); |             assert(b->b_except_predecessors == b->b_predecessors); | ||||||
|             assert(!b->b_warm); |             assert(!b->b_warm); | ||||||
|  | @ -7389,20 +7394,20 @@ mark_cold(basicblock *entry) { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static int | static int | ||||||
| push_cold_blocks_to_end(struct compiler *c, basicblock *entry, int code_flags) { | push_cold_blocks_to_end(basicblock *entryblock, int code_flags) { | ||||||
|     if (entry->b_next == NULL) { |     if (entryblock->b_next == NULL) { | ||||||
|         /* single basicblock, no need to reorder */ |         /* single basicblock, no need to reorder */ | ||||||
|         return 0; |         return 0; | ||||||
|     } |     } | ||||||
|     if (mark_cold(entry) < 0) { |     if (mark_cold(entryblock) < 0) { | ||||||
|         return -1; |         return -1; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /* If we have a cold block with fallthrough to a warm block, add */ |     /* If we have a cold block with fallthrough to a warm block, add */ | ||||||
|     /* an explicit jump instead of fallthrough */ |     /* an explicit jump instead of fallthrough */ | ||||||
|     for (basicblock *b = entry; b != NULL; b = b->b_next) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         if (b->b_cold && BB_HAS_FALLTHROUGH(b) && b->b_next && b->b_next->b_warm) { |         if (b->b_cold && BB_HAS_FALLTHROUGH(b) && b->b_next && b->b_next->b_warm) { | ||||||
|             basicblock *explicit_jump = compiler_new_block(c); |             basicblock *explicit_jump = basicblock_new_b_list_successor(b); | ||||||
|             if (explicit_jump == NULL) { |             if (explicit_jump == NULL) { | ||||||
|                 return -1; |                 return -1; | ||||||
|             } |             } | ||||||
|  | @ -7414,11 +7419,11 @@ push_cold_blocks_to_end(struct compiler *c, basicblock *entry, int code_flags) { | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     assert(!entry->b_cold);  /* First block can't be cold */ |     assert(!entryblock->b_cold);  /* First block can't be cold */ | ||||||
|     basicblock *cold_blocks = NULL; |     basicblock *cold_blocks = NULL; | ||||||
|     basicblock *cold_blocks_tail = NULL; |     basicblock *cold_blocks_tail = NULL; | ||||||
| 
 | 
 | ||||||
|     basicblock *b = entry; |     basicblock *b = entryblock; | ||||||
|     while(b->b_next) { |     while(b->b_next) { | ||||||
|         assert(!b->b_cold); |         assert(!b->b_cold); | ||||||
|         while (b->b_next && !b->b_next->b_cold) { |         while (b->b_next && !b->b_next->b_cold) { | ||||||
|  | @ -7457,8 +7462,8 @@ push_cold_blocks_to_end(struct compiler *c, basicblock *entry, int code_flags) { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static void | static void | ||||||
| convert_exception_handlers_to_nops(basicblock *entry) { | convert_exception_handlers_to_nops(basicblock *entryblock) { | ||||||
|     for (basicblock *b = entry; b != NULL; b = b->b_next) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         for (int i = 0; i < b->b_iused; i++) { |         for (int i = 0; i < b->b_iused; i++) { | ||||||
|             struct instr *instr = &b->b_instr[i]; |             struct instr *instr = &b->b_instr[i]; | ||||||
|             if (is_block_push(instr) || instr->i_opcode == POP_BLOCK) { |             if (is_block_push(instr) || instr->i_opcode == POP_BLOCK) { | ||||||
|  | @ -7528,13 +7533,13 @@ assemble_emit_exception_table_entry(struct assembler *a, int start, int end, bas | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static int | static int | ||||||
| assemble_exception_table(struct assembler *a) | assemble_exception_table(struct assembler *a, basicblock *entryblock) | ||||||
| { | { | ||||||
|     basicblock *b; |     basicblock *b; | ||||||
|     int ioffset = 0; |     int ioffset = 0; | ||||||
|     basicblock *handler = NULL; |     basicblock *handler = NULL; | ||||||
|     int start = -1; |     int start = -1; | ||||||
|     for (b = a->a_entry; b != NULL; b = b->b_next) { |     for (b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         ioffset = b->b_offset; |         ioffset = b->b_offset; | ||||||
|         for (int i = 0; i < b->b_iused; i++) { |         for (int i = 0; i < b->b_iused; i++) { | ||||||
|             struct instr *instr = &b->b_instr[i]; |             struct instr *instr = &b->b_instr[i]; | ||||||
|  | @ -7727,12 +7732,12 @@ assemble_emit(struct assembler *a, struct instr *i) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static void | static void | ||||||
| normalize_jumps(struct assembler *a) | normalize_jumps(basicblock *entryblock) | ||||||
| { | { | ||||||
|     for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         b->b_visited = 0; |         b->b_visited = 0; | ||||||
|     } |     } | ||||||
|     for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         b->b_visited = 1; |         b->b_visited = 1; | ||||||
|         if (b->b_iused == 0) { |         if (b->b_iused == 0) { | ||||||
|             continue; |             continue; | ||||||
|  | @ -7786,25 +7791,23 @@ normalize_jumps(struct assembler *a) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static void | static void | ||||||
| assemble_jump_offsets(struct assembler *a, struct compiler *c) | assemble_jump_offsets(basicblock *entryblock) | ||||||
| { | { | ||||||
|     basicblock *b; |  | ||||||
|     int bsize, totsize, extended_arg_recompile; |     int bsize, totsize, extended_arg_recompile; | ||||||
|     int i; |  | ||||||
| 
 | 
 | ||||||
|     /* Compute the size of each block and fixup jump args.
 |     /* Compute the size of each block and fixup jump args.
 | ||||||
|        Replace block pointer with position in bytecode. */ |        Replace block pointer with position in bytecode. */ | ||||||
|     do { |     do { | ||||||
|         totsize = 0; |         totsize = 0; | ||||||
|         for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) { |         for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|             bsize = blocksize(b); |             bsize = blocksize(b); | ||||||
|             b->b_offset = totsize; |             b->b_offset = totsize; | ||||||
|             totsize += bsize; |             totsize += bsize; | ||||||
|         } |         } | ||||||
|         extended_arg_recompile = 0; |         extended_arg_recompile = 0; | ||||||
|         for (b = c->u->u_blocks; b != NULL; b = b->b_list) { |         for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|             bsize = b->b_offset; |             bsize = b->b_offset; | ||||||
|             for (i = 0; i < b->b_iused; i++) { |             for (int i = 0; i < b->b_iused; i++) { | ||||||
|                 struct instr *instr = &b->b_instr[i]; |                 struct instr *instr = &b->b_instr[i]; | ||||||
|                 int isize = instr_size(instr); |                 int isize = instr_size(instr); | ||||||
|                 /* Relative jumps are computed relative to
 |                 /* Relative jumps are computed relative to
 | ||||||
|  | @ -7917,17 +7920,17 @@ scan_block_for_local(int target, basicblock *b, bool unsafe_to_start, | ||||||
| #undef MAYBE_PUSH | #undef MAYBE_PUSH | ||||||
| 
 | 
 | ||||||
| static int | static int | ||||||
| add_checks_for_loads_of_unknown_variables(struct assembler *a, | add_checks_for_loads_of_unknown_variables(basicblock *entryblock, | ||||||
|                                           struct compiler *c) |                                           struct compiler *c) | ||||||
| { | { | ||||||
|     basicblock **stack = make_cfg_traversal_stack(a->a_entry); |     basicblock **stack = make_cfg_traversal_stack(entryblock); | ||||||
|     if (stack == NULL) { |     if (stack == NULL) { | ||||||
|         return -1; |         return -1; | ||||||
|     } |     } | ||||||
|     Py_ssize_t nparams = PyList_GET_SIZE(c->u->u_ste->ste_varnames); |     Py_ssize_t nparams = PyList_GET_SIZE(c->u->u_ste->ste_varnames); | ||||||
|     int nlocals = (int)PyDict_GET_SIZE(c->u->u_varnames); |     int nlocals = (int)PyDict_GET_SIZE(c->u->u_varnames); | ||||||
|     for (int target = 0; target < nlocals; target++) { |     for (int target = 0; target < nlocals; target++) { | ||||||
|         for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) { |         for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|             b->b_visited = 0; |             b->b_visited = 0; | ||||||
|         } |         } | ||||||
|         basicblock **stack_top = stack; |         basicblock **stack_top = stack; | ||||||
|  | @ -7937,10 +7940,10 @@ add_checks_for_loads_of_unknown_variables(struct assembler *a, | ||||||
|         // which are the entry block and any DELETE_FAST statements.
 |         // which are the entry block and any DELETE_FAST statements.
 | ||||||
|         if (target >= nparams) { |         if (target >= nparams) { | ||||||
|             // only non-parameter locals start out uninitialized.
 |             // only non-parameter locals start out uninitialized.
 | ||||||
|             *(stack_top++) = a->a_entry; |             *(stack_top++) = entryblock; | ||||||
|             a->a_entry->b_visited = 1; |             entryblock->b_visited = 1; | ||||||
|         } |         } | ||||||
|         for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) { |         for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|             scan_block_for_local(target, b, false, &stack_top); |             scan_block_for_local(target, b, false, &stack_top); | ||||||
|         } |         } | ||||||
| 
 | 
 | ||||||
|  | @ -8034,15 +8037,16 @@ compute_code_flags(struct compiler *c) | ||||||
| // Merge *obj* with constant cache.
 | // Merge *obj* with constant cache.
 | ||||||
| // Unlike merge_consts_recursive(), this function doesn't work recursively.
 | // Unlike merge_consts_recursive(), this function doesn't work recursively.
 | ||||||
| static int | static int | ||||||
| merge_const_one(struct compiler *c, PyObject **obj) | merge_const_one(PyObject *const_cache, PyObject **obj) | ||||||
| { | { | ||||||
|  |     PyDict_CheckExact(const_cache); | ||||||
|     PyObject *key = _PyCode_ConstantKey(*obj); |     PyObject *key = _PyCode_ConstantKey(*obj); | ||||||
|     if (key == NULL) { |     if (key == NULL) { | ||||||
|         return 0; |         return 0; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     // t is borrowed reference
 |     // t is borrowed reference
 | ||||||
|     PyObject *t = PyDict_SetDefault(c->c_const_cache, key, key); |     PyObject *t = PyDict_SetDefault(const_cache, key, key); | ||||||
|     Py_DECREF(key); |     Py_DECREF(key); | ||||||
|     if (t == NULL) { |     if (t == NULL) { | ||||||
|         return 0; |         return 0; | ||||||
|  | @ -8125,7 +8129,7 @@ makecode(struct compiler *c, struct assembler *a, PyObject *constslist, | ||||||
|     if (!names) { |     if (!names) { | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
|     if (!merge_const_one(c, &names)) { |     if (!merge_const_one(c->c_const_cache, &names)) { | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|  | @ -8133,7 +8137,7 @@ makecode(struct compiler *c, struct assembler *a, PyObject *constslist, | ||||||
|     if (consts == NULL) { |     if (consts == NULL) { | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
|     if (!merge_const_one(c, &consts)) { |     if (!merge_const_one(c->c_const_cache, &consts)) { | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|  | @ -8184,7 +8188,7 @@ makecode(struct compiler *c, struct assembler *a, PyObject *constslist, | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     if (!merge_const_one(c, &localsplusnames)) { |     if (!merge_const_one(c->c_const_cache, &localsplusnames)) { | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
|     con.localsplusnames = localsplusnames; |     con.localsplusnames = localsplusnames; | ||||||
|  | @ -8249,14 +8253,14 @@ static int | ||||||
| normalize_basic_block(basicblock *bb); | normalize_basic_block(basicblock *bb); | ||||||
| 
 | 
 | ||||||
| static int | static int | ||||||
| optimize_cfg(struct compiler *c, struct assembler *a, PyObject *consts); | optimize_cfg(basicblock *entryblock, PyObject *consts, PyObject *const_cache); | ||||||
| 
 | 
 | ||||||
| static int | static int | ||||||
| trim_unused_consts(struct assembler *a, PyObject *consts); | trim_unused_consts(basicblock *entryblock, PyObject *consts); | ||||||
| 
 | 
 | ||||||
| /* Duplicates exit BBs, so that line numbers can be propagated to them */ | /* Duplicates exit BBs, so that line numbers can be propagated to them */ | ||||||
| static int | static int | ||||||
| duplicate_exits_without_lineno(struct compiler *c); | duplicate_exits_without_lineno(basicblock *entryblock); | ||||||
| 
 | 
 | ||||||
| static int | static int | ||||||
| extend_block(basicblock *bb); | extend_block(basicblock *bb); | ||||||
|  | @ -8390,10 +8394,10 @@ insert_prefix_instructions(struct compiler *c, basicblock *entryblock, | ||||||
|  * The resulting line number may not be correct according to PEP 626, |  * The resulting line number may not be correct according to PEP 626, | ||||||
|  * but should be "good enough", and no worse than in older versions. */ |  * but should be "good enough", and no worse than in older versions. */ | ||||||
| static void | static void | ||||||
| guarantee_lineno_for_exits(struct assembler *a, int firstlineno) { | guarantee_lineno_for_exits(basicblock *entryblock, int firstlineno) { | ||||||
|     int lineno = firstlineno; |     int lineno = firstlineno; | ||||||
|     assert(lineno > 0); |     assert(lineno > 0); | ||||||
|     for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         if (b->b_iused == 0) { |         if (b->b_iused == 0) { | ||||||
|             continue; |             continue; | ||||||
|         } |         } | ||||||
|  | @ -8460,21 +8464,21 @@ fix_cell_offsets(struct compiler *c, basicblock *entryblock, int *fixedmap) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static void | static void | ||||||
| propagate_line_numbers(struct assembler *a); | propagate_line_numbers(basicblock *entryblock); | ||||||
| 
 | 
 | ||||||
| static void | static void | ||||||
| eliminate_empty_basic_blocks(basicblock *entry); | eliminate_empty_basic_blocks(basicblock *entryblock); | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| static void | static void | ||||||
| remove_redundant_jumps(basicblock *entry) { | remove_redundant_jumps(basicblock *entryblock) { | ||||||
|     /* If a non-empty block ends with a jump instruction, check if the next
 |     /* If a non-empty block ends with a jump instruction, check if the next
 | ||||||
|      * non-empty block reached through normal flow control is the target |      * non-empty block reached through normal flow control is the target | ||||||
|      * of that jump. If it is, then the jump instruction is redundant and |      * of that jump. If it is, then the jump instruction is redundant and | ||||||
|      * can be deleted. |      * can be deleted. | ||||||
|      */ |      */ | ||||||
|     int removed = 0; |     int removed = 0; | ||||||
|     for (basicblock *b = entry; b != NULL; b = b->b_next) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         if (b->b_iused > 0) { |         if (b->b_iused > 0) { | ||||||
|             struct instr *b_last_instr = &b->b_instr[b->b_iused - 1]; |             struct instr *b_last_instr = &b->b_instr[b->b_iused - 1]; | ||||||
|             assert(!IS_ASSEMBLER_OPCODE(b_last_instr->i_opcode)); |             assert(!IS_ASSEMBLER_OPCODE(b_last_instr->i_opcode)); | ||||||
|  | @ -8489,16 +8493,13 @@ remove_redundant_jumps(basicblock *entry) { | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
|     if (removed) { |     if (removed) { | ||||||
|         eliminate_empty_basic_blocks(entry); |         eliminate_empty_basic_blocks(entryblock); | ||||||
|     } |     } | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static PyCodeObject * | static PyCodeObject * | ||||||
| assemble(struct compiler *c, int addNone) | assemble(struct compiler *c, int addNone) | ||||||
| { | { | ||||||
|     basicblock *b, *entryblock; |  | ||||||
|     struct assembler a; |  | ||||||
|     int j, nblocks; |  | ||||||
|     PyCodeObject *co = NULL; |     PyCodeObject *co = NULL; | ||||||
|     PyObject *consts = NULL; |     PyObject *consts = NULL; | ||||||
| 
 | 
 | ||||||
|  | @ -8527,15 +8528,6 @@ assemble(struct compiler *c, int addNone) | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     nblocks = 0; |  | ||||||
|     entryblock = NULL; |  | ||||||
|     for (b = c->u->u_blocks; b != NULL; b = b->b_list) { |  | ||||||
|         nblocks++; |  | ||||||
|         entryblock = b; |  | ||||||
|         assert(b->b_warm == 0 && b->b_cold == 0); |  | ||||||
|     } |  | ||||||
|     assert(entryblock != NULL); |  | ||||||
| 
 |  | ||||||
|     assert(PyDict_GET_SIZE(c->u->u_varnames) < INT_MAX); |     assert(PyDict_GET_SIZE(c->u->u_varnames) < INT_MAX); | ||||||
|     assert(PyDict_GET_SIZE(c->u->u_cellvars) < INT_MAX); |     assert(PyDict_GET_SIZE(c->u->u_cellvars) < INT_MAX); | ||||||
|     assert(PyDict_GET_SIZE(c->u->u_freevars) < INT_MAX); |     assert(PyDict_GET_SIZE(c->u->u_freevars) < INT_MAX); | ||||||
|  | @ -8550,6 +8542,18 @@ assemble(struct compiler *c, int addNone) | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|  |     int nblocks = 0; | ||||||
|  |     basicblock *entryblock = NULL; | ||||||
|  |     for (basicblock *b = c->u->u_blocks; b != NULL; b = b->b_list) { | ||||||
|  |         nblocks++; | ||||||
|  |         entryblock = b; | ||||||
|  |     } | ||||||
|  |     assert(entryblock != NULL); | ||||||
|  |     if ((size_t)nblocks > SIZE_MAX / sizeof(basicblock *)) { | ||||||
|  |         PyErr_NoMemory(); | ||||||
|  |         goto error; | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|     /* Set firstlineno if it wasn't explicitly set. */ |     /* Set firstlineno if it wasn't explicitly set. */ | ||||||
|     if (!c->u->u_firstlineno) { |     if (!c->u->u_firstlineno) { | ||||||
|         if (entryblock->b_instr && entryblock->b_instr->i_loc.lineno) { |         if (entryblock->b_instr && entryblock->b_instr->i_loc.lineno) { | ||||||
|  | @ -8565,10 +8569,6 @@ assemble(struct compiler *c, int addNone) | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     if (!assemble_init(&a, nblocks, c->u->u_firstlineno)) |  | ||||||
|         goto error; |  | ||||||
|     a.a_entry = entryblock; |  | ||||||
| 
 |  | ||||||
|     int numdropped = fix_cell_offsets(c, entryblock, cellfixedoffsets); |     int numdropped = fix_cell_offsets(c, entryblock, cellfixedoffsets); | ||||||
|     PyMem_Free(cellfixedoffsets);  // At this point we're done with it.
 |     PyMem_Free(cellfixedoffsets);  // At this point we're done with it.
 | ||||||
|     cellfixedoffsets = NULL; |     cellfixedoffsets = NULL; | ||||||
|  | @ -8582,18 +8582,19 @@ assemble(struct compiler *c, int addNone) | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     if (optimize_cfg(c, &a, consts)) { |     if (optimize_cfg(entryblock, consts, c->c_const_cache)) { | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
|     if (duplicate_exits_without_lineno(c)) { |     if (duplicate_exits_without_lineno(entryblock)) { | ||||||
|         return NULL; |         return NULL; | ||||||
|     } |     } | ||||||
|     if (trim_unused_consts(&a, consts)) { |     if (trim_unused_consts(entryblock, consts)) { | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
|     propagate_line_numbers(&a); |     propagate_line_numbers(entryblock); | ||||||
|     guarantee_lineno_for_exits(&a, c->u->u_firstlineno); |     guarantee_lineno_for_exits(entryblock, c->u->u_firstlineno); | ||||||
|     int maxdepth = stackdepth(c, entryblock); | 
 | ||||||
|  |     int maxdepth = stackdepth(entryblock, code_flags); | ||||||
|     if (maxdepth < 0) { |     if (maxdepth < 0) { | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
|  | @ -8609,62 +8610,67 @@ assemble(struct compiler *c, int addNone) | ||||||
|     } |     } | ||||||
|     convert_exception_handlers_to_nops(entryblock); |     convert_exception_handlers_to_nops(entryblock); | ||||||
| 
 | 
 | ||||||
|     if (push_cold_blocks_to_end(c, entryblock, code_flags) < 0) { |     if (push_cold_blocks_to_end(entryblock, code_flags) < 0) { | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     remove_redundant_jumps(entryblock); |     remove_redundant_jumps(entryblock); | ||||||
|     assert(a.a_entry == entryblock); |  | ||||||
|     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         clean_basic_block(b); |         clean_basic_block(b); | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /* Order of basic blocks must have been determined by now */ |     /* Order of basic blocks must have been determined by now */ | ||||||
|     normalize_jumps(&a); |     normalize_jumps(entryblock); | ||||||
| 
 | 
 | ||||||
|     if (add_checks_for_loads_of_unknown_variables(&a, c) < 0) { |     if (add_checks_for_loads_of_unknown_variables(entryblock, c) < 0) { | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /* Can't modify the bytecode after computing jump offsets. */ |     /* Can't modify the bytecode after computing jump offsets. */ | ||||||
|     assemble_jump_offsets(&a, c); |     assemble_jump_offsets(entryblock); | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  |     /* Create assembler */ | ||||||
|  |     struct assembler a; | ||||||
|  |     if (!assemble_init(&a, c->u->u_firstlineno)) | ||||||
|  |         goto error; | ||||||
| 
 | 
 | ||||||
|     /* Emit code. */ |     /* Emit code. */ | ||||||
|     for(b = entryblock; b != NULL; b = b->b_next) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         for (j = 0; j < b->b_iused; j++) |         for (int j = 0; j < b->b_iused; j++) | ||||||
|             if (!assemble_emit(&a, &b->b_instr[j])) |             if (!assemble_emit(&a, &b->b_instr[j])) | ||||||
|                 goto error; |                 goto error; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     /* Emit location info */ |     /* Emit location info */ | ||||||
|     a.a_lineno = c->u->u_firstlineno; |     a.a_lineno = c->u->u_firstlineno; | ||||||
|     for(b = entryblock; b != NULL; b = b->b_next) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         for (j = 0; j < b->b_iused; j++) |         for (int j = 0; j < b->b_iused; j++) | ||||||
|             if (!assemble_emit_location(&a, &b->b_instr[j])) |             if (!assemble_emit_location(&a, &b->b_instr[j])) | ||||||
|                 goto error; |                 goto error; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     if (!assemble_exception_table(&a)) { |     if (!assemble_exception_table(&a, entryblock)) { | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
|     if (_PyBytes_Resize(&a.a_except_table, a.a_except_table_off) < 0) { |     if (_PyBytes_Resize(&a.a_except_table, a.a_except_table_off) < 0) { | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
|     if (!merge_const_one(c, &a.a_except_table)) { |     if (!merge_const_one(c->c_const_cache, &a.a_except_table)) { | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     if (_PyBytes_Resize(&a.a_linetable, a.a_location_off) < 0) { |     if (_PyBytes_Resize(&a.a_linetable, a.a_location_off) < 0) { | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
|     if (!merge_const_one(c, &a.a_linetable)) { |     if (!merge_const_one(c->c_const_cache, &a.a_linetable)) { | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     if (_PyBytes_Resize(&a.a_bytecode, a.a_offset * sizeof(_Py_CODEUNIT)) < 0) { |     if (_PyBytes_Resize(&a.a_bytecode, a.a_offset * sizeof(_Py_CODEUNIT)) < 0) { | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
|     if (!merge_const_one(c, &a.a_bytecode)) { |     if (!merge_const_one(c->c_const_cache, &a.a_bytecode)) { | ||||||
|         goto error; |         goto error; | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|  | @ -8703,11 +8709,12 @@ get_const_value(int opcode, int oparg, PyObject *co_consts) | ||||||
|    Called with codestr pointing to the first LOAD_CONST. |    Called with codestr pointing to the first LOAD_CONST. | ||||||
| */ | */ | ||||||
| static int | static int | ||||||
| fold_tuple_on_constants(struct compiler *c, | fold_tuple_on_constants(PyObject *const_cache, | ||||||
|                         struct instr *inst, |                         struct instr *inst, | ||||||
|                         int n, PyObject *consts) |                         int n, PyObject *consts) | ||||||
| { | { | ||||||
|     /* Pre-conditions */ |     /* Pre-conditions */ | ||||||
|  |     assert(PyDict_CheckExact(const_cache)); | ||||||
|     assert(PyList_CheckExact(consts)); |     assert(PyList_CheckExact(consts)); | ||||||
|     assert(inst[n].i_opcode == BUILD_TUPLE); |     assert(inst[n].i_opcode == BUILD_TUPLE); | ||||||
|     assert(inst[n].i_oparg == n); |     assert(inst[n].i_oparg == n); | ||||||
|  | @ -8732,7 +8739,7 @@ fold_tuple_on_constants(struct compiler *c, | ||||||
|         } |         } | ||||||
|         PyTuple_SET_ITEM(newconst, i, constant); |         PyTuple_SET_ITEM(newconst, i, constant); | ||||||
|     } |     } | ||||||
|     if (merge_const_one(c, &newconst) == 0) { |     if (merge_const_one(const_cache, &newconst) == 0) { | ||||||
|         Py_DECREF(newconst); |         Py_DECREF(newconst); | ||||||
|         return -1; |         return -1; | ||||||
|     } |     } | ||||||
|  | @ -8955,8 +8962,9 @@ jump_thread(struct instr *inst, struct instr *target, int opcode) | ||||||
| 
 | 
 | ||||||
| /* Optimization */ | /* Optimization */ | ||||||
| static int | static int | ||||||
| optimize_basic_block(struct compiler *c, basicblock *bb, PyObject *consts) | optimize_basic_block(PyObject *const_cache, basicblock *bb, PyObject *consts) | ||||||
| { | { | ||||||
|  |     assert(PyDict_CheckExact(const_cache)); | ||||||
|     assert(PyList_CheckExact(consts)); |     assert(PyList_CheckExact(consts)); | ||||||
|     struct instr nop; |     struct instr nop; | ||||||
|     nop.i_opcode = NOP; |     nop.i_opcode = NOP; | ||||||
|  | @ -9063,7 +9071,7 @@ optimize_basic_block(struct compiler *c, basicblock *bb, PyObject *consts) | ||||||
|                     } |                     } | ||||||
|                 } |                 } | ||||||
|                 if (i >= oparg) { |                 if (i >= oparg) { | ||||||
|                     if (fold_tuple_on_constants(c, inst-oparg, oparg, consts)) { |                     if (fold_tuple_on_constants(const_cache, inst-oparg, oparg, consts)) { | ||||||
|                         goto error; |                         goto error; | ||||||
|                     } |                     } | ||||||
|                 } |                 } | ||||||
|  | @ -9295,14 +9303,14 @@ normalize_basic_block(basicblock *bb) { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static int | static int | ||||||
| mark_reachable(struct assembler *a) { | mark_reachable(basicblock *entryblock) { | ||||||
|     basicblock **stack = make_cfg_traversal_stack(a->a_entry); |     basicblock **stack = make_cfg_traversal_stack(entryblock); | ||||||
|     if (stack == NULL) { |     if (stack == NULL) { | ||||||
|         return -1; |         return -1; | ||||||
|     } |     } | ||||||
|     basicblock **sp = stack; |     basicblock **sp = stack; | ||||||
|     a->a_entry->b_predecessors = 1; |     entryblock->b_predecessors = 1; | ||||||
|     *sp++ = a->a_entry; |     *sp++ = entryblock; | ||||||
|     while (sp > stack) { |     while (sp > stack) { | ||||||
|         basicblock *b = *(--sp); |         basicblock *b = *(--sp); | ||||||
|         b->b_visited = 1; |         b->b_visited = 1; | ||||||
|  | @ -9336,9 +9344,9 @@ mark_reachable(struct assembler *a) { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static void | static void | ||||||
| eliminate_empty_basic_blocks(basicblock *entry) { | eliminate_empty_basic_blocks(basicblock *entryblock) { | ||||||
|     /* Eliminate empty blocks */ |     /* Eliminate empty blocks */ | ||||||
|     for (basicblock *b = entry; b != NULL; b = b->b_next) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         basicblock *next = b->b_next; |         basicblock *next = b->b_next; | ||||||
|         if (next) { |         if (next) { | ||||||
|             while (next->b_iused == 0 && next->b_next) { |             while (next->b_iused == 0 && next->b_next) { | ||||||
|  | @ -9347,7 +9355,7 @@ eliminate_empty_basic_blocks(basicblock *entry) { | ||||||
|             b->b_next = next; |             b->b_next = next; | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
|     for (basicblock *b = entry; b != NULL; b = b->b_next) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         if (b->b_iused == 0) { |         if (b->b_iused == 0) { | ||||||
|             continue; |             continue; | ||||||
|         } |         } | ||||||
|  | @ -9373,8 +9381,8 @@ eliminate_empty_basic_blocks(basicblock *entry) { | ||||||
|  * but has no impact on the generated line number events. |  * but has no impact on the generated line number events. | ||||||
|  */ |  */ | ||||||
| static void | static void | ||||||
| propagate_line_numbers(struct assembler *a) { | propagate_line_numbers(basicblock *entryblock) { | ||||||
|     for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         if (b->b_iused == 0) { |         if (b->b_iused == 0) { | ||||||
|             continue; |             continue; | ||||||
|         } |         } | ||||||
|  | @ -9415,31 +9423,32 @@ propagate_line_numbers(struct assembler *a) { | ||||||
| */ | */ | ||||||
| 
 | 
 | ||||||
| static int | static int | ||||||
| optimize_cfg(struct compiler *c, struct assembler *a, PyObject *consts) | optimize_cfg(basicblock *entryblock, PyObject *consts, PyObject *const_cache) | ||||||
| { | { | ||||||
|     for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) { |     assert(PyDict_CheckExact(const_cache)); | ||||||
|         if (optimize_basic_block(c, b, consts)) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|  |         if (optimize_basic_block(const_cache, b, consts)) { | ||||||
|             return -1; |             return -1; | ||||||
|         } |         } | ||||||
|         clean_basic_block(b); |         clean_basic_block(b); | ||||||
|         assert(b->b_predecessors == 0); |         assert(b->b_predecessors == 0); | ||||||
|     } |     } | ||||||
|     for (basicblock *b = c->u->u_blocks; b != NULL; b = b->b_list) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         if (extend_block(b)) { |         if (extend_block(b)) { | ||||||
|             return -1; |             return -1; | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
|     if (mark_reachable(a)) { |     if (mark_reachable(entryblock)) { | ||||||
|         return -1; |         return -1; | ||||||
|     } |     } | ||||||
|     /* Delete unreachable instructions */ |     /* Delete unreachable instructions */ | ||||||
|     for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|        if (b->b_predecessors == 0) { |        if (b->b_predecessors == 0) { | ||||||
|             b->b_iused = 0; |             b->b_iused = 0; | ||||||
|        } |        } | ||||||
|     } |     } | ||||||
|     eliminate_empty_basic_blocks(a->a_entry); |     eliminate_empty_basic_blocks(entryblock); | ||||||
|     for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         clean_basic_block(b); |         clean_basic_block(b); | ||||||
|     } |     } | ||||||
|     return 0; |     return 0; | ||||||
|  | @ -9447,13 +9456,13 @@ optimize_cfg(struct compiler *c, struct assembler *a, PyObject *consts) | ||||||
| 
 | 
 | ||||||
| // Remove trailing unused constants.
 | // Remove trailing unused constants.
 | ||||||
| static int | static int | ||||||
| trim_unused_consts(struct assembler *a, PyObject *consts) | trim_unused_consts(basicblock *entryblock, PyObject *consts) | ||||||
| { | { | ||||||
|     assert(PyList_CheckExact(consts)); |     assert(PyList_CheckExact(consts)); | ||||||
| 
 | 
 | ||||||
|     // The first constant may be docstring; keep it always.
 |     // The first constant may be docstring; keep it always.
 | ||||||
|     int max_const_index = 0; |     int max_const_index = 0; | ||||||
|     for (basicblock *b = a->a_entry; b != NULL; b = b->b_next) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         for (int i = 0; i < b->b_iused; i++) { |         for (int i = 0; i < b->b_iused; i++) { | ||||||
|             if ((b->b_instr[i].i_opcode == LOAD_CONST || |             if ((b->b_instr[i].i_opcode == LOAD_CONST || | ||||||
|                 b->b_instr[i].i_opcode == KW_NAMES) && |                 b->b_instr[i].i_opcode == KW_NAMES) && | ||||||
|  | @ -9496,15 +9505,15 @@ is_exit_without_lineno(basicblock *b) { | ||||||
|  * copy the line number from the sole predecessor block. |  * copy the line number from the sole predecessor block. | ||||||
|  */ |  */ | ||||||
| static int | static int | ||||||
| duplicate_exits_without_lineno(struct compiler *c) | duplicate_exits_without_lineno(basicblock *entryblock) | ||||||
| { | { | ||||||
|     /* Copy all exit blocks without line number that are targets of a jump.
 |     /* Copy all exit blocks without line number that are targets of a jump.
 | ||||||
|      */ |      */ | ||||||
|     for (basicblock *b = c->u->u_blocks; b != NULL; b = b->b_list) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         if (b->b_iused > 0 && is_jump(&b->b_instr[b->b_iused-1])) { |         if (b->b_iused > 0 && is_jump(&b->b_instr[b->b_iused-1])) { | ||||||
|             basicblock *target = b->b_instr[b->b_iused-1].i_target; |             basicblock *target = b->b_instr[b->b_iused-1].i_target; | ||||||
|             if (is_exit_without_lineno(target) && target->b_predecessors > 1) { |             if (is_exit_without_lineno(target) && target->b_predecessors > 1) { | ||||||
|                 basicblock *new_target = compiler_copy_block(c, target); |                 basicblock *new_target = copy_basicblock(target); | ||||||
|                 if (new_target == NULL) { |                 if (new_target == NULL) { | ||||||
|                     return -1; |                     return -1; | ||||||
|                 } |                 } | ||||||
|  | @ -9518,14 +9527,14 @@ duplicate_exits_without_lineno(struct compiler *c) | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
|     /* Eliminate empty blocks */ |     /* Eliminate empty blocks */ | ||||||
|     for (basicblock *b = c->u->u_blocks; b != NULL; b = b->b_list) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         while (b->b_next && b->b_next->b_iused == 0) { |         while (b->b_next && b->b_next->b_iused == 0) { | ||||||
|             b->b_next = b->b_next->b_next; |             b->b_next = b->b_next->b_next; | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
|     /* Any remaining reachable exit blocks without line number can only be reached by
 |     /* Any remaining reachable exit blocks without line number can only be reached by
 | ||||||
|      * fall through, and thus can only have a single predecessor */ |      * fall through, and thus can only have a single predecessor */ | ||||||
|     for (basicblock *b = c->u->u_blocks; b != NULL; b = b->b_list) { |     for (basicblock *b = entryblock; b != NULL; b = b->b_next) { | ||||||
|         if (BB_HAS_FALLTHROUGH(b) && b->b_next && b->b_iused > 0) { |         if (BB_HAS_FALLTHROUGH(b) && b->b_next && b->b_iused > 0) { | ||||||
|             if (is_exit_without_lineno(b->b_next)) { |             if (is_exit_without_lineno(b->b_next)) { | ||||||
|                 assert(b->b_next->b_iused > 0); |                 assert(b->b_next->b_iused > 0); | ||||||
|  |  | ||||||
Irit Katriel