Mirror of https://github.com/python/cpython.git, synced 2025-10-31 13:41:24 +00:00

Commit 01481f2dc1
* The lexer, which includes the actual lexeme-producing logic, goes into the `lexer` directory.
* The wrappers, one wrapper per input mode (file, string, utf-8, and readline), go into the `tokenizer` directory and include the logic for creating a lexer instance and managing the buffer for the different modes.

---------

Co-authored-by: Pablo Galindo <pablogsal@gmail.com>
Co-authored-by: blurb-it[bot] <43283697+blurb-it[bot]@users.noreply.github.com>
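To make the split concrete, here is a self-contained sketch of the wrapper-per-mode pattern the commit message describes: a shared lexer state plus one thin constructor per input mode, where the constructors differ only in how they build and hand over the buffer. All names below are illustrative placeholders; this is not the code the commit actually adds.

/*
 * Illustrative sketch only: shared lexer state plus one constructor
 * ("wrapper") per input mode.  Names do not match CPython's code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Shared lexer state: the lexeme-producing logic would operate on this
 * (the role of the `lexer` directory in the commit's layout). */
struct lexer_state {
    char *buf;   /* start of the buffered input */
    char *cur;   /* current scan position */
    char *end;   /* one past the last buffered byte */
};

/* Common allocation step used by every wrapper. */
static struct lexer_state *lexer_new(char *buf, size_t len)
{
    struct lexer_state *st = malloc(sizeof *st);
    if (st == NULL) {
        free(buf);
        return NULL;
    }
    st->buf = buf;
    st->cur = buf;
    st->end = buf + len;
    return st;
}

/* String-mode wrapper: the buffer is a private copy of the caller's string. */
struct lexer_state *lexer_from_string(const char *src)
{
    size_t len = strlen(src);
    char *buf = malloc(len + 1);
    if (buf == NULL) {
        return NULL;
    }
    memcpy(buf, src, len + 1);
    return lexer_new(buf, len);
}

/* File-mode wrapper: the buffer is filled by reading the whole stream,
 * growing as needed. */
struct lexer_state *lexer_from_file(FILE *fp)
{
    size_t cap = 4096, len = 0, n;
    char *buf = malloc(cap);
    if (buf == NULL) {
        return NULL;
    }
    while ((n = fread(buf + len, 1, cap - len, fp)) > 0) {
        len += n;
        if (len == cap) {
            char *bigger = realloc(buf, cap *= 2);
            if (bigger == NULL) {
                free(buf);
                return NULL;
            }
            buf = bigger;
        }
    }
    return lexer_new(buf, len);
}

/* Teardown shared by all modes. */
void lexer_free(struct lexer_state *st)
{
    if (st != NULL) {
        free(st->buf);
        free(st);
    }
}

In CPython's case the per-mode wrappers are exposed as the four _PyTokenizer_From* constructors declared in tokenizer.h, shown below.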
		
			
				
	
	
		
tokenizer.h (14 lines, 476 B, C)
#ifndef Py_TOKENIZER_H
#define Py_TOKENIZER_H

#include "Python.h"

struct tok_state *_PyTokenizer_FromString(const char *, int, int);
struct tok_state *_PyTokenizer_FromUTF8(const char *, int, int);
struct tok_state *_PyTokenizer_FromReadline(PyObject*, const char*, int, int);
struct tok_state *_PyTokenizer_FromFile(FILE *, const char*,
                                        const char *, const char *);

#define tok_dump _Py_tok_dump

#endif /* !Py_TOKENIZER_H */
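For context, a minimal caller sketch for the string-mode constructor declared above. The reading of the two int arguments as exec_input and preserve_crlf, the include path, and the _PyTokenizer_Free cleanup call are assumptions drawn from the surrounding CPython parser sources rather than anything this header declares.

/* Hypothetical caller sketch -- flag meanings, include path and the
 * Free function are assumptions, not part of tokenizer.h above. */
#include "Python.h"
#include "tokenizer.h"          /* assumed include path */

/* Assumed prototype; _PyTokenizer_Free is not declared in this header. */
extern void _PyTokenizer_Free(struct tok_state *);

int tokenize_string_sketch(const char *src)
{
    /* The two int flags are taken here to be exec_input = 1 and
     * preserve_crlf = 0; that reading is an assumption. */
    struct tok_state *tok = _PyTokenizer_FromString(src, 1, 0);
    if (tok == NULL) {
        return -1;              /* decoding or allocation failure */
    }
    /* ... a real caller would now pull tokens from tok ... */
    _PyTokenizer_Free(tok);     /* assumed cleanup entry point */
    return 0;
}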