#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

__all__ = ['Pool', 'ThreadPool']

#
# Imports
#

import collections
import itertools
import os
import queue
import threading
import time
import traceback
import types
import warnings

# If threading is available then ThreadPool should be provided.  Therefore
# we avoid top-level imports which are liable to fail on some systems.
from . import util
from . import get_context, TimeoutError
from .connection import wait

#
# Constants representing the state of a pool
#

INIT = "INIT"
RUN = "RUN"
CLOSE = "CLOSE"
TERMINATE = "TERMINATE"

#
# Miscellaneous
#

job_counter = itertools.count()

def mapstar(args):
    return list(map(*args))

def starmapstar(args):
    return list(itertools.starmap(args[0], args[1]))

#
# Hack to embed stringification of remote traceback in local traceback
#

class RemoteTraceback(Exception):
    def __init__(self, tb):
        self.tb = tb
    def __str__(self):
        return self.tb

class ExceptionWithTraceback:
    def __init__(self, exc, tb):
        tb = traceback.format_exception(type(exc), exc, tb)
        tb = ''.join(tb)
        self.exc = exc
        self.tb = '\n"""\n%s"""' % tb
    def __reduce__(self):
        return rebuild_exc, (self.exc, self.tb)

def rebuild_exc(exc, tb):
    exc.__cause__ = RemoteTraceback(tb)
    return exc

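# Illustrative sketch (not part of the original module): how the hack above is
# meant to behave.  ExceptionWithTraceback pickles down to rebuild_exc(), so the
# formatted remote traceback survives as the __cause__ of the unpickled error.
# The helper name below is hypothetical and the function is never called here.
def _example_remote_traceback_roundtrip():
    import pickle
    try:
        1 / 0
    except Exception as e:
        wrapped = ExceptionWithTraceback(e, e.__traceback__)
    exc = pickle.loads(pickle.dumps(wrapped))   # unpickles via rebuild_exc()
    assert isinstance(exc, ZeroDivisionError)
    assert isinstance(exc.__cause__, RemoteTraceback)
    return exc
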
#
# Code run by worker processes
#

class MaybeEncodingError(Exception):
    """Wraps possible unpickleable errors, so they can be
    safely sent through the socket."""

    def __init__(self, exc, value):
        self.exc = repr(exc)
        self.value = repr(value)
        super(MaybeEncodingError, self).__init__(self.exc, self.value)

    def __str__(self):
        return "Error sending result: '%s'. Reason: '%s'" % (self.value,
                                                             self.exc)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)

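# Illustrative sketch (not part of the original module): MaybeEncodingError is
# what reaches the caller when a worker's result cannot be pickled.  The helper
# below is hypothetical and never called; it only shows the wrapping performed
# when put() fails in the worker loop defined below.
def _example_maybe_encoding_error():
    import pickle
    unpicklable = lambda: None          # lambdas cannot be pickled
    try:
        pickle.dumps(unpicklable)
    except Exception as e:
        return MaybeEncodingError(e, unpicklable)
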
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None,
           wrap_exception=False):
    if (maxtasks is not None) and not (isinstance(maxtasks, int)
                                       and maxtasks >= 1):
        raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks))
    put = outqueue.put
    get = inqueue.get
    if hasattr(inqueue, '_writer'):
        inqueue._writer.close()
        outqueue._reader.close()

    if initializer is not None:
        initializer(*initargs)

    completed = 0
    while maxtasks is None or (maxtasks and completed < maxtasks):
        try:
            task = get()
        except (EOFError, OSError):
            util.debug('worker got EOFError or OSError -- exiting')
            break

        if task is None:
            util.debug('worker got sentinel -- exiting')
            break

        job, i, func, args, kwds = task
        try:
            result = (True, func(*args, **kwds))
        except Exception as e:
            if wrap_exception and func is not _helper_reraises_exception:
                e = ExceptionWithTraceback(e, e.__traceback__)
            result = (False, e)
        try:
            put((job, i, result))
        except Exception as e:
            wrapped = MaybeEncodingError(e, result[1])
            util.debug("Possible encoding error while sending result: %s" % (
                wrapped))
            put((job, i, (False, wrapped)))

        task = job = result = func = args = kwds = None
        completed += 1
    util.debug('worker exiting after %d tasks' % completed)

def _helper_reraises_exception(ex):
    'Pickle-able helper function for use by _guarded_task_generation.'
    raise ex

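# Note on the task/result protocol used above (added commentary, not original):
# the task handler feeds worker() tuples of the form
#     (job, i, func, args, kwds)
# and the worker answers on the result queue with
#     (job, i, (True, return_value))   on success, or
#     (job, i, (False, exception))     on failure,
# where a failure raised by a user-supplied iterable is re-raised inside the
# worker via _helper_reraises_exception so it surfaces through the normal
# result path.
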
#
# Class representing a process pool
#

class _PoolCache(dict):
    """
    Class that implements a cache for the Pool class that will notify
    the pool management threads every time the cache is emptied. The
    notification is done by the use of a queue that is provided when
    instantiating the cache.
    """
    def __init__(self, /, *args, notifier=None, **kwds):
        self.notifier = notifier
        super().__init__(*args, **kwds)

    def __delitem__(self, item):
        super().__delitem__(item)

        # Notify that the cache is empty. This is important because the
        # pool keeps maintaining workers until the cache gets drained. This
        # eliminates a race condition in which a task is finished after
        # the pool's _handle_workers method has entered another iteration
        # of the loop. In this situation, the only event that can wake up
        # the pool is for the cache to be emptied (no more tasks available).
        if not self:
            self.notifier.put(None)

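# Illustrative sketch (not part of the original module): the notification
# behaviour of _PoolCache.  In a real Pool the notifier is the pool's
# _change_notifier (a multiprocessing SimpleQueue); a plain queue.SimpleQueue
# is used here only to keep the sketch self-contained.  Never called.
def _example_pool_cache_notification():
    notifier = queue.SimpleQueue()
    cache = _PoolCache(notifier=notifier)
    cache[0] = object()        # a pending job
    del cache[0]               # last entry removed -> cache puts None
    assert notifier.get() is None
    return cache
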
class Pool(object):
    '''
    Class which supports an async version of applying functions to arguments.
    '''
    _wrap_exception = True

    @staticmethod
    def Process(ctx, *args, **kwds):
        return ctx.Process(*args, **kwds)

    def __init__(self, processes=None, initializer=None, initargs=(),
                 maxtasksperchild=None, context=None):
        # Attributes initialized early to make sure that they exist in
        # __del__() if __init__() raises an exception
        self._pool = []
        self._state = INIT

        self._ctx = context or get_context()
        self._setup_queues()
        self._taskqueue = queue.SimpleQueue()
        # The _change_notifier queue exists to wake up self._handle_workers()
        # when the cache (self._cache) is empty or when there is a change in
        # the _state variable of the thread that runs _handle_workers.
        self._change_notifier = self._ctx.SimpleQueue()
        self._cache = _PoolCache(notifier=self._change_notifier)
        self._maxtasksperchild = maxtasksperchild
        self._initializer = initializer
        self._initargs = initargs

        if processes is None:
            processes = os.process_cpu_count() or 1
        if processes < 1:
            raise ValueError("Number of processes must be at least 1")
        if maxtasksperchild is not None:
            if not isinstance(maxtasksperchild, int) or maxtasksperchild <= 0:
                raise ValueError("maxtasksperchild must be a positive int or None")

        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')

        self._processes = processes
        try:
            self._repopulate_pool()
        except Exception:
            for p in self._pool:
                if p.exitcode is None:
                    p.terminate()
            for p in self._pool:
                p.join()
            raise

        sentinels = self._get_sentinels()

        self._worker_handler = threading.Thread(
            target=Pool._handle_workers,
            args=(self._cache, self._taskqueue, self._ctx, self.Process,
                  self._processes, self._pool, self._inqueue, self._outqueue,
                  self._initializer, self._initargs, self._maxtasksperchild,
                  self._wrap_exception, sentinels, self._change_notifier)
            )
        self._worker_handler.daemon = True
        self._worker_handler._state = RUN
        self._worker_handler.start()

        self._task_handler = threading.Thread(
            target=Pool._handle_tasks,
            args=(self._taskqueue, self._quick_put, self._outqueue,
                  self._pool, self._cache)
            )
        self._task_handler.daemon = True
        self._task_handler._state = RUN
        self._task_handler.start()

        self._result_handler = threading.Thread(
            target=Pool._handle_results,
            args=(self._outqueue, self._quick_get, self._cache)
            )
        self._result_handler.daemon = True
        self._result_handler._state = RUN
        self._result_handler.start()

        self._terminate = util.Finalize(
            self, self._terminate_pool,
            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
                  self._change_notifier, self._worker_handler, self._task_handler,
                  self._result_handler, self._cache),
            exitpriority=15
            )
        self._state = RUN

    # Copy globals as function locals to make sure that they are available
    # during Python shutdown when the Pool is destroyed.
    def __del__(self, _warn=warnings.warn, RUN=RUN):
        if self._state == RUN:
            _warn(f"unclosed running multiprocessing pool {self!r}",
                  ResourceWarning, source=self)
            if getattr(self, '_change_notifier', None) is not None:
                self._change_notifier.put(None)

    def __repr__(self):
        cls = self.__class__
        return (f'<{cls.__module__}.{cls.__qualname__} '
                f'state={self._state} '
                f'pool_size={len(self._pool)}>')

    def _get_sentinels(self):
        task_queue_sentinels = [self._outqueue._reader]
        self_notifier_sentinels = [self._change_notifier._reader]
        return [*task_queue_sentinels, *self_notifier_sentinels]

    @staticmethod
    def _get_worker_sentinels(workers):
        return [worker.sentinel for worker in
                workers if hasattr(worker, "sentinel")]

    @staticmethod
    def _join_exited_workers(pool):
        """Cleanup after any worker processes which have exited due to reaching
        their specified lifetime.  Returns True if any workers were cleaned up.
        """
        cleaned = False
        for i in reversed(range(len(pool))):
            worker = pool[i]
            if worker.exitcode is not None:
                # worker exited
                util.debug('cleaning up worker %d' % i)
                worker.join()
                cleaned = True
                del pool[i]
        return cleaned

    def _repopulate_pool(self):
        return self._repopulate_pool_static(self._ctx, self.Process,
                                            self._processes,
                                            self._pool, self._inqueue,
                                            self._outqueue, self._initializer,
                                            self._initargs,
                                            self._maxtasksperchild,
                                            self._wrap_exception)

    @staticmethod
    def _repopulate_pool_static(ctx, Process, processes, pool, inqueue,
                                outqueue, initializer, initargs,
                                maxtasksperchild, wrap_exception):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        for i in range(processes - len(pool)):
            w = Process(ctx, target=worker,
                        args=(inqueue, outqueue,
                              initializer,
                              initargs, maxtasksperchild,
                              wrap_exception))
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            pool.append(w)
            util.debug('added worker')

    @staticmethod
    def _maintain_pool(ctx, Process, processes, pool, inqueue, outqueue,
                       initializer, initargs, maxtasksperchild,
                       wrap_exception):
        """Clean up any exited workers and start replacements for them.
        """
        if Pool._join_exited_workers(pool):
            Pool._repopulate_pool_static(ctx, Process, processes, pool,
                                         inqueue, outqueue, initializer,
                                         initargs, maxtasksperchild,
                                         wrap_exception)

    def _setup_queues(self):
        self._inqueue = self._ctx.SimpleQueue()
        self._outqueue = self._ctx.SimpleQueue()
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv

    def _check_running(self):
        if self._state != RUN:
            raise ValueError("Pool not running")

    def apply(self, func, args=(), kwds={}):
        '''
        Equivalent of `func(*args, **kwds)`.
        Pool must be running.
        '''
        return self.apply_async(func, args, kwds).get()

    def map(self, func, iterable, chunksize=None):
        '''
        Apply `func` to each element in `iterable`, collecting the results
        in a list that is returned.
        '''
        return self._map_async(func, iterable, mapstar, chunksize).get()

    def starmap(self, func, iterable, chunksize=None):
        '''
        Like `map()` method but the elements of the `iterable` are expected to
        be iterables as well and will be unpacked as arguments. Hence
        `func` and (a, b) becomes func(a, b).
        '''
        return self._map_async(func, iterable, starmapstar, chunksize).get()

    def starmap_async(self, func, iterable, chunksize=None, callback=None,
            error_callback=None):
        '''
        Asynchronous version of `starmap()` method.
        '''
        return self._map_async(func, iterable, starmapstar, chunksize,
                               callback, error_callback)
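
    # Usage sketch (added commentary, not part of the original source), assuming
    # the platform's start-method requirements for multiprocessing are met:
    #
    #     with multiprocessing.Pool(processes=4) as p:
    #         p.apply(divmod, (10, 3))              # (3, 1)
    #         p.map(abs, [-1, -2, 3])               # [1, 2, 3]
    #         p.starmap(divmod, [(10, 3), (7, 2)])  # [(3, 1), (3, 1)]
    #         r = p.starmap_async(pow, [(2, 5)])
    #         r.get(timeout=10)                     # [32]
    #
    # Each of these calls ultimately funnels through apply_async() or
    # _map_async() defined in this class.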

    def _guarded_task_generation(self, result_job, func, iterable):
        '''Provides a generator of tasks for imap and imap_unordered with
        appropriate handling for iterables which throw exceptions during
        iteration.'''
        try:
            i = -1
            for i, x in enumerate(iterable):
                yield (result_job, i, func, (x,), {})
        except Exception as e:
            yield (result_job, i+1, _helper_reraises_exception, (e,), {})

    def imap(self, func, iterable, chunksize=1):
        '''
        Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
        '''
        self._check_running()
        if chunksize == 1:
            result = IMapIterator(self)
            self._taskqueue.put(
                (
                    self._guarded_task_generation(result._job, func, iterable),
                    result._set_length
                ))
            return result
        else:
            if chunksize < 1:
                raise ValueError(
                    "Chunksize must be 1+, not {0:n}".format(
                        chunksize))
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapIterator(self)
            self._taskqueue.put(
                (
                    self._guarded_task_generation(result._job,
                                                  mapstar,
                                                  task_batches),
                    result._set_length
                ))
            return (item for chunk in result for item in chunk)

    def imap_unordered(self, func, iterable, chunksize=1):
        '''
        Like `imap()` method but ordering of results is arbitrary.
        '''
        self._check_running()
        if chunksize == 1:
            result = IMapUnorderedIterator(self)
            self._taskqueue.put(
                (
                    self._guarded_task_generation(result._job, func, iterable),
                    result._set_length
                ))
            return result
        else:
            if chunksize < 1:
                raise ValueError(
                    "Chunksize must be 1+, not {0!r}".format(chunksize))
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapUnorderedIterator(self)
            self._taskqueue.put(
                (
                    self._guarded_task_generation(result._job,
                                                  mapstar,
                                                  task_batches),
                    result._set_length
                ))
            return (item for chunk in result for item in chunk)
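
    # Added commentary (not original): with chunksize > 1 the two imap variants
    # above submit batches that the workers run through mapstar(), and the
    # trailing generator expression flattens each returned chunk back into
    # individual results, trading per-result latency for less queue traffic.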

    def apply_async(self, func, args=(), kwds={}, callback=None,
            error_callback=None):
        '''
        Asynchronous version of `apply()` method.
        '''
        self._check_running()
        result = ApplyResult(self, callback, error_callback)
        self._taskqueue.put(([(result._job, 0, func, args, kwds)], None))
        return result

    def map_async(self, func, iterable, chunksize=None, callback=None,
            error_callback=None):
        '''
        Asynchronous version of `map()` method.
        '''
        return self._map_async(func, iterable, mapstar, chunksize, callback,
            error_callback)

    def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,
            error_callback=None):
        '''
        Helper function to implement map, starmap and their async counterparts.
        '''
        self._check_running()
        if not hasattr(iterable, '__len__'):
            iterable = list(iterable)

        if chunksize is None:
            chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
            if extra:
                chunksize += 1
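        # Worked example of the heuristic above (added commentary, illustrative
        # numbers): with 100 items and 4 worker processes, divmod(100, 4 * 4)
        # is (6, 4), so chunksize becomes 7 and the input is sent as
        # ceil(100 / 7) == 15 batches rather than 100 single-item tasks.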
										 |  |  |         if len(iterable) == 0: | 
					
						
							|  |  |  |             chunksize = 0 | 
					
						
							| 
									
										
										
										
											2008-06-11 16:44:04 +00:00
										 |  |  | 
 | 
					
						
							|  |  |  |         task_batches = Pool._get_tasks(func, iterable, chunksize) | 
					
						
							| 
									
										
										
										
											2019-02-11 17:29:00 +00:00
										 |  |  |         result = MapResult(self, chunksize, len(iterable), callback, | 
					
						
							| 
									
										
										
										
											2010-11-09 20:55:52 +00:00
										 |  |  |                            error_callback=error_callback) | 
					
						
							| 
									
										
										
										
											2017-03-29 11:58:54 +08:00
										 |  |  |         self._taskqueue.put( | 
					
						
							|  |  |  |             ( | 
					
						
							|  |  |  |                 self._guarded_task_generation(result._job, | 
					
						
							|  |  |  |                                               mapper, | 
					
						
							|  |  |  |                                               task_batches), | 
					
						
							|  |  |  |                 None | 
					
						
							|  |  |  |             ) | 
					
						
							|  |  |  |         ) | 
					
						
							| 
									
										
										
										
											2008-06-11 16:44:04 +00:00
										 |  |  |         return result | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2010-01-27 03:36:01 +00:00
										 |  |  |     @staticmethod | 
					
						
							| 
									
										
										
										
											2019-03-16 22:34:24 +00:00
										 |  |  |     def _wait_for_updates(sentinels, change_notifier, timeout=None): | 
					
						
							|  |  |  |         wait(sentinels, timeout=timeout) | 
					
						
							|  |  |  |         while not change_notifier.empty(): | 
					
						
							|  |  |  |             change_notifier.get() | 
					
						
							|  |  |  | 
    @classmethod
    def _handle_workers(cls, cache, taskqueue, ctx, Process, processes,
                        pool, inqueue, outqueue, initializer, initargs,
                        maxtasksperchild, wrap_exception, sentinels,
                        change_notifier):
        thread = threading.current_thread()

        # Keep maintaining workers until the cache gets drained, unless the pool
        # is terminated.
        while thread._state == RUN or (cache and thread._state != TERMINATE):
            cls._maintain_pool(ctx, Process, processes, pool, inqueue,
                               outqueue, initializer, initargs,
                               maxtasksperchild, wrap_exception)

            current_sentinels = [*cls._get_worker_sentinels(pool), *sentinels]

            cls._wait_for_updates(current_sentinels, change_notifier)
        # send sentinel to stop workers
        taskqueue.put(None)
        util.debug('worker handler exiting')

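    # Once the worker handler leaves its loop it puts a single None on the
    # task queue; _handle_tasks below treats that None as its own shutdown
    # sentinel and then forwards sentinels to the result handler and workers.
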
    @staticmethod
    def _handle_tasks(taskqueue, put, outqueue, pool, cache):
        thread = threading.current_thread()

        for taskseq, set_length in iter(taskqueue.get, None):
            task = None
            try:
                # iterating taskseq cannot fail
                for task in taskseq:
                    if thread._state != RUN:
                        util.debug('task handler found thread._state != RUN')
                        break
                    try:
                        put(task)
                    except Exception as e:
                        job, idx = task[:2]
                        try:
                            cache[job]._set(idx, (False, e))
                        except KeyError:
                            pass
                else:
                    if set_length:
                        util.debug('doing set_length()')
                        idx = task[1] if task else -1
                        set_length(idx + 1)
                    continue
                break
            finally:
                task = taskseq = job = None
        else:
            util.debug('task handler got sentinel')

        try:
            # tell result handler to finish when cache is empty
            util.debug('task handler sending sentinel to result handler')
            outqueue.put(None)

            # tell workers there is no more work
            util.debug('task handler sending sentinel to workers')
            for p in pool:
                put(None)
        except OSError:
            util.debug('task handler got OSError when sending sentinels')

        util.debug('task handler exiting')

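    # Each entry pulled off the task queue is a (task_generator, set_length)
    # pair; the tasks themselves are tuples whose first two fields are the job
    # id and the chunk index, which is why a failed put() can be reported back
    # through cache[job]._set(idx, (False, e)).
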
    @staticmethod
    def _handle_results(outqueue, get, cache):
        thread = threading.current_thread()

        while 1:
            try:
                task = get()
            except (OSError, EOFError):
                util.debug('result handler got EOFError/OSError -- exiting')
                return

            if thread._state != RUN:
                assert thread._state == TERMINATE, "Thread not in TERMINATE"
                util.debug('result handler found thread._state=TERMINATE')
                break

            if task is None:
                util.debug('result handler got sentinel')
                break

            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass
            task = job = obj = None

        while cache and thread._state != TERMINATE:
            try:
                task = get()
            except (OSError, EOFError):
                util.debug('result handler got EOFError/OSError -- exiting')
                return

            if task is None:
                util.debug('result handler ignoring extra sentinel')
                continue
            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass
            task = job = obj = None

        if hasattr(outqueue, '_reader'):
            util.debug('ensuring that outqueue is not full')
            # If we don't make room available in outqueue then
            # attempts to add the sentinel (None) to outqueue may
            # block.  There is guaranteed to be no more than 2 sentinels.
            try:
                for i in range(10):
                    if not outqueue._reader.poll():
                        break
                    get()
            except (OSError, EOFError):
                pass

        util.debug('result handler exiting: len(cache)=%s, thread._state=%s',
              len(cache), thread._state)

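    # Result messages are (job, i, obj) triples; the handler looks the job up
    # in the cache and hands the payload to that result object's _set(), i.e.
    # ApplyResult._set, MapResult._set or IMapIterator._set defined below.
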
    @staticmethod
    def _get_tasks(func, it, size):
        it = iter(it)
        while 1:
            x = tuple(itertools.islice(it, size))
            if not x:
                return
            yield (func, x)

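    # For example, list(Pool._get_tasks(str.upper, 'abcde', 2)) yields
    # [(str.upper, ('a', 'b')), (str.upper, ('c', 'd')), (str.upper, ('e',))].
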
    def __reduce__(self):
        raise NotImplementedError(
              'pool objects cannot be passed between processes or pickled'
              )

    def close(self):
        util.debug('closing pool')
        if self._state == RUN:
            self._state = CLOSE
            self._worker_handler._state = CLOSE
            self._change_notifier.put(None)

    def terminate(self):
        util.debug('terminating pool')
        self._state = TERMINATE
        self._terminate()

    def join(self):
        util.debug('joining pool')
        if self._state == RUN:
            raise ValueError("Pool is still running")
        elif self._state not in (CLOSE, TERMINATE):
            raise ValueError("In unknown state")
        self._worker_handler.join()
        self._task_handler.join()
        self._result_handler.join()
        for p in self._pool:
            p.join()

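    # Typical shutdown sketch (assuming `pool` is a running Pool): close()
    # stops new work from being submitted, join() then waits for the handler
    # threads and the workers to exit; calling join() while the pool is still
    # running raises ValueError.
    #
    #     pool.close()
    #     pool.join()
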
    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # task_handler may be blocked trying to put items on inqueue
        util.debug('removing tasks from inqueue until task handler finished')
        inqueue._rlock.acquire()
        while task_handler.is_alive() and inqueue._reader.poll():
            inqueue._reader.recv()
            time.sleep(0)

    @classmethod
    def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, change_notifier,
                        worker_handler, task_handler, result_handler, cache):
        # this is guaranteed to only be called once
        util.debug('finalizing pool')

        # Notify that the worker_handler state has been changed so the
        # _handle_workers loop can be unblocked (and exited) in order to
        # send the finalization sentinel to all the workers.
        worker_handler._state = TERMINATE
        change_notifier.put(None)

        task_handler._state = TERMINATE

        util.debug('helping task handler/workers to finish')
        cls._help_stuff_finish(inqueue, task_handler, len(pool))

        if (not result_handler.is_alive()) and (len(cache) != 0):
            raise AssertionError(
                "Cannot have cache with result_handler not alive")

        result_handler._state = TERMINATE
        change_notifier.put(None)
        outqueue.put(None)                  # sentinel

        # We must wait for the worker handler to exit before terminating
        # workers because we don't want workers to be restarted behind our back.
        util.debug('joining worker handler')
        if threading.current_thread() is not worker_handler:
            worker_handler.join()

        # Terminate workers which haven't already finished.
        if pool and hasattr(pool[0], 'terminate'):
            util.debug('terminating workers')
            for p in pool:
                if p.exitcode is None:
                    p.terminate()

        util.debug('joining task handler')
        if threading.current_thread() is not task_handler:
            task_handler.join()

        util.debug('joining result handler')
        if threading.current_thread() is not result_handler:
            result_handler.join()

        if pool and hasattr(pool[0], 'terminate'):
            util.debug('joining pool workers')
            for p in pool:
                if p.is_alive():
                    # worker has not yet exited
                    util.debug('cleaning up worker %d' % p.pid)
                    p.join()

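    # Note the ordering above: the worker handler is joined before any worker
    # is terminated (so no replacement worker is spawned behind our back), and
    # only then are the task handler, result handler and remaining workers
    # joined.
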
    def __enter__(self):
        self._check_running()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.terminate()

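    # Context-manager sketch (assuming ``import multiprocessing`` and, where
    # required, an ``if __name__ == '__main__'`` guard): __exit__ calls
    # terminate(), not close(), so leaving the ``with`` block stops the
    # workers immediately rather than waiting for outstanding work:
    #
    #     with multiprocessing.Pool() as pool:
    #         print(pool.map(abs, [-1, 2, -3]))    # [1, 2, 3]
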
#
# Class whose instances are returned by `Pool.apply_async()`
#

class ApplyResult(object):

    def __init__(self, pool, callback, error_callback):
        self._pool = pool
        self._event = threading.Event()
        self._job = next(job_counter)
        self._cache = pool._cache
        self._callback = callback
        self._error_callback = error_callback
        self._cache[self._job] = self

    def ready(self):
        return self._event.is_set()

    def successful(self):
        if not self.ready():
            raise ValueError("{0!r} not ready".format(self))
        return self._success

    def wait(self, timeout=None):
        self._event.wait(timeout)

    def get(self, timeout=None):
        self.wait(timeout)
        if not self.ready():
            raise TimeoutError
        if self._success:
            return self._value
        else:
            raise self._value

    def _set(self, i, obj):
        self._success, self._value = obj
        if self._callback and self._success:
            self._callback(self._value)
        if self._error_callback and not self._success:
            self._error_callback(self._value)
        self._event.set()
        del self._cache[self._job]
        self._pool = None

    __class_getitem__ = classmethod(types.GenericAlias)

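# Usage sketch (assuming a running pool): apply_async() returns an
# ApplyResult; get() blocks until the result is ready and re-raises the
# worker's exception if the call failed, e.g.
#
#     r = pool.apply_async(divmod, (7, 3))
#     assert r.get(timeout=5) == (2, 1)
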
AsyncResult = ApplyResult       # create alias -- see #17805

#
# Class whose instances are returned by `Pool.map_async()`
#

class MapResult(ApplyResult):

    def __init__(self, pool, chunksize, length, callback, error_callback):
        ApplyResult.__init__(self, pool, callback,
                             error_callback=error_callback)
        self._success = True
        self._value = [None] * length
        self._chunksize = chunksize
        if chunksize <= 0:
            self._number_left = 0
            self._event.set()
            del self._cache[self._job]
        else:
            self._number_left = length//chunksize + bool(length % chunksize)

    def _set(self, i, success_result):
        self._number_left -= 1
        success, result = success_result
        if success and self._success:
            self._value[i*self._chunksize:(i+1)*self._chunksize] = result
            if self._number_left == 0:
                if self._callback:
                    self._callback(self._value)
                del self._cache[self._job]
                self._event.set()
                self._pool = None
        else:
            if not success and self._success:
                # only store first exception
                self._success = False
                self._value = result
            if self._number_left == 0:
                # only consider the result ready once all jobs are done
                if self._error_callback:
                    self._error_callback(self._value)
                del self._cache[self._job]
                self._event.set()
                self._pool = None

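# The chunksize <= 0 branch above matches the empty-iterable case handled in
# _map_async: with no task batches to wait for, the MapResult is marked ready
# immediately and its value is the empty list.
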
#
# Class whose instances are returned by `Pool.imap()`
#

class IMapIterator(object):

    def __init__(self, pool):
        self._pool = pool
        self._cond = threading.Condition(threading.Lock())
        self._job = next(job_counter)
        self._cache = pool._cache
        self._items = collections.deque()
        self._index = 0
        self._length = None
        self._unsorted = {}
        self._cache[self._job] = self

    def __iter__(self):
        return self

    def next(self, timeout=None):
        with self._cond:
            try:
                item = self._items.popleft()
            except IndexError:
                if self._index == self._length:
                    self._pool = None
                    raise StopIteration from None
                self._cond.wait(timeout)
                try:
                    item = self._items.popleft()
                except IndexError:
                    if self._index == self._length:
                        self._pool = None
                        raise StopIteration from None
                    raise TimeoutError from None

        success, value = item
        if success:
            return value
        raise value

    __next__ = next                    # XXX

    def _set(self, i, obj):
        with self._cond:
            if self._index == i:
                self._items.append(obj)
                self._index += 1
                while self._index in self._unsorted:
                    obj = self._unsorted.pop(self._index)
                    self._items.append(obj)
                    self._index += 1
                self._cond.notify()
            else:
                self._unsorted[i] = obj

            if self._index == self._length:
                del self._cache[self._job]
                self._pool = None

    def _set_length(self, length):
        with self._cond:
            self._length = length
            if self._index == self._length:
                self._cond.notify()
                del self._cache[self._job]
                self._pool = None

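# Ordering sketch: IMapIterator parks out-of-order results in _unsorted and
# only releases them once every earlier index has arrived, so
#
#     for value in pool.imap(abs, [-1, -2, -3]):
#         print(value)    # 1, 2, 3 in input order
#
# whereas IMapUnorderedIterator (below) hands results out as soon as they
# arrive.
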
#
# Class whose instances are returned by `Pool.imap_unordered()`
#

class IMapUnorderedIterator(IMapIterator):

    def _set(self, i, obj):
        with self._cond:
            self._items.append(obj)
            self._index += 1
            self._cond.notify()
            if self._index == self._length:
                del self._cache[self._job]
                self._pool = None

#
#
#

class ThreadPool(Pool):
    _wrap_exception = False

    @staticmethod
    def Process(ctx, *args, **kwds):
        from .dummy import Process
        return Process(*args, **kwds)

    def __init__(self, processes=None, initializer=None, initargs=()):
        Pool.__init__(self, processes, initializer, initargs)

    def _setup_queues(self):
        self._inqueue = queue.SimpleQueue()
        self._outqueue = queue.SimpleQueue()
        self._quick_put = self._inqueue.put
        self._quick_get = self._outqueue.get

    def _get_sentinels(self):
        return [self._change_notifier._reader]

    @staticmethod
    def _get_worker_sentinels(workers):
        return []

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # drain inqueue, and put sentinels at its head to make workers finish
        try:
            while True:
                inqueue.get(block=False)
        except queue.Empty:
            pass
        for i in range(size):
            inqueue.put(None)

    def _wait_for_updates(self, sentinels, change_notifier, timeout):
        time.sleep(timeout)
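
    # ThreadPool reuses the Pool machinery with threads instead of processes:
    # its queues are plain queue.SimpleQueue objects, there are no worker
    # sentinels to wait on, and _wait_for_updates simply sleeps.  A minimal
    # sketch:
    #
    #     from multiprocessing.pool import ThreadPool
    #     with ThreadPool(4) as tp:
    #         print(tp.map(len, ['a', 'bb', 'ccc']))    # [1, 2, 3]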