#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

__all__ = ['Pool']

#
# Imports
#

import threading
import queue
import itertools
import collections
import time

from multiprocessing import Process, cpu_count, TimeoutError
from multiprocessing.util import Finalize, debug

#
# Constants representing the state of a pool
#

RUN = 0
CLOSE = 1
TERMINATE = 2

#
# Miscellaneous
#

job_counter = itertools.count()

def mapstar(args):
    return list(map(*args))

def starmapstar(args):
    return list(itertools.starmap(args[0], args[1]))
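
# Illustration (explanatory comment, not part of the original module): these
# helpers adapt a batched task to the builtin map/starmap calling
# conventions, e.g.
#
#     mapstar((abs, (-1, -2)))                # -> [1, 2]
#     starmapstar((pow, [(2, 3), (3, 2)]))    # -> [8, 9]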
					
						
#
# Code run by worker processes
#

class MaybeEncodingError(Exception):
    """Wraps possible unpickleable errors, so they can be
    safely sent through the socket."""

    def __init__(self, exc, value):
        self.exc = repr(exc)
        self.value = repr(value)
        super(MaybeEncodingError, self).__init__(self.exc, self.value)

    def __str__(self):
        return "Error sending result: '%s'. Reason: '%s'" % (self.value,
                                                             self.exc)

    def __repr__(self):
        return "<MaybeEncodingError: %s>" % str(self)


def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
    assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
    put = outqueue.put
    get = inqueue.get
    if hasattr(inqueue, '_writer'):
        inqueue._writer.close()
        outqueue._reader.close()

    if initializer is not None:
        initializer(*initargs)

    completed = 0
    while maxtasks is None or (maxtasks and completed < maxtasks):
        try:
            task = get()
        except (EOFError, IOError):
            debug('worker got EOFError or IOError -- exiting')
            break

        if task is None:
            debug('worker got sentinel -- exiting')
            break

        job, i, func, args, kwds = task
        try:
            result = (True, func(*args, **kwds))
        except Exception as e:
            result = (False, e)
        try:
            put((job, i, result))
        except Exception as e:
            wrapped = MaybeEncodingError(e, result[1])
            debug("Possible encoding error while sending result: %s" % (
                wrapped))
            put((job, i, (False, wrapped)))
        completed += 1
    debug('worker exiting after %d tasks' % completed)
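
# Sketch of the wire protocol implied above (explanatory comment only): each
# task arrives as a (job, i, func, args, kwds) tuple and each result is sent
# back as (job, i, (success, value)), where `success` is False and `value` is
# the exception instance when func raised.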
					
						
#
# Class representing a process pool
#

class Pool(object):
    '''
    Class which supports an async version of applying functions to arguments.
    '''
    Process = Process
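
    # Minimal usage sketch (illustrative, not executed; assumes `f` is a
    # picklable function defined at module level):
    #
    #     p = Pool(processes=4)
    #     p.map(f, range(10))     # -> [f(0), f(1), ..., f(9)]
    #     p.close()
    #     p.join()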
					
						
    def __init__(self, processes=None, initializer=None, initargs=(),
                 maxtasksperchild=None):
        self._setup_queues()
        self._taskqueue = queue.Queue()
        self._cache = {}
        self._state = RUN
        self._maxtasksperchild = maxtasksperchild
        self._initializer = initializer
        self._initargs = initargs

        if processes is None:
            try:
                processes = cpu_count()
            except NotImplementedError:
                processes = 1
        if processes < 1:
            raise ValueError("Number of processes must be at least 1")

        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')

        self._processes = processes
        self._pool = []
        self._repopulate_pool()

        self._worker_handler = threading.Thread(
            target=Pool._handle_workers,
            args=(self, )
            )
        self._worker_handler.daemon = True
        self._worker_handler._state = RUN
        self._worker_handler.start()

        self._task_handler = threading.Thread(
            target=Pool._handle_tasks,
            args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
            )
        self._task_handler.daemon = True
        self._task_handler._state = RUN
        self._task_handler.start()

        self._result_handler = threading.Thread(
            target=Pool._handle_results,
            args=(self._outqueue, self._quick_get, self._cache)
            )
        self._result_handler.daemon = True
        self._result_handler._state = RUN
        self._result_handler.start()

        self._terminate = Finalize(
            self, self._terminate_pool,
            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
                  self._worker_handler, self._task_handler,
                  self._result_handler, self._cache),
            exitpriority=15
            )
					
						
    def _join_exited_workers(self):
        """Cleanup after any worker processes which have exited due to reaching
        their specified lifetime.  Returns True if any workers were cleaned up.
        """
        cleaned = False
        for i in reversed(range(len(self._pool))):
            worker = self._pool[i]
            if worker.exitcode is not None:
                # worker exited
                debug('cleaning up worker %d' % i)
                worker.join()
                cleaned = True
                del self._pool[i]
        return cleaned

    def _repopulate_pool(self):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        for i in range(self._processes - len(self._pool)):
            w = self.Process(target=worker,
                             args=(self._inqueue, self._outqueue,
                                   self._initializer,
                                   self._initargs, self._maxtasksperchild)
                            )
            self._pool.append(w)
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            debug('added worker')

    def _maintain_pool(self):
        """Clean up any exited workers and start replacements for them.
        """
        if self._join_exited_workers():
            self._repopulate_pool()
					
						
    def _setup_queues(self):
        from .queues import SimpleQueue
        self._inqueue = SimpleQueue()
        self._outqueue = SimpleQueue()
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv

    def apply(self, func, args=(), kwds={}):
        '''
        Equivalent of `func(*args, **kwds)`.
        '''
        assert self._state == RUN
        return self.apply_async(func, args, kwds).get()
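
    # For example (illustrative): pool.apply(divmod, (10, 3)) blocks until a
    # worker returns and evaluates to (3, 1).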
					
						
    def map(self, func, iterable, chunksize=None):
        '''
        Apply `func` to each element in `iterable`, collecting the results
        in a list that is returned.
        '''
        assert self._state == RUN
        return self._map_async(func, iterable, mapstar, chunksize).get()

    def starmap(self, func, iterable, chunksize=None):
        '''
        Like `map()` method but the elements of the `iterable` are expected to
        be iterables as well and will be unpacked as arguments. Hence
        `func` and (a, b) becomes func(a, b).
        '''
        assert self._state == RUN
        return self._map_async(func, iterable, starmapstar, chunksize).get()
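
    # For example (illustrative): pool.starmap(pow, [(2, 3), (10, 2)])
    # evaluates to [8, 100].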
					
						
    def starmap_async(self, func, iterable, chunksize=None, callback=None,
            error_callback=None):
        '''
        Asynchronous version of `starmap()` method.
        '''
        assert self._state == RUN
        return self._map_async(func, iterable, starmapstar, chunksize,
                               callback, error_callback)
					
						
    def imap(self, func, iterable, chunksize=1):
        '''
        Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
        '''
        assert self._state == RUN
        if chunksize == 1:
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                         for i, x in enumerate(iterable)), result._set_length))
            return result
        else:
            assert chunksize > 1
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                     for i, x in enumerate(task_batches)), result._set_length))
            return (item for chunk in result for item in chunk)

    def imap_unordered(self, func, iterable, chunksize=1):
        '''
        Like `imap()` method but ordering of results is arbitrary.
        '''
        assert self._state == RUN
        if chunksize == 1:
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                         for i, x in enumerate(iterable)), result._set_length))
            return result
        else:
            assert chunksize > 1
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                     for i, x in enumerate(task_batches)), result._set_length))
            return (item for chunk in result for item in chunk)
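
    # Illustrative difference (not executed): list(pool.imap(f, xs)) preserves
    # the order of `xs`, while imap_unordered() yields each result as soon as
    # any worker produces it.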
					
						
    def apply_async(self, func, args=(), kwds={}, callback=None,
            error_callback=None):
        '''
        Asynchronous version of `apply()` method.
        '''
        assert self._state == RUN
        result = ApplyResult(self._cache, callback, error_callback)
        self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
        return result
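
    # For example (illustrative):
    #
    #     r = pool.apply_async(pow, (2, 10))
    #     r.get(timeout=5)        # -> 1024, or raises TimeoutError
    #
    # The optional callback/error_callback run in the result-handler thread.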
					
						
    def map_async(self, func, iterable, chunksize=None, callback=None,
            error_callback=None):
        '''
        Asynchronous version of `map()` method.
        '''
        assert self._state == RUN
        return self._map_async(func, iterable, mapstar, chunksize,
                               callback, error_callback)
					
						
    def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,
            error_callback=None):
        '''
        Helper function to implement map, starmap and their async counterparts.
        '''
        if not hasattr(iterable, '__len__'):
            iterable = list(iterable)

        if chunksize is None:
            chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
            if extra:
                chunksize += 1
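        # Worked example of the heuristic above (explanatory comment): with
        # 100 items and a 4-process pool, divmod(100, 16) == (6, 4), so
        # chunksize becomes 7 and the input is split into 15 batches.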
					
						
        if len(iterable) == 0:
            chunksize = 0

        task_batches = Pool._get_tasks(func, iterable, chunksize)
        result = MapResult(self._cache, chunksize, len(iterable), callback,
                           error_callback=error_callback)
        self._taskqueue.put((((result._job, i, mapper, (x,), {})
                              for i, x in enumerate(task_batches)), None))
        return result
					
						
    @staticmethod
    def _handle_workers(pool):
        thread = threading.current_thread()

        # Keep maintaining workers until the cache gets drained, unless the pool
        # is terminated.
        while thread._state == RUN or (pool._cache and thread._state != TERMINATE):
            pool._maintain_pool()
            time.sleep(0.1)
        # send sentinel to stop workers
        pool._taskqueue.put(None)
        debug('worker handler exiting')
					
						
    @staticmethod
    def _handle_tasks(taskqueue, put, outqueue, pool):
        thread = threading.current_thread()

        for taskseq, set_length in iter(taskqueue.get, None):
            i = -1
            for i, task in enumerate(taskseq):
                if thread._state:
                    debug('task handler found thread._state != RUN')
                    break
                try:
                    put(task)
                except IOError:
                    debug('could not put task on queue')
                    break
            else:
                if set_length:
                    debug('doing set_length()')
                    set_length(i+1)
                continue
            break
        else:
            debug('task handler got sentinel')

        try:
            # tell result handler to finish when cache is empty
            debug('task handler sending sentinel to result handler')
            outqueue.put(None)

            # tell workers there is no more work
            debug('task handler sending sentinel to workers')
            for p in pool:
                put(None)
        except IOError:
            debug('task handler got IOError when sending sentinels')

        debug('task handler exiting')
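
    # Note on the loop above (explanatory comment): two-argument iter() turns
    # a callable into an iterator that stops at a sentinel, e.g.
    # iter(q.get, None) keeps yielding q.get() results until one equals None.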
					
						
    @staticmethod
    def _handle_results(outqueue, get, cache):
        thread = threading.current_thread()

        while 1:
            try:
                task = get()
            except (IOError, EOFError):
                debug('result handler got EOFError/IOError -- exiting')
                return

            if thread._state:
                assert thread._state == TERMINATE
                debug('result handler found thread._state=TERMINATE')
                break

            if task is None:
                debug('result handler got sentinel')
                break

            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass

        while cache and thread._state != TERMINATE:
            try:
                task = get()
            except (IOError, EOFError):
                debug('result handler got EOFError/IOError -- exiting')
                return

            if task is None:
                debug('result handler ignoring extra sentinel')
                continue
            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass

        if hasattr(outqueue, '_reader'):
            debug('ensuring that outqueue is not full')
            # If we don't make room available in outqueue then
            # attempts to add the sentinel (None) to outqueue may
            # block.  There is guaranteed to be no more than 2 sentinels.
            try:
                for i in range(10):
                    if not outqueue._reader.poll():
                        break
                    get()
            except (IOError, EOFError):
                pass

        debug('result handler exiting: len(cache)=%s, thread._state=%s',
              len(cache), thread._state)
					
						
    @staticmethod
    def _get_tasks(func, it, size):
        it = iter(it)
        while 1:
            x = tuple(itertools.islice(it, size))
            if not x:
                return
            yield (func, x)
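
    # Illustrative chunking (hypothetical call, explanatory comment):
    # list(Pool._get_tasks(f, range(5), 2)) would be
    # [(f, (0, 1)), (f, (2, 3)), (f, (4,))].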
					
						
    def __reduce__(self):
        raise NotImplementedError(
              'pool objects cannot be passed between processes or pickled'
              )

    def close(self):
        debug('closing pool')
        if self._state == RUN:
            self._state = CLOSE
            self._worker_handler._state = CLOSE

    def terminate(self):
        debug('terminating pool')
        self._state = TERMINATE
        self._worker_handler._state = TERMINATE
        self._terminate()

    def join(self):
        debug('joining pool')
        assert self._state in (CLOSE, TERMINATE)
        self._worker_handler.join()
        self._task_handler.join()
        self._result_handler.join()
        for p in self._pool:
            p.join()
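
    # Shutdown sketch (illustrative): close() followed by join() lets queued
    # tasks finish, while terminate() stops workers immediately; terminate()
    # is also what the Finalize object registered in __init__ invokes when
    # the pool is garbage-collected or the interpreter exits.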
					
						
    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # task_handler may be blocked trying to put items on inqueue
        debug('removing tasks from inqueue until task handler finished')
        inqueue._rlock.acquire()
        while task_handler.is_alive() and inqueue._reader.poll():
            inqueue._reader.recv()
            time.sleep(0)
					
						
    @classmethod
    def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
                        worker_handler, task_handler, result_handler, cache):
        # this is guaranteed to only be called once
        debug('finalizing pool')

        worker_handler._state = TERMINATE
        task_handler._state = TERMINATE

        debug('helping task handler/workers to finish')
        cls._help_stuff_finish(inqueue, task_handler, len(pool))

        assert result_handler.is_alive() or len(cache) == 0

        result_handler._state = TERMINATE
        outqueue.put(None)                  # sentinel

        # We must wait for the worker handler to exit before terminating
        # workers because we don't want workers to be restarted behind our back.
        debug('joining worker handler')
        worker_handler.join()

        # Terminate workers which haven't already finished.
        if pool and hasattr(pool[0], 'terminate'):
            debug('terminating workers')
            for p in pool:
                if p.exitcode is None:
                    p.terminate()

        debug('joining task handler')
        task_handler.join()

        debug('joining result handler')
        result_handler.join()

        if pool and hasattr(pool[0], 'terminate'):
            debug('joining pool workers')
            for p in pool:
                if p.is_alive():
                    # worker has not yet exited
                    debug('cleaning up worker %d' % p.pid)
                    p.join()
					
						
#
# Class whose instances are returned by `Pool.apply_async()`
#

class ApplyResult(object):

    def __init__(self, cache, callback, error_callback):
        self._event = threading.Event()
        self._job = next(job_counter)
        self._cache = cache
        self._callback = callback
        self._error_callback = error_callback
        cache[self._job] = self

    def ready(self):
        return self._event.is_set()

    def successful(self):
        assert self.ready()
        return self._success

    def wait(self, timeout=None):
        self._event.wait(timeout)

    def get(self, timeout=None):
        self.wait(timeout)
        if not self.ready():
            raise TimeoutError
        if self._success:
            return self._value
        else:
            raise self._value

    def _set(self, i, obj):
        self._success, self._value = obj
        if self._callback and self._success:
            self._callback(self._value)
        if self._error_callback and not self._success:
            self._error_callback(self._value)
        self._event.set()
        del self._cache[self._job]
					
						
#
# Class whose instances are returned by `Pool.map_async()`
#

class MapResult(ApplyResult):

    def __init__(self, cache, chunksize, length, callback, error_callback):
        ApplyResult.__init__(self, cache, callback,
                             error_callback=error_callback)
        self._success = True
        self._value = [None] * length
        self._chunksize = chunksize
        if chunksize <= 0:
            self._number_left = 0
            self._event.set()
        else:
            self._number_left = length//chunksize + bool(length % chunksize)

    def _set(self, i, success_result):
        success, result = success_result
        if success:
            self._value[i*self._chunksize:(i+1)*self._chunksize] = result
            self._number_left -= 1
            if self._number_left == 0:
                if self._callback:
                    self._callback(self._value)
                del self._cache[self._job]
                self._event.set()
        else:
            self._success = False
            self._value = result
            if self._error_callback:
                self._error_callback(self._value)
            del self._cache[self._job]
            self._event.set()
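
# Worked example of the bookkeeping above (explanatory comment): for
# length=10 and chunksize=3, _number_left starts at
# 10//3 + bool(10 % 3) == 4, one per expected result chunk.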
					
						
#
# Class whose instances are returned by `Pool.imap()`
#

class IMapIterator(object):

    def __init__(self, cache):
        self._cond = threading.Condition(threading.Lock())
        self._job = next(job_counter)
        self._cache = cache
        self._items = collections.deque()
        self._index = 0
        self._length = None
        self._unsorted = {}
        cache[self._job] = self

    def __iter__(self):
        return self

    def next(self, timeout=None):
        self._cond.acquire()
        try:
            try:
                item = self._items.popleft()
            except IndexError:
                if self._index == self._length:
                    raise StopIteration
                self._cond.wait(timeout)
                try:
                    item = self._items.popleft()
                except IndexError:
                    if self._index == self._length:
                        raise StopIteration
                    raise TimeoutError
        finally:
            self._cond.release()

        success, value = item
        if success:
            return value
        raise value

    __next__ = next                    # XXX

    def _set(self, i, obj):
        self._cond.acquire()
        try:
            if self._index == i:
                self._items.append(obj)
                self._index += 1
                while self._index in self._unsorted:
                    obj = self._unsorted.pop(self._index)
                    self._items.append(obj)
                    self._index += 1
                self._cond.notify()
            else:
                self._unsorted[i] = obj

            if self._index == self._length:
                del self._cache[self._job]
        finally:
            self._cond.release()

    def _set_length(self, length):
        self._cond.acquire()
        try:
            self._length = length
            if self._index == self._length:
                self._cond.notify()
                del self._cache[self._job]
        finally:
            self._cond.release()
					
						
#
# Class whose instances are returned by `Pool.imap_unordered()`
#

class IMapUnorderedIterator(IMapIterator):

    def _set(self, i, obj):
        self._cond.acquire()
        try:
            self._items.append(obj)
            self._index += 1
            self._cond.notify()
            if self._index == self._length:
                del self._cache[self._job]
        finally:
            self._cond.release()
					
						
#
#
#

class ThreadPool(Pool):

    from .dummy import Process

    def __init__(self, processes=None, initializer=None, initargs=()):
        Pool.__init__(self, processes, initializer, initargs)

    def _setup_queues(self):
        self._inqueue = queue.Queue()
        self._outqueue = queue.Queue()
        self._quick_put = self._inqueue.put
        self._quick_get = self._outqueue.get

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # put sentinels at head of inqueue to make workers finish
        inqueue.not_empty.acquire()
        try:
            inqueue.queue.clear()
            inqueue.queue.extend([None] * size)
            inqueue.not_empty.notify_all()
        finally:
            inqueue.not_empty.release()
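
# Usage sketch (illustrative, assumes a module-level `f`): ThreadPool exposes
# the same API as Pool but runs tasks in threads instead of processes, e.g.
#
#     tp = ThreadPool(4)
#     tp.map(f, range(4))
#     tp.close()
#     tp.join()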