import logging
import logging.handlers
import multiprocessing
import multiprocessing.util

L = logging.getLogger(__name__)

_globalQueue = None
_globalListener = None


def basicsetup():
    # Set up the shared queue and the listener thread in the main process
    global _globalQueue
    global _globalListener

    cf = logging.Formatter("[{levelname}] {created:.7f} {name} ({process}~{processName}): {message}", style="{")
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(cf)

    # Subprocesses should use the queue to send log messages back to a thread in the main process
    _globalQueue = multiprocessing.Queue()
    _globalListener = logging.handlers.QueueListener(_globalQueue, handler, respect_handler_level=True)
    _globalListener.start()

    # Configure logging for the main process
    process_setup(get_queue())


def get_queue():
    # Accessor so the queue can be passed to pool initializers
    return _globalQueue


def process_setup(queue):
    # Route all log records from this process through the shared queue
    handler = logging.handlers.QueueHandler(queue)
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)


def do_work(i):
    # Do something that involves logging.
    # If nothing is logged, it works fine.
    L.info("Hello {} from MP".format(i))


if __name__ == "__main__":
    # Also fails with other start methods, but this is what I'm using in the actual application
    multiprocessing.set_start_method("spawn")
    # Optional, but gives more debugging info
    multiprocessing.util.log_to_stderr()

    # Configure logging
    basicsetup()

    # Set up multiprocessing pool, initialising logging in each subprocess
    with multiprocessing.Pool(initializer=process_setup, initargs=(get_queue(),)) as pl:
        # 100 seems to work fine, 500 fails most of the time.
        # If you're having trouble reproducing the error, try bumping this number up to 1000.
        pl.map(do_work, range(10000))

    if _globalListener is not None:
        # Stop the listener and join the thread it runs on.
        # If we don't do this, we may lose log messages when we exit.
        _globalListener.stop()