diff -r 1f73355afebb Doc/howto/logging-cookbook.rst
--- a/Doc/howto/logging-cookbook.rst	Tue Aug 23 09:01:43 2016 +0000
+++ b/Doc/howto/logging-cookbook.rst	Wed Aug 24 01:01:41 2016 +0900
@@ -720,6 +720,83 @@
 
 .. currentmodule:: logging.handlers
 
+The simplest way to log to the same file from multiple processes is to
+configure logging in the main process and then use the logging module as if
+your code were not using multiprocessing at all. Note that this relies on
+child processes inheriting the parent's logging configuration, which is the
+case when processes are started by forking (the default on POSIX systems)::
+
+    # You'll need these imports in your own code
+    import logging
+    import multiprocessing
+    import sys
+
+    # Next two import lines for this demo only
+    from random import choice, random
+    import time
+
+    def log_configurer():
+        # First set the root logger to pass records on to its handlers:
+        # records at this level or above are passed on, and each handler
+        # can then apply its own level and filters.
+        root = logging.getLogger()
+        root.setLevel(logging.DEBUG)
+
+        # Create a handler to send only DEBUG records to the mptest.log file
+        h = logging.FileHandler('mptest.log', 'w')
+        # Add formatting for the log file
+        f = logging.Formatter('%(asctime)s %(processName)-10s %(name)s %(levelname)-8s %(message)s')
+        h.setFormatter(f)
+        # Filter out everything which isn't a DEBUG record
+        debug_only = logging.Filter()
+        debug_only.filter = lambda record: record.levelno == logging.DEBUG
+        h.addFilter(debug_only)
+        # Add the new handler to the root logger
+        root.addHandler(h)
+
+        # Send only INFO or higher level records to the console
+        stdout_handler = logging.StreamHandler(sys.stdout)
+        stdout_handler.setLevel(logging.INFO)
+        root.addHandler(stdout_handler)
+
+    # Lists used for random selections in this demo
+    LOGGERS = ['a.b.c', 'd.e.f']
+
+    MESSAGES = [
+        'Random message #1',
+        'Random message #2',
+        'Random message #3',
+    ]
+
+    # This is the worker process top-level loop, which just logs ten events
+    # with random intervening delays before terminating.
+    # The INFO messages are just so you know it's doing something!
+    def worker_process():
+        name = multiprocessing.current_process().name
+        logging.info('Worker started: %s', name)
+        for i in range(10):
+            time.sleep(random())
+            logger = choice(LOGGERS)
+            child_logger = logging.getLogger(logger)
+            message = choice(MESSAGES)
+            child_logger.debug(message)
+        logging.info('Worker finished: %s', name)
+
+    # Here's where the demo gets orchestrated: configure logging, create
+    # ten workers and start them, then wait for them to finish.
+    def main():
+        log_configurer()
+        workers = []
+        for i in range(10):
+            worker = multiprocessing.Process(target=worker_process)
+            workers.append(worker)
+            worker.start()
+        for w in workers:
+            w.join()
+
+    if __name__ == '__main__':
+        main()
+
 Alternatively, you can use a ``Queue`` and a :class:`QueueHandler` to send
 all logging events to one of the processes in your multi-process application.
 The following example script demonstrates how you can do this; in the example
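
An aside on the filter idiom used in ``log_configurer`` above: creating a bare
:class:`~logging.Filter` instance and overwriting its ``filter`` attribute
works, but since Python 3.2 :meth:`~logging.Handler.addFilter` also accepts
any callable taking a log record, so the same effect can be had more directly.
A minimal sketch, reusing the ``mptest.log`` handler from the patch; behaviour
is otherwise unchanged::

    import logging

    h = logging.FileHandler('mptest.log', 'w')
    # Since Python 3.2, a plain callable can serve as a filter: it is
    # called with the LogRecord, and the record is dropped if the call
    # returns a false value.
    h.addFilter(lambda record: record.levelno == logging.DEBUG)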