1# Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
2#
3# Permission to use, copy, modify, and distribute this software and its
4# documentation for any purpose and without fee is hereby granted,
5# provided that the above copyright notice appear in all copies and that
6# both that copyright notice and this permission notice appear in
7# supporting documentation, and that the name of Vinay Sajip
8# not be used in advertising or publicity pertaining to distribution
9# of the software without specific, written prior permission.
10# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
11# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
12# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
13# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
14# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
15# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16
17"""
18Additional handlers for the logging package for Python. The core package is
19based on PEP 282 and comments thereto in comp.lang.python.
20
21Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
22
23To use, simply 'import logging.handlers' and log away!
24"""
25
26import io, logging, socket, os, pickle, struct, time, re
27from stat import ST_DEV, ST_INO, ST_MTIME
28import queue
29import threading
30import copy
31
#
# Some constants...
#

# Default ports used by the socket-based handlers defined in this module.
DEFAULT_TCP_LOGGING_PORT    = 9020
DEFAULT_UDP_LOGGING_PORT    = 9021
DEFAULT_HTTP_LOGGING_PORT   = 9022
DEFAULT_SOAP_LOGGING_PORT   = 9023
# Conventional syslog ports (used by SysLogHandler below).
SYSLOG_UDP_PORT             = 514
SYSLOG_TCP_PORT             = 514

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day
44
class BaseRotatingHandler(logging.FileHandler):
    """
    Abstract base for file handlers that roll their log file over at some
    trigger point.  Not meant to be instantiated directly - use
    RotatingFileHandler or TimedRotatingFileHandler instead.

    Subclasses must provide shouldRollover() and doRollover().  The class
    attributes 'namer' and 'rotator' may be set to callables to customize
    how rotated files are named and how rotation is performed.
    """
    namer = None
    rotator = None

    def __init__(self, filename, mode, encoding=None, delay=False, errors=None):
        """
        Open *filename* and use it as the stream for logging.
        """
        logging.FileHandler.__init__(self, filename, mode=mode,
                                     encoding=encoding, delay=delay,
                                     errors=errors)
        self.mode = mode
        self.encoding = encoding
        self.errors = errors

    def emit(self, record):
        """
        Emit a record.

        Roll the file over first if shouldRollover() says the trigger
        point has been reached (see doRollover()), then write the record.
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except Exception:
            self.handleError(record)

    def rotation_filename(self, default_name):
        """
        Modify the filename of a log file when rotating.

        If the 'namer' attribute is callable it is invoked with the
        default name and its result is used; otherwise (the default,
        None) the name is returned unchanged.

        :param default_name: The default name for the log file.
        """
        if callable(self.namer):
            return self.namer(default_name)
        return default_name

    def rotate(self, source, dest):
        """
        When rotating, rotate the current log.

        If the 'rotator' attribute is callable it is invoked with the
        source and dest arguments; otherwise (the default, None) the
        source is simply renamed to the destination.

        :param source: The source filename. This is normally the base
                       filename, e.g. 'test.log'
        :param dest:   The destination filename. This is normally
                       what the source is rotated to, e.g. 'test.log.1'.
        """
        if callable(self.rotator):
            self.rotator(source, dest)
        else:
            # Issue 18940: with delay=True the file may never have been
            # created, so only rename it if it actually exists.
            if os.path.exists(source):
                os.rename(source, dest)
118
class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler that logs to a set of files, switching from the current file
    to the next one once the current file reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
                 encoding=None, delay=False, errors=None):
        """
        Open the specified file and use it as the stream for logging.

        With the default maxBytes of 0 the file grows indefinitely and no
        rollover ever occurs.  With maxBytes > 0 a rollover happens as
        soon as the next record would take the file to (nearly) maxBytes.

        With backupCount >= 1 the rotated files carry numeric suffixes:
        the live file is always e.g. "app.log"; on rollover it is renamed
        to "app.log.1", while existing "app.log.1", "app.log.2", ... are
        shifted up by one, keeping at most backupCount of them (so with
        backupCount=5 you get "app.log" plus "app.log.1" .. "app.log.5").
        """
        # Rollover only makes sense in append mode: honouring e.g. 'w'
        # would truncate (and so lose) the previous run's log on each
        # startup of the calling application.
        if maxBytes > 0:
            mode = 'a'
        if "b" not in mode:
            encoding = io.text_encoding(encoding)
        BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding,
                                     delay=delay, errors=errors)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            # Shift app.log.1 -> app.log.2, ..., dropping the oldest.
            for i in range(self.backupCount - 1, 0, -1):
                src = self.rotation_filename(f"{self.baseFilename}.{i}")
                dst = self.rotation_filename(f"{self.baseFilename}.{i + 1}")
                if os.path.exists(src):
                    if os.path.exists(dst):
                        os.remove(dst)
                    os.rename(src, dst)
            dst = self.rotation_filename(self.baseFilename + ".1")
            if os.path.exists(dst):
                os.remove(dst)
            self.rotate(self.baseFilename, dst)
        if not self.delay:
            self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Checks whether writing the supplied record would push the file
        past the configured size limit.
        """
        # See bpo-45401: Never rollover anything other than regular files
        if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
            return False
        if self.stream is None:  # delay was set, so open lazily now
            self.stream = self._open()
        if self.maxBytes <= 0:  # size-based rollover disabled
            return False
        msg = "%s\n" % self.format(record)
        # Seek to the end explicitly: on Windows (non-POSIX behaviour)
        # the file position is not guaranteed to be at EOF in append mode.
        self.stream.seek(0, 2)
        return self.stream.tell() + len(msg) >= self.maxBytes
201
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0,
                 encoding=None, delay=False, utc=False, atTime=None,
                 errors=None):
        """
        Open *filename* for appending and set up time-based rollover.

        :param when:        Type of rollover interval: 'S', 'M', 'H', 'D',
                            'W0'-'W6' or 'MIDNIGHT' (case-insensitive; see
                            the table in the comments below).
        :param interval:    Number of 'when' units between rollovers.
        :param backupCount: If > 0, keep at most this many rotated files,
                            deleting the oldest on rollover.
        :param utc:         If true, compute times in UTC rather than
                            local time.
        :param atTime:      Time-of-day at which rollover occurs for
                            'MIDNIGHT' and weekly rollovers; the code reads
                            its .hour/.minute/.second attributes, so a
                            datetime.time instance is expected.
        """
        encoding = io.text_encoding(encoding)
        BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding,
                                     delay=delay, errors=errors)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        self.atTime = atTime
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers.  Also set the filename suffix used when
        # a rollover occurs.  Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        # The date/time suffixes are plain ASCII, hence re.ASCII.
        self.extMatch = re.compile(self.extMatch, re.ASCII)
        self.interval = self.interval * interval # multiply by units requested
        # The following line added because the filename passed in could be a
        # path object (see Issue #27493), but self.baseFilename will be a string
        filename = self.baseFilename
        # Base the first rollover on the existing file's mtime if there is
        # one, otherwise on the current time.
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.

        Returns an epoch timestamp (seconds) for the next rollover.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is.  In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now.  So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time.  After that, the regular interval will take care of
        # the rest.  Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            currentDay = t[6]
            # r is the number of seconds left between now and the next rotation
            if self.atTime is None:
                rotate_ts = _MIDNIGHT
            else:
                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
                    self.atTime.second)

            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
                currentSecond)
            if r < 0:
                # Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it now 14:15), rotation is
                # tomorrow.
                r += _MIDNIGHT
                currentDay = (currentDay + 1) % 7
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts.  There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = currentDay # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        # t[-1] is tm_isdst for the current local time.
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                addend = -3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        # See bpo-45401: Never rollover anything other than regular files
        if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
            return False
        t = int(time.time())
        if t >= self.rolloverAt:
            return True
        return False

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        Returns a sorted list of full paths; at most backupCount of the
        newest matching files are retained.
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        # See bpo-44753: Don't use the extension when computing the prefix.
        prefix = os.path.splitext(baseName)[0] + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                # See bpo-45628: The date/time suffix could be anywhere in the
                # filename
                parts = suffix.split('.')
                for part in parts:
                    if self.extMatch.match(part):
                        result.append(os.path.join(dirName, fileName))
                        break
        if len(result) < self.backupCount:
            result = []
        else:
            result.sort()
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        currentTime = int(time.time())
        dstNow = time.localtime(currentTime)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            dstThen = timeTuple[-1]
            # A DST transition occurred during the interval: shift the
            # stamp so it reflects the interval's wall-clock start.
            if dstNow != dstThen:
                if dstNow:
                    addend = 3600
                else:
                    addend = -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, timeTuple))
        if os.path.exists(dfn):
            os.remove(dfn)
        self.rotate(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        # Make sure the next rollover time is strictly in the future.
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                newRolloverAt += addend
        self.rolloverAt = newRolloverAt
437
438class WatchedFileHandler(logging.FileHandler):
439    """
440    A handler for logging to a file, which watches the file
441    to see if it has changed while in use. This can happen because of
442    usage of programs such as newsyslog and logrotate which perform
443    log file rotation. This handler, intended for use under Unix,
444    watches the file to see if it has changed since the last emit.
445    (A file has changed if its device or inode have changed.)
446    If it has changed, the old file stream is closed, and the file
447    opened to get a new stream.
448
449    This handler is not appropriate for use under Windows, because
450    under Windows open files cannot be moved or renamed - logging
451    opens the files with exclusive locks - and so there is no need
452    for such a handler. Furthermore, ST_INO is not supported under
453    Windows; stat always returns zero for this value.
454
455    This handler is based on a suggestion and patch by Chad J.
456    Schroeder.
457    """
458    def __init__(self, filename, mode='a', encoding=None, delay=False,
459                 errors=None):
460        if "b" not in mode:
461            encoding = io.text_encoding(encoding)
462        logging.FileHandler.__init__(self, filename, mode=mode,
463                                     encoding=encoding, delay=delay,
464                                     errors=errors)
465        self.dev, self.ino = -1, -1
466        self._statstream()
467
468    def _statstream(self):
469        if self.stream:
470            sres = os.fstat(self.stream.fileno())
471            self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
472
473    def reopenIfNeeded(self):
474        """
475        Reopen log file if needed.
476
477        Checks if the underlying file has changed, and if it
478        has, close the old stream and reopen the file to get the
479        current stream.
480        """
481        # Reduce the chance of race conditions by stat'ing by path only
482        # once and then fstat'ing our new fd if we opened a new log stream.
483        # See issue #14632: Thanks to John Mulligan for the problem report
484        # and patch.
485        try:
486            # stat the file by path, checking for existence
487            sres = os.stat(self.baseFilename)
488        except FileNotFoundError:
489            sres = None
490        # compare file system stat with that of our stream file handle
491        if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
492            if self.stream is not None:
493                # we have an open file handle, clean it up
494                self.stream.flush()
495                self.stream.close()
496                self.stream = None  # See Issue #21742: _open () might fail.
497                # open a new file handle and get new stat info from that fd
498                self.stream = self._open()
499                self._statstream()
500
501    def emit(self, record):
502        """
503        Emit a record.
504
505        If underlying file has changed, reopen the file before emitting the
506        record to it.
507        """
508        self.reopenIfNeeded()
509        logging.FileHandler.emit(self, record)
510
511
class SocketHandler(logging.Handler):
    """
    Writes logging records, in pickle format, to a streaming socket which
    is kept open across logging calls; if the peer resets it, a reconnect
    is attempted on the next call.  What is sent is a pickle of the
    LogRecord's attribute dictionary (__dict__), so the receiver does not
    need the logging module installed to process the event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        When the attribute *closeOnError* is set to True - if a socket error
        occurs, the socket is silently closed and then reopened on the next
        logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        # A port of None means *host* is an AF_UNIX socket path.
        self.address = host if port is None else (host, port)
        self.sock = None
        self.closeOnError = False
        self.retryTime = None
        # Exponential backoff parameters for reconnect attempts.
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        if self.port is None:
            # AF_UNIX: *address* is a filesystem path.
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.settimeout(timeout)
            try:
                sock.connect(self.address)
            except OSError:
                sock.close()  # Issue 19182
                raise
            return sock
        return socket.create_connection(self.address, timeout=timeout)

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # retryTime is None on the first attempt after a disconnect;
        # otherwise wait until the backoff window has elapsed.
        if self.retryTime is not None and now < self.retryTime:
            return
        try:
            self.sock = self.makeSocket()
            self.retryTime = None  # next time, no delay before trying
        except OSError:
            # Creation failed: schedule the next attempt, growing the
            # backoff period geometrically up to retryMax.
            if self.retryTime is None:
                self.retryPeriod = self.retryStart
            else:
                self.retryPeriod = min(self.retryPeriod * self.retryFactor,
                                       self.retryMax)
            self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        # self.sock stays None if we are still inside the backoff window,
        # or if the reconnect attempt itself failed.
        if self.sock:
            try:
                self.sock.sendall(s)
            except OSError:  # pragma: no cover
                self.sock.close()
                self.sock = None  # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        if record.exc_info:
            # format() has the side effect of filling in record.exc_text.
            self.format(record)
        # See issue #14436: If msg or args are objects, they may not be
        # available on the receiving end, so pre-render the message and
        # drop the args.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        d['exc_info'] = None
        # Issue #25685: delete 'message' if present: redundant with 'msg'
        d.pop('message', None)
        payload = pickle.dumps(d, 1)
        return struct.pack(">L", len(payload)) + payload

    def handleError(self, record):
        """
        Handle an error during logging.

        Most likely cause: connection lost.  When closeOnError is set,
        close the socket so we can retry on the next event; otherwise
        defer to the base class behaviour.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None  # try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet;
        the socket will be re-established on a subsequent call.
        """
        try:
            self.send(self.makePickle(record))
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            sock, self.sock = self.sock, None
            if sock:
                sock.close()
            logging.Handler.close(self)
        finally:
            self.release()
678
class DatagramHandler(SocketHandler):
    """
    Like SocketHandler, but writes the pickled LogRecord attribute
    dictionary (__dict__) to a datagram socket instead of a stream, so
    the receiver does not need the logging module installed to process
    the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = False

    def makeSocket(self):
        """
        Override SocketHandler's factory method to create a UDP socket
        (SOCK_DGRAM) rather than a stream socket.
        """
        family = socket.AF_UNIX if self.port is None else socket.AF_INET
        return socket.socket(family, socket.SOCK_DGRAM)

    def send(self, s):
        """
        Send a pickled string to a socket.

        Unlike the stream version this makes a single sendto() call -
        UDP does not guarantee delivery and can deliver packets out of
        sequence, so partial-send handling does not apply.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, self.address)
720
class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """

    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code.  This
    # mapping is included in this file.
    #
    # priorities (these are ordered)

    LOG_EMERG     = 0       #  system is unusable
    LOG_ALERT     = 1       #  action must be taken immediately
    LOG_CRIT      = 2       #  critical conditions
    LOG_ERR       = 3       #  error conditions
    LOG_WARNING   = 4       #  warning conditions
    LOG_NOTICE    = 5       #  normal but significant condition
    LOG_INFO      = 6       #  informational
    LOG_DEBUG     = 7       #  debug-level messages

    #  facility codes
    LOG_KERN      = 0       #  kernel messages
    LOG_USER      = 1       #  random user-level messages
    LOG_MAIL      = 2       #  mail system
    LOG_DAEMON    = 3       #  system daemons
    LOG_AUTH      = 4       #  security/authorization messages
    LOG_SYSLOG    = 5       #  messages generated internally by syslogd
    LOG_LPR       = 6       #  line printer subsystem
    LOG_NEWS      = 7       #  network news subsystem
    LOG_UUCP      = 8       #  UUCP subsystem
    LOG_CRON      = 9       #  clock daemon
    LOG_AUTHPRIV  = 10      #  security/authorization messages (private)
    LOG_FTP       = 11      #  FTP daemon
    LOG_NTP       = 12      #  NTP subsystem
    LOG_SECURITY  = 13      #  Log audit
    LOG_CONSOLE   = 14      #  Log alert
    LOG_SOLCRON   = 15      #  Scheduling daemon (Solaris)

    #  other codes through 15 reserved for system use
    LOG_LOCAL0    = 16      #  reserved for local use
    LOG_LOCAL1    = 17      #  reserved for local use
    LOG_LOCAL2    = 18      #  reserved for local use
    LOG_LOCAL3    = 19      #  reserved for local use
    LOG_LOCAL4    = 20      #  reserved for local use
    LOG_LOCAL5    = 21      #  reserved for local use
    LOG_LOCAL6    = 22      #  reserved for local use
    LOG_LOCAL7    = 23      #  reserved for local use

    # Maps priority name strings (as accepted by encodePriority) to the
    # numeric priority codes above.
    priority_names = {
        "alert":    LOG_ALERT,
        "crit":     LOG_CRIT,
        "critical": LOG_CRIT,
        "debug":    LOG_DEBUG,
        "emerg":    LOG_EMERG,
        "err":      LOG_ERR,
        "error":    LOG_ERR,        #  DEPRECATED
        "info":     LOG_INFO,
        "notice":   LOG_NOTICE,
        "panic":    LOG_EMERG,      #  DEPRECATED
        "warn":     LOG_WARNING,    #  DEPRECATED
        "warning":  LOG_WARNING,
        }

    # Maps facility name strings (as accepted by encodePriority) to the
    # numeric facility codes above.
    facility_names = {
        "auth":         LOG_AUTH,
        "authpriv":     LOG_AUTHPRIV,
        "console":      LOG_CONSOLE,
        "cron":         LOG_CRON,
        "daemon":       LOG_DAEMON,
        "ftp":          LOG_FTP,
        "kern":         LOG_KERN,
        "lpr":          LOG_LPR,
        "mail":         LOG_MAIL,
        "news":         LOG_NEWS,
        "ntp":          LOG_NTP,
        "security":     LOG_SECURITY,
        "solaris-cron": LOG_SOLCRON,
        "syslog":       LOG_SYSLOG,
        "user":         LOG_USER,
        "uucp":         LOG_UUCP,
        "local0":       LOG_LOCAL0,
        "local1":       LOG_LOCAL1,
        "local2":       LOG_LOCAL2,
        "local3":       LOG_LOCAL3,
        "local4":       LOG_LOCAL4,
        "local5":       LOG_LOCAL5,
        "local6":       LOG_LOCAL6,
        "local7":       LOG_LOCAL7,
        }

    #The map below appears to be trivially lowercasing the key. However,
    #there's more to it than meets the eye - in some locales, lowercasing
    #gives unexpected results. See SF #1524081: in the Turkish locale,
    #"INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }

    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=None):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used. If socktype is
        specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
        socket type will be used. For Unix sockets, you can also specify a
        socktype of None, in which case socket.SOCK_DGRAM will be used, falling
        back to socket.SOCK_STREAM.
        """
        logging.Handler.__init__(self)

        self.address = address
        self.facility = facility
        self.socktype = socktype
        self.socket = None
        # Connect eagerly; for Unix sockets a connection failure here is
        # deliberately ignored (see createSocket) and retried in emit().
        self.createSocket()

    def _connect_unixsocket(self, address):
        """
        Connect self.socket to the Unix-domain socket at *address*.

        When self.socktype is None, try SOCK_DGRAM first and fall back to
        SOCK_STREAM; when the user specified a socktype explicitly, fail
        rather than falling back. On success, self.socktype records the
        socket type actually used.
        """
        use_socktype = self.socktype
        if use_socktype is None:
            use_socktype = socket.SOCK_DGRAM
        self.socket = socket.socket(socket.AF_UNIX, use_socktype)
        try:
            self.socket.connect(address)
            # it worked, so set self.socktype to the used type
            self.socktype = use_socktype
        except OSError:
            self.socket.close()
            if self.socktype is not None:
                # user didn't specify falling back, so fail
                raise
            use_socktype = socket.SOCK_STREAM
            self.socket = socket.socket(socket.AF_UNIX, use_socktype)
            try:
                self.socket.connect(address)
                # it worked, so set self.socktype to the used type
                self.socktype = use_socktype
            except OSError:
                self.socket.close()
                raise

    def createSocket(self):
        """
        Create the syslog socket according to self.address/self.socktype.

        A string address selects a Unix-domain socket (connection errors
        are ignored here); a (host, port) tuple selects an INET socket,
        trying each getaddrinfo result in turn and raising the last error
        if none succeeds.
        """
        address = self.address
        socktype = self.socktype

        if isinstance(address, str):
            self.unixsocket = True
            # Syslog server may be unavailable during handler initialisation.
            # C's openlog() function also ignores connection errors.
            # Moreover, we ignore these errors while logging, so it not worse
            # to ignore it also here.
            try:
                self._connect_unixsocket(address)
            except OSError:
                pass
        else:
            self.unixsocket = False
            if socktype is None:
                # Default to UDP, the traditional syslog transport.
                socktype = socket.SOCK_DGRAM
            host, port = address
            ress = socket.getaddrinfo(host, port, 0, socktype)
            if not ress:
                raise OSError("getaddrinfo returns an empty list")
            for res in ress:
                af, socktype, proto, _, sa = res
                # err/sock are reset each iteration, so after a successful
                # break err is None and the raise below is skipped.
                err = sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if socktype == socket.SOCK_STREAM:
                        # Only stream sockets need an explicit connect;
                        # datagrams are addressed per-send in emit().
                        sock.connect(sa)
                    break
                except OSError as exc:
                    err = exc
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            self.socket = sock
            self.socktype = socktype

    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if isinstance(facility, str):
            facility = self.facility_names[facility]
        if isinstance(priority, str):
            priority = self.priority_names[priority]
        # Syslog PRI value: facility in the high bits, priority in the
        # bottom 3 bits (matches the <linux/sys/syslog.h> encoding above).
        return (facility << 3) | priority

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            sock = self.socket
            if sock:
                # Clear the attribute first so a concurrent emit() sees
                # no socket and recreates one rather than using a closed one.
                self.socket = None
                sock.close()
            logging.Handler.close(self)
        finally:
            self.release()

    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).
        """
        return self.priority_map.get(levelName, "warning")

    ident = ''          # prepended to all messages
    append_nul = True   # some old syslog daemons expect a NUL terminator

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        try:
            msg = self.format(record)
            if self.ident:
                msg = self.ident + msg
            if self.append_nul:
                msg += '\000'

            # We need to convert record level to lowercase, maybe this will
            # change in the future.
            prio = '<%d>' % self.encodePriority(self.facility,
                                                self.mapPriority(record.levelname))
            prio = prio.encode('utf-8')
            # Message is a string. Convert to bytes as required by RFC 5424
            msg = msg.encode('utf-8')
            msg = prio + msg

            if not self.socket:
                # close() (or a failed createSocket) may have left us
                # without a socket; recreate it lazily.
                self.createSocket()

            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except OSError:
                    # The local syslog daemon may have been restarted;
                    # reconnect once and retry before giving up.
                    self.socket.close()
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except Exception:
            self.handleError(record)
994
class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None, timeout=5.0):
        """
        Initialize the handler.

        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument. To specify the use of a secure
        protocol (TLS), pass in a tuple for the secure argument. This will
        only be used when authentication credentials are supplied. The tuple
        will be either an empty tuple, or a single-value tuple with the name
        of a keyfile, or a 2-value tuple with the names of the keyfile and
        certificate file. (This tuple is passed to the `starttls` method).
        A timeout in seconds can be specified for the SMTP connection (the
        default is 5 seconds).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, (list, tuple)):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, (list, tuple)):
            self.username, self.password = credentials
        else:
            # Define both attributes even without credentials, so that
            # introspecting self.password never raises AttributeError.
            self.username = self.password = None
        self.fromaddr = fromaddr
        if isinstance(toaddrs, str):
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject
        self.secure = secure
        self.timeout = timeout

    def getSubject(self, record):
        """
        Determine the subject for the email.

        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        try:
            # Imported lazily so the module doesn't pay for smtplib/email
            # unless a record is actually emitted.
            import smtplib
            from email.message import EmailMessage
            import email.utils

            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
            msg = EmailMessage()
            msg['From'] = self.fromaddr
            msg['To'] = ','.join(self.toaddrs)
            msg['Subject'] = self.getSubject(record)
            msg['Date'] = email.utils.localtime()
            msg.set_content(self.format(record))
            if self.username:
                if self.secure is not None:
                    # starttls requires an EHLO both before and after the
                    # TLS negotiation.
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.send_message(msg)
            smtp.quit()
        except Exception:
            self.handleError(record)
1074
class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log, registering
    the given application name in the registry. When no dllname is given,
    win32service.pyd (which ships some basic message placeholders) is
    used; note that this makes event logs large, since the whole message
    source is stored in the log. For slimmer logs, pass the name of a DLL
    containing the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                # Default DLL lives two path components up from
                # win32evtlogutil's own location.
                welu_dir = os.path.split(self._welu.__file__)[0]
                parent = os.path.split(welu_dir)[0]
                dllname = os.path.join(parent, r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            self.typemap = {
                logging.DEBUG:    win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO:     win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING:  win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR:    win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            # Not on Windows (or pywin32 missing): degrade to a no-op.
            print("The Python Win32 extensions for NT (service, event "
                  "logging) appear not to be available.")
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string, then using a
        dictionary lookup here. This version returns 1, the base message
        ID in win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record; this version returns 0.
        Override to supply your own categories.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record, looked up by level number
        in the handler's typemap attribute (set up in __init__ with
        entries for DEBUG, INFO, WARNING, ERROR and CRITICAL). With
        custom levels, either override this method or install a suitable
        dictionary as the typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type, then
        log the formatted message in the NT event log.
        """
        if self._welu:
            try:
                id = self.getMessageID(record)
                cat = self.getEventCategory(record)
                type = self.getEventType(record)
                msg = self.format(record)
                self._welu.ReportEvent(self.appname, id, cat, type, [msg])
            except Exception:
                self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)
1172
class HTTPHandler(logging.Handler):
    """
    A class which ships each record to a web server, using either GET or
    POST semantics.
    """
    def __init__(self, host, url, method="GET", secure=False, credentials=None,
                 context=None):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST"). Raises ValueError for any other method, or when a
        TLS context is supplied without secure=True.
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ("GET", "POST"):
            raise ValueError("method must be GET or POST")
        if context is not None and not secure:
            raise ValueError("context parameter only makes sense "
                             "with secure=True")
        self.host = host
        self.url = url
        self.method = method
        self.secure = secure
        self.credentials = credentials
        self.context = context

    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__

    def getConnection(self, host, secure):
        """
        Get a HTTP[S]Connection.

        Override when a custom connection is required, for example if
        there is a proxy.
        """
        import http.client
        if secure:
            return http.client.HTTPSConnection(host, context=self.context)
        return http.client.HTTPConnection(host)

    def emit(self, record):
        """
        Emit a record.

        Send the record to the web server as a percent-encoded dictionary.
        """
        try:
            import urllib.parse
            host = self.host
            conn = self.getConnection(host, self.secure)
            data = urllib.parse.urlencode(self.mapLogRecord(record))
            url = self.url
            if self.method == "GET":
                # Append the data as a query string, extending any
                # existing one.
                sep = '&' if url.find('?') >= 0 else '?'
                url = url + "%c%s" % (sep, data)
            conn.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            i = host.find(":")
            if i >= 0:
                host = host[:i]
            # See issue #30904: putrequest call above already adds this header
            # on Python 3.x.
            # h.putheader("Host", host)
            if self.method == "POST":
                conn.putheader("Content-type",
                               "application/x-www-form-urlencoded")
                conn.putheader("Content-length", str(len(data)))
            if self.credentials:
                import base64
                creds = ('%s:%s' % self.credentials).encode('utf-8')
                auth = 'Basic ' + base64.b64encode(creds).strip().decode('ascii')
                conn.putheader('Authorization', auth)
            conn.endheaders()
            if self.method == "POST":
                conn.send(data.encode('utf-8'))
            conn.getresponse()    #can't do anything with the result
        except Exception:
            self.handleError(record)
1262
class BufferingHandler(logging.Handler):
    """
    A handler class which buffers logging records in memory. Every time a
    record is appended, shouldFlush() is consulted; when it returns true,
    flush() is expected to do whatever is needed with the buffer.
    """
    def __init__(self, capacity):
        """
        Initialize the handler with the maximum buffer size.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Should the handler flush its buffer?

        Return true when the buffer has reached capacity. Override to
        implement a custom flushing strategy.
        """
        return len(self.buffer) >= self.capacity

    def emit(self, record):
        """
        Emit a record.

        Append the record to the buffer, then flush if shouldFlush()
        says the buffer is due.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()

    def flush(self):
        """
        Override to implement custom flushing behaviour.

        This version simply empties the buffer, under the handler lock.
        """
        self.acquire()
        try:
            self.buffer.clear()
        finally:
            self.release()

    def close(self):
        """
        Close the handler.

        Flushes any remaining records, then chains to the parent close().
        """
        try:
            self.flush()
        finally:
            logging.Handler.close(self)
1319
class MemoryHandler(BufferingHandler):
    """
    A handler class which buffers logging records in memory and
    periodically flushes them to a target handler: whenever the buffer
    fills up, or whenever a record of a certain severity or greater
    arrives.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
                 flushOnClose=True):
        """
        Initialize the handler with the buffer size, the level at which
        flushing should occur and an optional target.

        Note that without a target being set either here or via setTarget(),
        a MemoryHandler is no use to anyone!

        ``flushOnClose`` defaults to ``True`` for backward compatibility:
        historically, closing the handler flushed the buffer even when
        neither the flush level nor the capacity had been reached. Pass
        ``False`` to suppress that final flush (see Issue #26559).
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target
        self.flushOnClose = flushOnClose

    def shouldFlush(self, record):
        """
        Flush when a record reaches flushLevel, or the buffer is full.
        """
        return (record.levelno >= self.flushLevel or
                len(self.buffer) >= self.capacity)

    def setTarget(self, target):
        """
        Set the target handler for this handler.
        """
        self.acquire()
        try:
            self.target = target
        finally:
            self.release()

    def flush(self):
        """
        For a MemoryHandler, flushing means handing the buffered records
        to the target handler and clearing the buffer. Override for
        different behaviour.

        With no target set, the buffer is left untouched.
        """
        self.acquire()
        try:
            target = self.target
            if target:
                for rec in self.buffer:
                    target.handle(rec)
                self.buffer.clear()
        finally:
            self.release()

    def close(self):
        """
        Flush (if so configured), drop the target, and lose the buffer.
        """
        try:
            if self.flushOnClose:
                self.flush()
        finally:
            self.acquire()
            try:
                self.target = None
                BufferingHandler.close(self)
            finally:
                self.release()
1395
1396
class QueueHandler(logging.Handler):
    """
    A handler that places logging events on a queue. The typical use is
    with a multiprocessing Queue, so that one process can centralise all
    file logging for a multi-process application and the others avoid
    contending for the log file.

    New in Python 3.2; the class can be copy-pasted into user code for
    use with earlier Python versions.
    """

    def __init__(self, queue):
        """
        Initialise an instance, using the passed queue.
        """
        logging.Handler.__init__(self)
        self.queue = queue

    def enqueue(self, record):
        """
        Put a record on the queue without blocking.

        Override to use blocking puts, timeouts, or a custom queue
        implementation.
        """
        self.queue.put_nowait(record)

    def prepare(self, record):
        """
        Return a picklable version of the record; the returned object is
        what gets enqueued.

        A shallow copy of the record is made, its `msg` and `message`
        attributes are replaced with the fully merged message (from the
        handler's `format` method), and `args`, `exc_info` and `exc_text`
        are reset to None, since those may not be picklable.

        Override to e.g. convert the record to a dict or JSON string, or
        to send a differently-modified copy.
        """
        # Formatting renders any traceback into record.exc_text, so the
        # exception detail survives even though exc_info is dropped; it
        # also merges msg + args, which might not be picklable as-is.
        msg = self.format(record)
        # bpo-35726: operate on a copy so other handlers in the chain
        # still see the unmodified record.
        record = copy.copy(record)
        record.message = msg
        record.msg = msg
        record.args = None
        record.exc_info = None
        record.exc_text = None
        return record

    def emit(self, record):
        """
        Emit a record: prepare it for pickling, then enqueue it.
        """
        try:
            self.enqueue(self.prepare(record))
        except Exception:
            self.handleError(record)
1467
1468
class QueueListener(object):
    """
    An internal threaded listener: watches a queue for incoming
    LogRecords, removes them, and passes each one to a list of handlers
    for processing.
    """
    _sentinel = None    # placed on the queue to ask the monitor thread to exit

    def __init__(self, queue, *handlers, respect_handler_level=False):
        """
        Initialise an instance with the queue to watch and the handlers
        that will process dequeued records.
        """
        self.queue = queue
        self.handlers = handlers
        self._thread = None
        self.respect_handler_level = respect_handler_level

    def dequeue(self, block):
        """
        Remove and return the next record, optionally blocking.

        Uses get(); override to add timeouts or work with custom queue
        implementations.
        """
        return self.queue.get(block)

    def start(self):
        """
        Start the listener: spin up a daemon thread that monitors the
        queue for LogRecords to process.
        """
        self._thread = t = threading.Thread(target=self._monitor)
        t.daemon = True
        t.start()

    def prepare(self, record):
        """
        Prepare a record for handling; this version returns it unchanged.
        Override for custom marshalling before the handlers see it.
        """
        return record

    def handle(self, record):
        """
        Offer the (prepared) record to every handler in turn, skipping
        handlers whose level is too high when respect_handler_level is
        set.
        """
        record = self.prepare(record)
        for handler in self.handlers:
            if self.respect_handler_level and record.levelno < handler.level:
                continue
            handler.handle(record)

    def _monitor(self):
        """
        Body of the internal thread: pull records off the queue and hand
        them to handle() until the sentinel is seen (or the queue reports
        Empty).
        """
        q = self.queue
        has_task_done = hasattr(q, 'task_done')
        while True:
            try:
                record = self.dequeue(True)
                if record is self._sentinel:
                    # Balance the sentinel's put for queue.join() users.
                    if has_task_done:
                        q.task_done()
                    break
                self.handle(record)
                if has_task_done:
                    q.task_done()
            except queue.Empty:
                break

    def enqueue_sentinel(self):
        """
        Put the sentinel record on the queue (non-blocking).

        Override to add timeouts or work with custom queue
        implementations.
        """
        self.queue.put_nowait(self._sentinel)

    def stop(self):
        """
        Stop the listener: enqueue the sentinel and wait for the monitor
        thread to terminate. Records still on the queue when the
        application exits will not be processed unless this is called
        first.
        """
        self.enqueue_sentinel()
        self._thread.join()
        self._thread = None
1577