1"""
2    :codeauthor: Pedro Algarvio (pedro@algarvio.me)
3
4
5    salt.log.setup
6    ~~~~~~~~~~~~~~
7
8    This is where Salt's logging gets set up.
9
10    This module should be imported as soon as possible, preferably the first
11    module salt or any salt depending library imports so any new logging
12    logger instance uses our ``salt.log.setup.SaltLoggingClass``.
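
    A minimal usage sketch (illustrative only; everything beyond importing
    this module early is assumed application boilerplate):

    .. code-block:: python

        import salt.log.setup  # import early so new loggers use SaltLoggingClass

        log = salt.log.setup.getLogger(__name__)
        salt.log.setup.setup_console_logger(log_level="info")
        log.info("Console logging is now configured")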
13"""
14
15
16import logging
17import logging.handlers
18import multiprocessing
19import os
20import socket
21import sys
22import time
23import traceback
24import types
25import urllib.parse
26
27# pylint: disable=unused-import
28from salt._logging import (
29    LOG_COLORS,
30    LOG_LEVELS,
31    LOG_VALUES_TO_LEVELS,
32    SORTED_LEVEL_NAMES,
33)
34from salt._logging.handlers import (
35    FileHandler,
36    QueueHandler,
37    RotatingFileHandler,
38    StreamHandler,
39    SysLogHandler,
40    WatchedFileHandler,
41)
42from salt._logging.impl import (
43    LOGGING_NULL_HANDLER,
44    LOGGING_STORE_HANDLER,
45    LOGGING_TEMP_HANDLER,
46    SaltColorLogRecord,
47    SaltLogRecord,
48)
49from salt._logging.impl import set_log_record_factory as setLogRecordFactory
50
51# pylint: enable=unused-import
52
53__CONSOLE_CONFIGURED = False
54__LOGGING_CONSOLE_HANDLER = None
55__LOGFILE_CONFIGURED = False
56__LOGGING_LOGFILE_HANDLER = None
57__TEMP_LOGGING_CONFIGURED = False
58__EXTERNAL_LOGGERS_CONFIGURED = False
59__MP_LOGGING_LISTENER_CONFIGURED = False
60__MP_LOGGING_CONFIGURED = False
61__MP_LOGGING_QUEUE = None
62__MP_LOGGING_LEVEL = logging.GARBAGE
63__MP_LOGGING_QUEUE_PROCESS = None
64__MP_LOGGING_QUEUE_HANDLER = None
65__MP_IN_MAINPROCESS = multiprocessing.current_process().name == "MainProcess"
66__MP_MAINPROCESS_ID = None
67
68
69def is_console_configured():
70    return __CONSOLE_CONFIGURED
71
72
73def is_logfile_configured():
74    return __LOGFILE_CONFIGURED
75
76
77def is_logging_configured():
78    return __CONSOLE_CONFIGURED or __LOGFILE_CONFIGURED
79
80
81def is_temp_logging_configured():
82    return __TEMP_LOGGING_CONFIGURED
83
84
85def is_mp_logging_listener_configured():
86    return __MP_LOGGING_LISTENER_CONFIGURED
87
88
def is_mp_logging_configured():
    return __MP_LOGGING_CONFIGURED


def is_extended_logging_configured():
    return __EXTERNAL_LOGGERS_CONFIGURED


class SaltLogQueueHandler(QueueHandler):
    """
    Subclassed just to differentiate when debugging
    """


def getLogger(name):  # pylint: disable=C0103
    """
    This function is just a helper, an alias to:
        logging.getLogger(name)

    Although you might find it useful, there's no reason why you should not be
    using the aliased method.
    """
    return logging.getLogger(name)


def setup_temp_logger(log_level="error"):
    """
    Setup the temporary console logger
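
    A rough usage sketch (illustrative only): call this early, before the
    configuration has been parsed, then replace it with the real console
    logger:

    .. code-block:: python

        salt.log.setup.setup_temp_logger("debug")
        # ... parse configuration ...
        salt.log.setup.setup_console_logger(log_level="info")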
117    """
118    if is_temp_logging_configured():
119        logging.getLogger(__name__).warning("Temporary logging is already configured")
120        return
121
122    if log_level is None:
123        log_level = "warning"
124
125    level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)
126
127    handler = None
128    for handler in logging.root.handlers:
129        if handler in (LOGGING_NULL_HANDLER, LOGGING_STORE_HANDLER):
130            continue
131
132        if not hasattr(handler, "stream"):
133            # Not a stream handler, continue
134            continue
135
136        if handler.stream is sys.stderr:
137            # There's already a logging handler outputting to sys.stderr
138            break
139    else:
140        handler = LOGGING_TEMP_HANDLER
141    handler.setLevel(level)
142
143    # Set the default temporary console formatter config
144    formatter = logging.Formatter("[%(levelname)-8s] %(message)s", datefmt="%H:%M:%S")
145    handler.setFormatter(formatter)
146    logging.root.addHandler(handler)
147
148    # Sync the null logging handler messages with the temporary handler
149    if LOGGING_NULL_HANDLER is not None:
150        LOGGING_NULL_HANDLER.sync_with_handlers([handler])
151    else:
152        logging.getLogger(__name__).debug(
153            "LOGGING_NULL_HANDLER is already None, can't sync messages with it"
154        )
155
156    # Remove the temporary null logging handler
157    __remove_null_logging_handler()
158
159    global __TEMP_LOGGING_CONFIGURED
160    __TEMP_LOGGING_CONFIGURED = True
161
162
163def setup_console_logger(log_level="error", log_format=None, date_format=None):
164    """
165    Setup the console logger
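
    A minimal call sketch (parameter values are illustrative only):

    .. code-block:: python

        setup_console_logger(
            log_level="info",
            log_format="%(asctime)s [%(levelname)-8s] %(message)s",
            date_format="%H:%M:%S",
        )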
166    """
167    if is_console_configured():
168        logging.getLogger(__name__).warning("Console logging already configured")
169        return
170
171    # Remove the temporary logging handler
172    __remove_temp_logging_handler()
173
174    if log_level is None:
175        log_level = "warning"
176
177    level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)
178
179    setLogRecordFactory(SaltColorLogRecord)
180
181    handler = None
182    for handler in logging.root.handlers:
183        if handler is LOGGING_STORE_HANDLER:
184            continue
185
186        if not hasattr(handler, "stream"):
187            # Not a stream handler, continue
188            continue
189
190        if handler.stream is sys.stderr:
191            # There's already a logging handler outputting to sys.stderr
192            break
193    else:
194        handler = StreamHandler(sys.stderr)
195    handler.setLevel(level)
196
197    # Set the default console formatter config
198    if not log_format:
199        log_format = "[%(levelname)-8s] %(message)s"
200    if not date_format:
201        date_format = "%H:%M:%S"
202
203    formatter = logging.Formatter(log_format, datefmt=date_format)
204
205    handler.setFormatter(formatter)
206    logging.root.addHandler(handler)
207
208    global __CONSOLE_CONFIGURED
209    global __LOGGING_CONSOLE_HANDLER
210    __CONSOLE_CONFIGURED = True
211    __LOGGING_CONSOLE_HANDLER = handler
212
213
214def setup_logfile_logger(
215    log_path,
216    log_level="error",
217    log_format=None,
218    date_format=None,
219    max_bytes=0,
220    backup_count=0,
221):
222    """
223    Setup the logfile logger
224
225    Since version 0.10.6 we support logging to syslog, some examples:
226
227        tcp://localhost:514/LOG_USER
228        tcp://localhost/LOG_DAEMON
229        udp://localhost:5145/LOG_KERN
230        udp://localhost
231        file:///dev/log
232        file:///dev/log/LOG_SYSLOG
233        file:///dev/log/LOG_DAEMON
234
235    The above examples are self explanatory, but:
236        <file|udp|tcp>://<host|socketpath>:<port-if-required>/<log-facility>
237
238    If you're thinking on doing remote logging you might also be thinking that
239    you could point salt's logging to the remote syslog. **Please Don't!**
240    An issue has been reported when doing this over TCP when the logged lines
241    get concatenated. See #3061.
242
243    The preferred way to do remote logging is setup a local syslog, point
244    salt's logging to the local syslog(unix socket is much faster) and then
245    have the local syslog forward the log messages to the remote syslog.
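
    Two call sketches (paths and levels are illustrative only):

    .. code-block:: python

        # Plain logfile with rotation
        setup_logfile_logger(
            "/var/log/salt/minion",
            log_level="warning",
            max_bytes=10 * 1024 * 1024,
            backup_count=3,
        )

        # Local syslog over the unix socket, LOG_DAEMON facility
        setup_logfile_logger("file:///dev/log/LOG_DAEMON", log_level="info")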
246    """
247
248    if is_logfile_configured():
249        logging.getLogger(__name__).warning("Logfile logging already configured")
250        return
251
252    if log_path is None:
253        logging.getLogger(__name__).warning(
254            "log_path setting is set to `None`. Nothing else to do"
255        )
256        return
257
258    # Remove the temporary logging handler
259    __remove_temp_logging_handler()
260
261    if log_level is None:
262        log_level = "warning"
263
264    level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)
265
266    parsed_log_path = urllib.parse.urlparse(log_path)
267
268    root_logger = logging.getLogger()
269
270    if parsed_log_path.scheme in ("tcp", "udp", "file"):
271        syslog_opts = {
272            "facility": SysLogHandler.LOG_USER,
273            "socktype": socket.SOCK_DGRAM,
274        }
275
276        if parsed_log_path.scheme == "file" and parsed_log_path.path:
277            facility_name = parsed_log_path.path.split(os.sep)[-1].upper()
278            if not facility_name.startswith("LOG_"):
279                # The user is not specifying a syslog facility
280                facility_name = "LOG_USER"  # Syslog default
281                syslog_opts["address"] = parsed_log_path.path
282            else:
283                # The user has set a syslog facility, let's update the path to
284                # the logging socket
285                syslog_opts["address"] = os.sep.join(
286                    parsed_log_path.path.split(os.sep)[:-1]
287                )
288        elif parsed_log_path.path:
289            # In case of udp or tcp with a facility specified
290            facility_name = parsed_log_path.path.lstrip(os.sep).upper()
291            if not facility_name.startswith("LOG_"):
                # Logging facilities start with LOG_; if this is not the
                # case, fail right now!
                raise RuntimeError(
                    "The syslog facility '{}' is not known".format(facility_name)
                )
        else:
            # This is the case of udp or tcp without a facility specified
            facility_name = "LOG_USER"  # Syslog default

        facility = getattr(SysLogHandler, facility_name, None)
        if facility is None:
            # This python syslog version does not know about the user-provided
            # facility name
            raise RuntimeError(
                "The syslog facility '{}' is not known".format(facility_name)
            )
        syslog_opts["facility"] = facility

        if parsed_log_path.scheme == "tcp":
            syslog_opts["socktype"] = socket.SOCK_STREAM

        if parsed_log_path.scheme in ("tcp", "udp"):
            syslog_opts["address"] = (
                parsed_log_path.hostname,
                parsed_log_path.port or logging.handlers.SYSLOG_UDP_PORT,
            )

        if parsed_log_path.scheme == "file":
            # For unix sockets, let SysLogHandler figure out the socket type itself
            syslog_opts.pop("socktype", None)

        try:
            # Et voilà! Finally our syslog handler instance
            handler = SysLogHandler(**syslog_opts)
        except OSError as err:
            logging.getLogger(__name__).error(
                "Failed to setup the Syslog logging handler: %s", err
            )
            shutdown_multiprocessing_logging_listener()
            sys.exit(2)
    else:
        # Make sure the logging directory exists and attempt to create it if necessary
        log_dir = os.path.dirname(log_path)
        if not os.path.exists(log_dir):
            logging.getLogger(__name__).info(
                "Log directory not found, trying to create it: %s", log_dir
            )
            try:
                os.makedirs(log_dir, mode=0o700)
            except OSError as ose:
                logging.getLogger(__name__).warning(
                    "Failed to create directory for log file: %s (%s)", log_dir, ose
                )
                return
        try:
            # Logfile logging is UTF-8 on purpose.
            # Since salt uses YAML and YAML uses either UTF-8 or UTF-16, if a
            # user is not using plain ASCII, their system should be ready to
            # handle UTF-8.
            if max_bytes > 0:
                handler = RotatingFileHandler(
                    log_path,
                    mode="a",
                    maxBytes=max_bytes,
                    backupCount=backup_count,
                    encoding="utf-8",
                    delay=0,
                )
            else:
                handler = WatchedFileHandler(
                    log_path, mode="a", encoding="utf-8", delay=0
                )
        except OSError:
            logging.getLogger(__name__).warning(
                "Failed to open log file, do you have permission to write to %s?",
                log_path,
            )
            # Do not proceed with any more configuration since it will fail, we
            # have the console logging already setup and the user should see
            # the error.
            return

    handler.setLevel(level)

    # Set the default logfile formatter config
    if not log_format:
        log_format = "%(asctime)s [%(name)-15s][%(levelname)-8s] %(message)s"
    if not date_format:
        date_format = "%Y-%m-%d %H:%M:%S"

    formatter = logging.Formatter(log_format, datefmt=date_format)

    handler.setFormatter(formatter)
    root_logger.addHandler(handler)

    global __LOGFILE_CONFIGURED
    global __LOGGING_LOGFILE_HANDLER
    __LOGFILE_CONFIGURED = True
    __LOGGING_LOGFILE_HANDLER = handler


def setup_extended_logging(opts):
    """
    Setup any additional logging handlers, internal or external
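
    As a rough sketch of what a custom ``log_handlers`` module can look like
    (the module name ``my_handler`` and the handler choice are assumed for
    illustration; the loader calls the module's ``setup_handlers()`` and adds
    whatever handler, list, or generator of handlers it returns to the root
    logger):

    .. code-block:: python

        # log_handlers/my_handler.py
        import logging


        def setup_handlers():
            handler = logging.StreamHandler()
            handler.setLevel(logging.WARNING)
            handler.setFormatter(logging.Formatter("%(asctime)s %(message)s"))
            return handler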
402    """
403    if is_extended_logging_configured() is True:
404        # Don't re-configure external loggers
405        return
406
407    # Explicit late import of salt's loader
408    import salt.loader
409
410    # Let's keep a reference to the current logging handlers
411    initial_handlers = logging.root.handlers[:]
412
413    # Load any additional logging handlers
414    providers = salt.loader.log_handlers(opts)
415
416    # Let's keep track of the new logging handlers so we can sync the stored
417    # log records with them
418    additional_handlers = []
419
420    for name, get_handlers_func in providers.items():
421        logging.getLogger(__name__).info("Processing `log_handlers.%s`", name)
422        # Keep a reference to the logging handlers count before getting the
423        # possible additional ones.
424        initial_handlers_count = len(logging.root.handlers)
425
426        handlers = get_handlers_func()
427        if isinstance(handlers, types.GeneratorType):
428            handlers = list(handlers)
429        elif handlers is False or handlers == [False]:
430            # A false return value means not configuring any logging handler on
431            # purpose
432            logging.getLogger(__name__).info(
433                "The `log_handlers.%s.setup_handlers()` function returned "
434                "`False` which means no logging handler was configured on "
435                "purpose. Continuing...",
436                name,
437            )
438            continue
439        else:
440            # Make sure we have an iterable
441            handlers = [handlers]
442
443        for handler in handlers:
            if not handler and len(logging.root.handlers) == initial_handlers_count:
                logging.getLogger(__name__).info(
                    "The `log_handlers.%s` module did not return any handlers "
                    "and the global handlers count did not increase. This "
                    "could be a sign of `log_handlers.%s` not working as "
                    "expected",
                    name,
                    name,
                )
                continue

            logging.getLogger(__name__).debug(
                "Adding the '%s' provided logging handler: '%s'", name, handler
            )
            additional_handlers.append(handler)
            logging.root.addHandler(handler)

    for handler in logging.root.handlers:
        if handler in initial_handlers:
            continue
        additional_handlers.append(handler)

    # Sync the stored log records with the newly added handlers
    if LOGGING_STORE_HANDLER is not None:
        LOGGING_STORE_HANDLER.sync_with_handlers(additional_handlers)
    else:
        logging.getLogger(__name__).debug(
            "LOGGING_STORE_HANDLER is already None, can't sync messages with it"
        )

    # Remove the temporary queue logging handler
    __remove_queue_logging_handler()

    # Remove the temporary null logging handler (if it exists)
    __remove_null_logging_handler()

    global __EXTERNAL_LOGGERS_CONFIGURED
    __EXTERNAL_LOGGERS_CONFIGURED = True


def get_multiprocessing_logging_queue():
    global __MP_LOGGING_QUEUE
    from salt.utils.platform import is_darwin, is_aix

    if __MP_LOGGING_QUEUE is not None:
        return __MP_LOGGING_QUEUE

    if __MP_IN_MAINPROCESS is False:
        # We're not in the MainProcess, return! No Queue shall be instantiated
        return __MP_LOGGING_QUEUE

    if __MP_LOGGING_QUEUE is None:
        if is_darwin() or is_aix():
            __MP_LOGGING_QUEUE = multiprocessing.Queue(32767)
        else:
            __MP_LOGGING_QUEUE = multiprocessing.Queue(100000)
    return __MP_LOGGING_QUEUE


def set_multiprocessing_logging_queue(queue):
    global __MP_LOGGING_QUEUE
    if __MP_LOGGING_QUEUE is not queue:
        __MP_LOGGING_QUEUE = queue


def get_multiprocessing_logging_level():
    return __MP_LOGGING_LEVEL


def set_multiprocessing_logging_level(log_level):
    global __MP_LOGGING_LEVEL
    __MP_LOGGING_LEVEL = log_level


def set_multiprocessing_logging_level_by_opts(opts):
    """
    This will set the multiprocessing logging level to the lowest
    logging level of all the types of logging that are configured.
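
    A rough sketch of the opts keys consulted (values are illustrative only):

    .. code-block:: python

        set_multiprocessing_logging_level_by_opts(
            {
                "log_level": "warning",
                "log_level_logfile": "debug",
                "log_granular_levels": {"salt.transport": "info"},
            }
        )
        # The multiprocessing level becomes DEBUG, the lowest of the three.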
522    """
523    global __MP_LOGGING_LEVEL
524
525    log_levels = [
526        LOG_LEVELS.get(opts.get("log_level", "").lower(), logging.ERROR),
527        LOG_LEVELS.get(opts.get("log_level_logfile", "").lower(), logging.ERROR),
528    ]
529    for level in opts.get("log_granular_levels", {}).values():
530        log_levels.append(LOG_LEVELS.get(level.lower(), logging.ERROR))
531
532    __MP_LOGGING_LEVEL = min(log_levels)
533
534
535def setup_multiprocessing_logging_listener(opts, queue=None):
536    global __MP_LOGGING_QUEUE_PROCESS
537    global __MP_LOGGING_LISTENER_CONFIGURED
538    global __MP_MAINPROCESS_ID
539
540    if __MP_IN_MAINPROCESS is False:
541        # We're not in the MainProcess, return! No logging listener setup shall happen
542        return
543
544    if __MP_LOGGING_LISTENER_CONFIGURED is True:
545        return
546
547    if __MP_MAINPROCESS_ID is not None and __MP_MAINPROCESS_ID != os.getpid():
548        # We're not in the MainProcess, return! No logging listener setup shall happen
549        return
550
551    __MP_MAINPROCESS_ID = os.getpid()
552    __MP_LOGGING_QUEUE_PROCESS = multiprocessing.Process(
553        target=__process_multiprocessing_logging_queue,
554        args=(
555            opts,
556            queue or get_multiprocessing_logging_queue(),
557        ),
558    )
559    __MP_LOGGING_QUEUE_PROCESS.daemon = True
560    __MP_LOGGING_QUEUE_PROCESS.start()
561    __MP_LOGGING_LISTENER_CONFIGURED = True
562
563
564def setup_multiprocessing_logging(queue=None):
565    """
566    This code should be called from within a running multiprocessing
567    process instance.
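
    A rough sketch of the intended flow (``opts``, the worker function
    ``run``, and the process arrangement are assumed for illustration):

    .. code-block:: python

        # In the MainProcess (opts is an assumed, already-loaded config dict)
        setup_multiprocessing_logging_listener(opts)
        queue = get_multiprocessing_logging_queue()

        # In the spawned worker process (run is an assumed worker target)
        def run(queue):
            setup_multiprocessing_logging(queue)
            logging.getLogger(__name__).info("Logged through the queue listener")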
568    """
569    from salt.utils.platform import is_windows
570
571    global __MP_LOGGING_CONFIGURED
572    global __MP_LOGGING_QUEUE_HANDLER
573
574    if __MP_IN_MAINPROCESS is True and not is_windows():
575        # We're in the MainProcess, return! No multiprocessing logging setup shall happen
576        # Windows is the exception where we want to set up multiprocessing
577        # logging in the MainProcess.
578        return
579
580    try:
581        logging._acquireLock()  # pylint: disable=protected-access
582
583        if __MP_LOGGING_CONFIGURED is True:
584            return
585
586        # Let's set it to true as fast as possible
587        __MP_LOGGING_CONFIGURED = True
588
589        if __MP_LOGGING_QUEUE_HANDLER is not None:
590            return
591
592        # The temp null and temp queue logging handlers will store messages.
        # Since no one will process them, memory usage will grow. If they
        # exist, remove them.
        __remove_null_logging_handler()
        __remove_queue_logging_handler()

        # Let's add a queue handler to the logging root handlers
        __MP_LOGGING_QUEUE_HANDLER = SaltLogQueueHandler(
            queue or get_multiprocessing_logging_queue()
        )
        logging.root.addHandler(__MP_LOGGING_QUEUE_HANDLER)
        # Set the logging root level to the lowest needed level to get all
        # desired messages.
        log_level = get_multiprocessing_logging_level()
        logging.root.setLevel(log_level)
        logging.getLogger(__name__).debug(
            "Multiprocessing queue logging configured for the process running "
            "under PID: %s at log level %s",
            os.getpid(),
            log_level,
        )
        # The above logging call will create, in some situations, a futex wait
        # lock condition, probably due to the multiprocessing Queue's internal
        # lock and semaphore mechanisms.
        # A small sleep will allow us not to hit that futex wait lock condition.
        time.sleep(0.0001)
    finally:
        logging._releaseLock()  # pylint: disable=protected-access


def shutdown_console_logging():
    global __CONSOLE_CONFIGURED
    global __LOGGING_CONSOLE_HANDLER

    if not __CONSOLE_CONFIGURED or not __LOGGING_CONSOLE_HANDLER:
        return

    try:
        logging._acquireLock()
        logging.root.removeHandler(__LOGGING_CONSOLE_HANDLER)
        __LOGGING_CONSOLE_HANDLER = None
        __CONSOLE_CONFIGURED = False
    finally:
        logging._releaseLock()


def shutdown_logfile_logging():
    global __LOGFILE_CONFIGURED
    global __LOGGING_LOGFILE_HANDLER

    if not __LOGFILE_CONFIGURED or not __LOGGING_LOGFILE_HANDLER:
        return

    try:
        logging._acquireLock()
        logging.root.removeHandler(__LOGGING_LOGFILE_HANDLER)
        __LOGGING_LOGFILE_HANDLER = None
        __LOGFILE_CONFIGURED = False
    finally:
        logging._releaseLock()


def shutdown_temp_logging():
    __remove_temp_logging_handler()


def shutdown_multiprocessing_logging():
    global __MP_LOGGING_CONFIGURED
    global __MP_LOGGING_QUEUE_HANDLER

    if not __MP_LOGGING_CONFIGURED or not __MP_LOGGING_QUEUE_HANDLER:
        return

    try:
        logging._acquireLock()
        # Let's remove the queue handler from the logging root handlers
        logging.root.removeHandler(__MP_LOGGING_QUEUE_HANDLER)
        __MP_LOGGING_QUEUE_HANDLER = None
        __MP_LOGGING_CONFIGURED = False
        if not logging.root.handlers:
            # Ensure we have at least one logging root handler so
            # something can handle logging messages. This case should
            # only occur on Windows since on Windows we log to console
            # and file through the Multiprocessing Logging Listener.
            setup_console_logger()
    finally:
        logging._releaseLock()


def shutdown_multiprocessing_logging_listener(daemonizing=False):
    global __MP_LOGGING_QUEUE
    global __MP_LOGGING_QUEUE_PROCESS
    global __MP_LOGGING_LISTENER_CONFIGURED

    if daemonizing is False and __MP_IN_MAINPROCESS is True:
        # We're in the MainProcess and we're not daemonizing, return!
        # No multiprocessing logging listener shutdown shall happen
        return

    if not daemonizing:
        # Need to remove the queue handler so that it doesn't try to send
        # data over a queue that was shut down on the listener end.
        shutdown_multiprocessing_logging()

    if __MP_LOGGING_QUEUE_PROCESS is None:
        return

    if __MP_MAINPROCESS_ID is not None and __MP_MAINPROCESS_ID != os.getpid():
        # We're not in the MainProcess, return! No logging listener shutdown shall happen
        return

    if __MP_LOGGING_QUEUE_PROCESS.is_alive():
        logging.getLogger(__name__).debug(
            "Stopping the multiprocessing logging queue listener"
        )
        try:
            # Send a None sentinel to stop the logging processing queue
            __MP_LOGGING_QUEUE.put(None)
            # Let's join the multiprocessing logging handle thread
            time.sleep(0.5)
            logging.getLogger(__name__).debug("closing multiprocessing queue")
            __MP_LOGGING_QUEUE.close()
            logging.getLogger(__name__).debug("joining multiprocessing queue thread")
            __MP_LOGGING_QUEUE.join_thread()
            __MP_LOGGING_QUEUE = None
            __MP_LOGGING_QUEUE_PROCESS.join(1)
        except OSError:
            # We were unable to deliver the sentinel to the queue
            # carry on...
            pass
        if __MP_LOGGING_QUEUE_PROCESS.is_alive():
            # Process is still alive!?
            __MP_LOGGING_QUEUE_PROCESS.terminate()
        __MP_LOGGING_QUEUE_PROCESS = None
        __MP_LOGGING_LISTENER_CONFIGURED = False
        logging.getLogger(__name__).debug(
            "Stopped the multiprocessing logging queue listener"
        )


def set_logger_level(logger_name, log_level="error"):
    """
    Tweak a specific logger's logging level
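
    For example (the logger name and level are illustrative only):

    .. code-block:: python

        # Quiet down a chatty third-party library
        set_logger_level("urllib3", "warning")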
736    """
737    logging.getLogger(logger_name).setLevel(
738        LOG_LEVELS.get(log_level.lower(), logging.ERROR)
739    )
740
741
742def patch_python_logging_handlers():
743    """
    Patch the python logging handlers with our mixed-in classes
    """
    logging.StreamHandler = StreamHandler
    logging.FileHandler = FileHandler
    logging.handlers.SysLogHandler = SysLogHandler
    logging.handlers.WatchedFileHandler = WatchedFileHandler
    logging.handlers.RotatingFileHandler = RotatingFileHandler
    logging.handlers.QueueHandler = QueueHandler


def __process_multiprocessing_logging_queue(opts, queue):
    # Avoid circular import
    import salt.utils.process

    salt.utils.process.appendproctitle("MultiprocessingLoggingQueue")

    # Assign UID/GID of user to proc if set
    from salt.utils.verify import check_user

    user = opts.get("user")
    if user:
        check_user(user)

    from salt.utils.platform import is_windows

    if is_windows():
        # On Windows, creating a new process doesn't fork (copy the parent
        # process image). Due to this, we need to setup all of our logging
        # inside this process.
        setup_temp_logger()
        setup_console_logger(
            log_level=opts.get("log_level"),
            log_format=opts.get("log_fmt_console"),
            date_format=opts.get("log_datefmt_console"),
        )
        setup_logfile_logger(
            opts.get("log_file"),
            log_level=opts.get("log_level_logfile"),
            log_format=opts.get("log_fmt_logfile"),
            date_format=opts.get("log_datefmt_logfile"),
            max_bytes=opts.get("log_rotate_max_bytes", 0),
            backup_count=opts.get("log_rotate_backup_count", 0),
        )
        setup_extended_logging(opts)
    while True:
        try:
            record = queue.get()
            if record is None:
                # A sentinel to stop processing the queue
                break
            # Just log everything, filtering will happen on the main process
            # logging handlers
            logger = logging.getLogger(record.name)
            logger.handle(record)
        except (EOFError, KeyboardInterrupt, SystemExit):
            break
        except Exception as exc:  # pylint: disable=broad-except
            logging.getLogger(__name__).warning(
                "An exception occurred in the multiprocessing logging queue thread: %r",
                exc,
                exc_info_on_loglevel=logging.DEBUG,
            )


def __remove_null_logging_handler():
    """
    This function will run once the temporary logging has been configured. It
    just removes the NullHandler from the logging handlers.
    """
    global LOGGING_NULL_HANDLER
    if LOGGING_NULL_HANDLER is None:
        # Already removed
        return

    root_logger = logging.getLogger()

    for handler in root_logger.handlers:
        if handler is LOGGING_NULL_HANDLER:
            root_logger.removeHandler(LOGGING_NULL_HANDLER)
            # Redefine the null handler to None so it can be garbage collected
            LOGGING_NULL_HANDLER = None
            break


def __remove_queue_logging_handler():
    """
    This function will run once the additional loggers have been synchronized.
    It just removes the QueueLoggingHandler from the logging handlers.
    """
    global LOGGING_STORE_HANDLER
    if LOGGING_STORE_HANDLER is None:
        # Already removed
        return

    root_logger = logging.getLogger()

    for handler in root_logger.handlers:
        if handler is LOGGING_STORE_HANDLER:
            root_logger.removeHandler(LOGGING_STORE_HANDLER)
            # Redefine the store handler to None so it can be garbage collected
            LOGGING_STORE_HANDLER = None
            break


def __remove_temp_logging_handler():
    """
    This function will run once logging has been configured. It just removes
    the temporary stream Handler from the logging handlers.
    """
    if is_logging_configured():
        # In this case, the temporary logging handler has been removed, return!
        return

    # This should already be done, but...
    __remove_null_logging_handler()

    root_logger = logging.getLogger()
    global LOGGING_TEMP_HANDLER

    for handler in root_logger.handlers:
        if handler is LOGGING_TEMP_HANDLER:
            root_logger.removeHandler(LOGGING_TEMP_HANDLER)
            # Redefine the temp handler to None so it can be garbage collected
            LOGGING_TEMP_HANDLER = None
            break

    # Now that logging is configured, allow warnings to be redirected to the
    # logging system. Let's enable it.
    logging.captureWarnings(True)


def __global_logging_exception_handler(
    exc_type,
    exc_value,
    exc_traceback,
    _logger=logging.getLogger(__name__),
    _stderr=sys.__stderr__,
    _format_exception=traceback.format_exception,
):
885    """
886    This function will log all un-handled python exceptions.
887    """
    if exc_type.__name__ == "KeyboardInterrupt":
        # Do not log the exception or display the traceback on Keyboard Interrupt
        # Stop the logging queue listener thread
        if is_mp_logging_listener_configured():
            shutdown_multiprocessing_logging_listener()
        return

    # Log the exception
    msg = "An un-handled exception was caught by salt's global exception handler:"
    try:
        msg = "{}\n{}: {}\n{}".format(
            msg,
            exc_type.__name__,
            exc_value,
            "".join(_format_exception(exc_type, exc_value, exc_traceback)).strip(),
        )
    except Exception:  # pylint: disable=broad-except
        msg = "{}\n{}: {}\n(UNABLE TO FORMAT TRACEBACK)".format(
            msg,
            exc_type.__name__,
            exc_value,
        )
    try:
        _logger.error(msg)
    except Exception:  # pylint: disable=broad-except
        # Python is shutting down and logging has been set to None already
        try:
            _stderr.write(msg + "\n")
        except Exception:  # pylint: disable=broad-except
            # We have also lost reference to sys.__stderr__ ?!
            print(msg)

    # Call the original sys.excepthook
    try:
        sys.__excepthook__(exc_type, exc_value, exc_traceback)
    except Exception:  # pylint: disable=broad-except
        # Python is shutting down and sys has been set to None already
        pass


# Set our own exception handler as the one to use
sys.excepthook = __global_logging_exception_handler