1# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
2# vi: set ft=python sts=4 ts=4 sw=4 noet :
3
4# This file is part of Fail2Ban.
5#
6# Fail2Ban is free software; you can redistribute it and/or modify
7# it under the terms of the GNU General Public License as published by
8# the Free Software Foundation; either version 2 of the License, or
9# (at your option) any later version.
10#
11# Fail2Ban is distributed in the hope that it will be useful,
12# but WITHOUT ANY WARRANTY; without even the implied warranty of
13# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14# GNU General Public License for more details.
15#
16# You should have received a copy of the GNU General Public License
17# along with Fail2Ban; if not, write to the Free Software
18# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
19
20__author__ = "Cyril Jaquier and Fail2Ban Contributors"
21__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2011-2013 Yaroslav Halchenko"
22__license__ = "GPL"
23
24import codecs
25import datetime
26import fcntl
27import logging
28import os
29import re
30import sys
31import time
32
33from .actions import Actions
34from .failmanager import FailManagerEmpty, FailManager
35from .ipdns import DNSUtils, IPAddr
36from .observer import Observers
37from .ticket import FailTicket
38from .jailthread import JailThread
39from .datedetector import DateDetector, validateTimeZone
40from .mytime import MyTime
41from .failregex import FailRegex, Regex, RegexException
42from .action import CommandAction
43from .utils import Utils
44from ..helpers import getLogger, PREFER_ENC
45
46# Gets the instance of the logger.
47logSys = getLogger(__name__)
48
49##
50# Log reader class.
51#
52# This class reads a log file and detects login failures or anything else
53# that matches a given regular expression. This class is instantiated by
54# a Jail object.
55
56
57class Filter(JailThread):
58
59	##
60	# Constructor.
61	#
62	# Initialize the filter object with default values.
63	# @param jail the jail object
64
	def __init__(self, jail, useDns='warn'):
		"""Initialize the filter with default values.

		Parameters
		----------
		jail : Jail
			The jail object which instantiated (and contains) this filter.
		useDns : str or bool
			usedns mode - one of 'yes', 'warn', 'no', 'raw' (a bool is
			mapped to 'yes'/'no' by setUseDns).
		"""
		JailThread.__init__(self)
		## The jail which contains this filter.
		self.jail = jail
		## The failures manager.
		self.failManager = FailManager()
		## Regular expression pre-filtering matching the failures.
		self.__prefRegex = None
		## The regular expression list matching the failures.
		self.__failRegex = list()
		## The regular expression list with expressions to ignore.
		self.__ignoreRegex = list()
		## Use DNS setting
		self.setUseDns(useDns)
		## The amount of time to look back.
		self.__findTime = 600
		## Ignore own IPs flag:
		self.__ignoreSelf = True
		## The ignore IP list (single addresses in the set, subnets/DNS names in the list):
		self.__ignoreIpSet = set()
		self.__ignoreIpList = []
		## External command
		self.__ignoreCommand = False
		## Cache for ignoreip:
		self.__ignoreCache = None
		## Size of line buffer (> 1 enables multi-line matching)
		self.__lineBufferSize = 1
		## Line buffer
		self.__lineBuffer = []
		## Store last time stamp, applicable for multi-line
		self.__lastTimeText = ""
		self.__lastDate = None
		## if set, treat log lines without explicit time zone to be in this time zone
		self.__logtimezone = None
		## Default or preferred encoding (to decode bytes from file or journal):
		self.__encoding = PREFER_ENC
		## Cache temporary holds failures info (used by multi-line for wrapping e. g. conn-id to host):
		self.__mlfidCache = None
		## Error counter (protected, so can be used in filter implementations)
		## if it reached 100 (at once), run-cycle will go idle
		self._errors = 0
		## return raw host (host is not dns):
		self.returnRawHost = False
		## check each regex (used for test purposes):
		self.checkAllRegex = False
		## avoid finding of pending failures (without ID/IP, used in fail2ban-regex):
		self.ignorePending = True
		## callback called on ignoreregex match :
		self.onIgnoreRegex = None
		## if true ignores obsolete failures (failure time < now - findTime):
		self.checkFindTime = True
		## shows that filter is in operation mode (processing new messages):
		self.inOperation = True
		## if true prevents against retarded banning in case of RC by too many failures (disabled only for test purposes):
		self.banASAP = True
		## Ticks counter
		self.ticks = 0
		## Thread name:
		self.name="f2b/f."+self.jailName

		self.dateDetector = DateDetector()
		logSys.debug("Created %s", self)
127
128	def __repr__(self):
129		return "%s(%r)" % (self.__class__.__name__, self.jail)
130
131	@property
132	def jailName(self):
133		return (self.jail is not None and self.jail.name or "~jailless~")
134
	def clearAllParams(self):
		""" Clear all lists/dicts parameters (used by reloading)
		"""
		self.delFailRegex()
		self.delIgnoreRegex()
		self.delIgnoreIP()

	def reload(self, begin=True):
		""" Begin or end of reloading resp. refreshing of all parameters
		"""
		if begin:
			self.clearAllParams()
			# remember all currently monitored log paths, so those not re-added
			# during the reload can be recognized as obsolete in the end phase:
			if hasattr(self, 'getLogPaths'):
				self._reload_logs = dict((k, 1) for k in self.getLogPaths())
		else:
			if hasattr(self, '_reload_logs'):
				# if it was not reloaded - remove obsolete log file:
				for path in self._reload_logs:
					self.delLogPath(path)
				delattr(self, '_reload_logs')
155
156	@property
157	def mlfidCache(self):
158		if self.__mlfidCache:
159			return self.__mlfidCache
160		self.__mlfidCache = Utils.Cache(maxCount=100, maxTime=5*60)
161		return self.__mlfidCache
162
163	@property
164	def prefRegex(self):
165		return self.__prefRegex
166	@prefRegex.setter
167	def prefRegex(self, value):
168		if value:
169			self.__prefRegex = Regex(value, useDns=self.__useDns)
170		else:
171			self.__prefRegex = None
172
173	##
174	# Add a regular expression which matches the failure.
175	#
176	# The regular expression can also match any other pattern than failures
177	# and thus can be used for many purporse.
178	# @param value the regular expression
179
180	def addFailRegex(self, value):
181		multiLine = self.__lineBufferSize > 1
182		try:
183			regex = FailRegex(value, prefRegex=self.__prefRegex, multiline=multiLine,
184				useDns=self.__useDns)
185			self.__failRegex.append(regex)
186		except RegexException as e:
187			logSys.error(e)
188			raise e
189
190	def delFailRegex(self, index=None):
191		try:
192			# clear all:
193			if index is None:
194				del self.__failRegex[:]
195				return
196			# delete by index:
197			del self.__failRegex[index]
198		except IndexError:
199			logSys.error("Cannot remove regular expression. Index %d is not "
200						 "valid", index)
201
202	##
203	# Get the regular expressions as list.
204	#
205	# @return the regular expression list
206
207	def getFailRegex(self):
208		return [regex.getRegex() for regex in self.__failRegex]
209
210	##
211	# Add the regular expression which matches the failure.
212	#
213	# The regular expression can also match any other pattern than failures
214	# and thus can be used for many purpose.
215	# @param value the regular expression
216
217	def addIgnoreRegex(self, value):
218		try:
219			regex = Regex(value, useDns=self.__useDns)
220			self.__ignoreRegex.append(regex)
221		except RegexException as e:
222			logSys.error(e)
223			raise e
224
225	def delIgnoreRegex(self, index=None):
226		try:
227			# clear all:
228			if index is None:
229				del self.__ignoreRegex[:]
230				return
231			# delete by index:
232			del self.__ignoreRegex[index]
233		except IndexError:
234			logSys.error("Cannot remove regular expression. Index %d is not "
235						 "valid", index)
236
237	##
238	# Get the regular expression which matches the failure.
239	#
240	# @return the regular expression
241
242	def getIgnoreRegex(self):
243		ignoreRegex = list()
244		for regex in self.__ignoreRegex:
245			ignoreRegex.append(regex.getRegex())
246		return ignoreRegex
247
248	##
249	# Set the Use DNS mode
250	# @param value the usedns mode
251
252	def setUseDns(self, value):
253		if isinstance(value, bool):
254			value = {True: 'yes', False: 'no'}[value]
255		value = value.lower()			  # must be a string by now
256		if not (value in ('yes', 'warn', 'no', 'raw')):
257			logSys.error("Incorrect value %r specified for usedns. "
258						 "Using safe 'no'", value)
259			value = 'no'
260		logSys.debug("Setting usedns = %s for %s", value, self)
261		self.__useDns = value
262
263	##
264	# Get the usedns mode
265	# @return the usedns mode
266
	def getUseDns(self):
		"""Return the current usedns mode ('yes', 'warn', 'no' or 'raw')."""
		return self.__useDns
269
270	##
271	# Set the time needed to find a failure.
272	#
273	# This value tells the filter how long it has to take failures into
274	# account.
275	# @param value the time
276
277	def setFindTime(self, value):
278		value = MyTime.str2seconds(value)
279		self.__findTime = value
280		self.failManager.setMaxTime(value)
281		logSys.info("  findtime: %s", value)
282
283	##
284	# Get the time needed to find a failure.
285	#
286	# @return the time
287
	def getFindTime(self):
		"""Return the find-time window in seconds."""
		return self.__findTime
290
291	##
292	# Set the date detector pattern, removing Defaults
293	#
294	# @param pattern the date template pattern
295
296	def setDatePattern(self, pattern):
297		if pattern is None:
298			self.dateDetector = None
299			return
300		else:
301			dd = DateDetector()
302			dd.default_tz = self.__logtimezone
303			if not isinstance(pattern, (list, tuple)):
304				pattern = list(filter(bool, list(map(str.strip, re.split('\n+', pattern)))))
305			for pattern in pattern:
306				dd.appendTemplate(pattern)
307			self.dateDetector = dd
308
309	##
310	# Get the date detector pattern, or Default Detectors if not changed
311	#
312	# @return pattern of the date template pattern
313
314	def getDatePattern(self):
315		if self.dateDetector is not None:
316			templates = self.dateDetector.templates
317			# lazy template init, by first match
318			if not len(templates) or len(templates) > 2:
319				return None, "Default Detectors"
320			elif len(templates):
321				if hasattr(templates[0], "pattern"):
322					pattern =  templates[0].pattern
323				else:
324					pattern = None
325				return pattern, templates[0].name
326		return None
327
328	##
329	# Set the log default time zone
330	#
331	# @param tz the symbolic timezone (for now fixed offset only: UTC[+-]HHMM)
332
333	def setLogTimeZone(self, tz):
334		validateTimeZone(tz); # avoid setting of wrong value, but hold original
335		self.__logtimezone = tz
336		if self.dateDetector: self.dateDetector.default_tz = self.__logtimezone
337
338	##
339	# Get the log default timezone
340	#
341	# @return symbolic timezone (a string)
342
	def getLogTimeZone(self):
		"""Return the default log time zone (symbolic string) or None."""
		return self.__logtimezone
345
346	##
347	# Set the maximum retry value.
348	#
349	# @param value the retry value
350
	def setMaxRetry(self, value):
		"""Set the maximum retry value (delegated to the failure manager)."""
		self.failManager.setMaxRetry(value)
		logSys.info("  maxRetry: %s", value)

	##
	# Get the maximum retry value.
	#
	# @return the retry value

	def getMaxRetry(self):
		"""Return the maximum retry value (from the failure manager)."""
		return self.failManager.getMaxRetry()
362
363	##
364	# Set the maximum line buffer size.
365	#
366	# @param value the line buffer size
367
368	def setMaxLines(self, value):
369		if int(value) <= 0:
370			raise ValueError("maxlines must be integer greater than zero")
371		self.__lineBufferSize = int(value)
372		logSys.info("  maxLines: %i", self.__lineBufferSize)
373
374	##
375	# Get the maximum line buffer size.
376	#
377	# @return the line buffer size
378
	def getMaxLines(self):
		"""Return the maximum line buffer size."""
		return self.__lineBufferSize
381
382	##
383	# Set the log file encoding
384	#
385	# @param encoding the encoding used with log files
386
	def setLogEncoding(self, encoding):
		"""Set the log file encoding ("auto" selects the platform-preferred encoding).

		Raises LookupError for an unknown codec; returns the encoding really set.
		"""
		if encoding.lower() == "auto":
			encoding = PREFER_ENC
		codecs.lookup(encoding) # Raise LookupError if invalid codec
		self.__encoding = encoding
		logSys.info("  encoding: %s", encoding)
		return encoding

	##
	# Get the log file encoding
	#
	# @return log encoding value

	def getLogEncoding(self):
		"""Return the encoding used to decode log lines."""
		return self.__encoding
402
403	##
404	# Main loop.
405	#
406	# This function is the main loop of the thread. It checks if the
407	# file has been modified and looks for failures.
408	# @return True when the thread exits nicely
409
	def run(self): # pragma: no cover
		"""Main worker loop; abstract - must be overridden by concrete filters."""
		raise Exception("run() is abstract")
412
413	##
414	# External command, for ignoredips
415	#
416
	@property
	def ignoreCommand(self):
		"""External command deciding whether an IP should be ignored (False if unset)."""
		return self.__ignoreCommand

	@ignoreCommand.setter
	def ignoreCommand(self, command):
		self.__ignoreCommand = command
424
425	##
426	# Cache parameters for ignoredips
427	#
428
429	@property
430	def ignoreCache(self):
431		return [self.__ignoreCache[0], self.__ignoreCache[1].maxCount, self.__ignoreCache[1].maxTime] \
432			if self.__ignoreCache else None
433
434	@ignoreCache.setter
435	def ignoreCache(self, command):
436		if command:
437			self.__ignoreCache = command['key'], Utils.Cache(
438				maxCount=int(command.get('max-count', 100)), maxTime=MyTime.str2seconds(command.get('max-time', 5*60))
439			)
440		else:
441			self.__ignoreCache = None
442
443	def performBan(self, ip=None):
444		"""Performs a ban for IPs (or given ip) that are reached maxretry of the jail."""
445		try: # pragma: no branch - exception is the only way out
446			while True:
447				ticket = self.failManager.toBan(ip)
448				self.jail.putFailTicket(ticket)
449		except FailManagerEmpty:
450			self.failManager.cleanup(MyTime.time())
451
452	def addAttempt(self, ip, *matches):
453		"""Generate a failed attempt for ip"""
454		if not isinstance(ip, IPAddr):
455			ip = IPAddr(ip)
456		matches = list(matches) # tuple to list
457
458		# Generate the failure attempt for the IP:
459		unixTime = MyTime.time()
460		ticket = FailTicket(ip, unixTime, matches=matches)
461		logSys.info(
462			"[%s] Attempt %s - %s", self.jailName, ip, datetime.datetime.fromtimestamp(unixTime).strftime("%Y-%m-%d %H:%M:%S")
463		)
464		attempts = self.failManager.addFailure(ticket, len(matches) or 1)
465		# Perform the ban if this attempt is resulted to:
466		if attempts >= self.failManager.getMaxRetry():
467			self.performBan(ip)
468
469		return 1
470
471	##
472	# Ignore own IP/DNS.
473	#
	@property
	def ignoreSelf(self):
		"""Whether own (local host) IP addresses are ignored."""
		return self.__ignoreSelf

	@ignoreSelf.setter
	def ignoreSelf(self, value):
		self.__ignoreSelf = value
481
482	##
483	# Add an IP/DNS to the ignore list.
484	#
485	# IP addresses in the ignore list are not taken into account
486	# when finding failures. CIDR mask and DNS are also accepted.
487	# @param ip IP address to ignore
488
489	def addIgnoreIP(self, ipstr):
490		# An empty string is always false
491		if ipstr == "":
492			return
493		# Create IP address object
494		ip = IPAddr(ipstr)
495		# Avoid exact duplicates
496		if ip in self.__ignoreIpSet or ip in self.__ignoreIpList:
497			logSys.log(logging.MSG, "  Ignore duplicate %r (%r), already in ignore list", ip, ipstr)
498			return
499		# log and append to ignore list
500		logSys.debug("  Add %r to ignore list (%r)", ip, ipstr)
501		# if single IP (not DNS or a subnet) add to set, otherwise to list:
502		if ip.isSingle:
503			self.__ignoreIpSet.add(ip)
504		else:
505			self.__ignoreIpList.append(ip)
506
	def delIgnoreIP(self, ip=None):
		"""Remove the given entry from the ignore list (all entries if ip is None)."""
		# clear all:
		if ip is None:
			self.__ignoreIpSet.clear()
			del self.__ignoreIpList[:]
			return
		# delete by ip:
		logSys.debug("  Remove %r from ignore list", ip)
		# single addresses live in the set, subnets/DNS names in the list:
		if ip in self.__ignoreIpSet:
			self.__ignoreIpSet.remove(ip)
		else:
			self.__ignoreIpList.remove(ip)

	def logIgnoreIp(self, ip, log_ignore, ignore_source="unknown source"):
		"""Log that ip gets ignored (only if log_ignore is set)."""
		if log_ignore:
			logSys.info("[%s] Ignore %s by %s", self.jailName, ip, ignore_source)

	def getIgnoreIP(self):
		"""Return all ignore-list entries (subnets/DNS names first, then single IPs)."""
		return self.__ignoreIpList + list(self.__ignoreIpSet)
526
527	##
528	# Check if IP address/DNS is in the ignore list.
529	#
530	# Check if the given IP address matches an IP address/DNS or a CIDR
531	# mask in the ignore list.
532	# @param ip IP address object or ticket
533	# @return True if IP address is in ignore list
534
535	def inIgnoreIPList(self, ip, log_ignore=True):
536		ticket = None
537		if isinstance(ip, FailTicket):
538			ticket = ip
539			ip = ticket.getIP()
540		elif not isinstance(ip, IPAddr):
541			ip = IPAddr(ip)
542		return self._inIgnoreIPList(ip, ticket, log_ignore)
543
	def _inIgnoreIPList(self, ip, ticket, log_ignore=True):
		"""Internal ignore check (see inIgnoreIPList).

		Checks in order: ignore cache, own IPs (ignoreself), single-IP set,
		subnet/DNS list, and finally the external ignore command; positive
		and negative results get stored in the cache (if one is configured).
		"""
		aInfo = None
		# cached ?
		if self.__ignoreCache:
			key, c = self.__ignoreCache
			# substitute tags in the cache-key template (with ticket data if available):
			if ticket:
				aInfo = Actions.ActionInfo(ticket, self.jail)
				key = CommandAction.replaceDynamicTags(key, aInfo)
			else:
				aInfo = { 'ip': ip }
				key = CommandAction.replaceTag(key, aInfo)
			v = c.get(key)
			if v is not None:
				return v

		# check own IPs should be ignored and 'ip' is self IP:
		if self.__ignoreSelf and ip in DNSUtils.getSelfIPs():
			self.logIgnoreIp(ip, log_ignore, ignore_source="ignoreself rule")
			if self.__ignoreCache: c.set(key, True)
			return True

		# check if the IP is covered by ignore IP (in set or in subnet/dns):
		if ip in self.__ignoreIpSet:
			# direct set hit is O(1) already, so the result is not cached here:
			self.logIgnoreIp(ip, log_ignore, ignore_source="ip")
			return True
		for net in self.__ignoreIpList:
			if ip.isInNet(net):
				self.logIgnoreIp(ip, log_ignore, ignore_source=("ip" if net.isValid else "dns"))
				if self.__ignoreCache: c.set(key, True)
				return True

		if self.__ignoreCommand:
			# build the concrete command with tags replaced (ticket data if available):
			if ticket:
				if not aInfo: aInfo = Actions.ActionInfo(ticket, self.jail)
				command = CommandAction.replaceDynamicTags(self.__ignoreCommand, aInfo)
			else:
				if not aInfo: aInfo = { 'ip': ip }
				command = CommandAction.replaceTag(self.__ignoreCommand, aInfo)
			logSys.debug('ignore command: %s', command)
			# exit codes 0 and 1 are both "successful execution"; only 0 means "ignore":
			ret, ret_ignore = CommandAction.executeCmd(command, success_codes=(0, 1))
			ret_ignore = ret and ret_ignore == 0
			self.logIgnoreIp(ip, log_ignore and ret_ignore, ignore_source="command")
			if self.__ignoreCache: c.set(key, ret_ignore)
			return ret_ignore

		# negative result is cached too:
		if self.__ignoreCache: c.set(key, False)
		return False
591
592	def _logWarnOnce(self, nextLTM, *args):
593		"""Log some issue as warning once per day, otherwise level 7"""
594		if MyTime.time() < getattr(self, nextLTM, 0):
595			if logSys.getEffectiveLevel() <= 7: logSys.log(7, *(args[0]))
596		else:
597			setattr(self, nextLTM, MyTime.time() + 24*60*60)
598			for args in args:
599				logSys.warning('[%s] ' + args[0], self.jailName, *args[1:])
600
	def processLine(self, line, date=None):
		"""Split the time portion from log msg and return findFailures on them

		Parameters
		----------
		line : str or tuple
			Complete log line, or an already split (pre, timestamp, msg)
			tuple when date is given.
		date : float, optional
			Already known (parsed) timestamp of the line.

		Returns the list of failures found (see findFailure).
		"""
		logSys.log(7, "Working on line %r", line)

		noDate = False
		if date:
			tupleLine = line
			self.__lastTimeText = tupleLine[1]
			self.__lastDate = date
		else:
			# try to parse date:
			timeMatch = self.dateDetector.matchTime(line)
			m = timeMatch[0]
			if m:
				s = m.start(1)
				e = m.end(1)
				m = line[s:e]
				tupleLine = (line[:s], m, line[e:])
				if m: # found and not empty - retrieve date:
					date = self.dateDetector.getTime(m, timeMatch)
					if date is not None:
						# Lets get the time part
						date = date[0]
						self.__lastTimeText = m
						self.__lastDate = date
					else:
						logSys.error("findFailure failed to parse timeText: %s", m)
				# matched empty value - date is optional or not available - set it to last known or now:
				elif self.__lastDate and self.__lastDate > MyTime.time() - 60:
					# set it to last known (not older than a minute):
					tupleLine = ("", self.__lastTimeText, line)
					date = self.__lastDate
				else:
					# set it to now:
					date = MyTime.time()
			else:
				tupleLine = ("", "", line)
			# still no date - try to use last known:
			if date is None:
				noDate = True
				if self.__lastDate and self.__lastDate > MyTime.time() - 60:
					tupleLine = ("", self.__lastTimeText, line)
					date = self.__lastDate

		if self.checkFindTime:
			# if in operation (modifications have been really found):
			if self.inOperation:
				# if weird date - we'd simulate now for timeing issue (too large deviation from now):
				if (date is None or date < MyTime.time() - 60 or date > MyTime.time() + 60):
					# log time zone issue as warning once per day:
					self._logWarnOnce("_next_simByTimeWarn",
						("Simulate NOW in operation since found time has too large deviation %s ~ %s +/- %s",
							date, MyTime.time(), 60),
						("Please check jail has possibly a timezone issue. Line with odd timestamp: %s",
							line))
					# simulate now as date:
					date = MyTime.time()
					self.__lastDate = date
			else:
				# in initialization (restore) phase, if too old - ignore:
				if date is not None and date < MyTime.time() - self.getFindTime():
					# log time zone issue as warning once per day:
					self._logWarnOnce("_next_ignByTimeWarn",
						("Ignore line since time %s < %s - %s",
							date, MyTime.time(), self.getFindTime()),
						("Please check jail has possibly a timezone issue. Line with odd timestamp: %s",
							line))
					# ignore - too old (obsolete) entry:
					return []

		# save last line (lazy convert of process line tuple to string on demand):
		self.processedLine = lambda: "".join(tupleLine[::2])
		return self.findFailure(tupleLine, date, noDate=noDate)
675
	def processLineAndAdd(self, line, date=None):
		"""Processes the line for failures and populates failManager
		"""
		try:
			for element in self.processLine(line, date):
				# element layout (see findFailure): [failRegexIndex, ip, unixTime, fail-data]
				ip = element[1]
				unixTime = element[2]
				fail = element[3]
				logSys.debug("Processing line with time:%s and ip:%s",
						unixTime, ip)
				# ensure the time is not in the future, e. g. by some estimated (assumed) time:
				if self.checkFindTime and unixTime > MyTime.time():
					unixTime = MyTime.time()
				tick = FailTicket(ip, unixTime, data=fail)
				if self._inIgnoreIPList(ip, tick):
					continue
				logSys.info(
					"[%s] Found %s - %s", self.jailName, ip, MyTime.time2str(unixTime)
				)
				attempts = self.failManager.addFailure(tick)
				# avoid RC on busy filter (too many failures) - if attempts for IP/ID reached maxretry,
				# we can speedup ban, so do it as soon as possible:
				if self.banASAP and attempts >= self.failManager.getMaxRetry():
					self.performBan(ip)
				# report to observer - failure was found, for possibly increasing of it retry counter (asynchronous)
				if Observers.Main is not None:
					Observers.Main.add('failureFound', self.failManager, self.jail, tick)
			# reset (halve) error counter (successfully processed line):
			if self._errors:
				self._errors //= 2
		except Exception as e:
			logSys.error("Failed to process line: %r, caught exception: %r", line, e,
				exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
			# incr common error counter:
			self.commonError()
711
712	def commonError(self):
713		# incr error counter, stop processing (going idle) after 100th error :
714		self._errors += 1
715		# sleep a little bit (to get around time-related errors):
716		time.sleep(self.sleeptime)
717		if self._errors >= 100:
718			logSys.error("Too many errors at once (%s), going idle", self._errors)
719			self._errors //= 2
720			self.idle = True
721
	def _ignoreLine(self, buf, orgBuffer, failRegex=None):
		"""Check the buffer against the ignoreregex list; returns matched index or None.

		Side effect (multi-line mode, or after failregex match): shrinks
		self.__lineBuffer to the lines the given failRegex did not match,
		so they can be reused for subsequent matching.
		"""
		# if multi-line buffer - use matched only, otherwise (single line) - original buf:
		if failRegex and self.__lineBufferSize > 1:
			orgBuffer = failRegex.getMatchedTupleLines()
			buf = Regex._tupleLinesBuf(orgBuffer)
		# search ignored:
		fnd = None
		for ignoreRegexIndex, ignoreRegex in enumerate(self.__ignoreRegex):
			ignoreRegex.search(buf, orgBuffer)
			if ignoreRegex.hasMatched():
				fnd = ignoreRegexIndex
				logSys.log(7, "  Matched ignoreregex %d and was ignored", fnd)
				if self.onIgnoreRegex: self.onIgnoreRegex(fnd, ignoreRegex)
				# remove ignored match:
				if not self.checkAllRegex or self.__lineBufferSize > 1:
					# todo: check ignoreRegex.getUnmatchedTupleLines() would be better (fix testGetFailuresMultiLineIgnoreRegex):
					if failRegex:
						self.__lineBuffer = failRegex.getUnmatchedTupleLines()
				# in checkAllRegex mode (tests) all ignore regexes are evaluated:
				if not self.checkAllRegex: break
		return fnd
742
743	def _updateUsers(self, fail, user=()):
744		users = fail.get('users')
745		# only for regex contains user:
746		if user:
747			if not users:
748				fail['users'] = users = set()
749			users.add(user)
750			return users
751		return users
752
	def _mergeFailure(self, mlfid, fail, failRegex):
		"""Merge current failure data with cached data of the same multi-line failure id.

		Used to wrap e. g. a connection id to a host over several log lines;
		returns the merged failure dict.

		nfflgs bit meanings (as set below): 1 - nofail in current failure,
		2 - mlfforget (forget/disconnect), 4 - cached entry had a
		nofail/mlfgained state, 8 - mlfgained in current failure.
		"""
		mlfidFail = self.mlfidCache.get(mlfid) if self.__mlfidCache else None
		users = None
		nfflgs = 0
		if fail.get("mlfgained"):
			nfflgs |= (8|1)
			if not fail.get('nofail'):
				fail['nofail'] = fail["mlfgained"]
		elif fail.get('nofail'): nfflgs |= 1
		if fail.pop('mlfforget', None): nfflgs |= 2
		# if multi-line failure id (connection id) known:
		if mlfidFail:
			mlfidGroups = mlfidFail[1]
			# update users set (hold all users of connect):
			users = self._updateUsers(mlfidGroups, fail.get('user'))
			# be sure we've correct current state ('nofail' and 'mlfgained' only from last failure)
			if mlfidGroups.pop('nofail', None): nfflgs |= 4
			if mlfidGroups.pop('mlfgained', None): nfflgs |= 4
			# if we had no pending failures then clear the matches (they are already provided):
			if (nfflgs & 4) == 0 and not mlfidGroups.get('mlfpending', 0):
				mlfidGroups.pop("matches", None)
			# overwrite multi-line failure with all values, available in fail:
			mlfidGroups.update(((k,v) for k,v in fail.items() if v is not None))
			# new merged failure data:
			fail = mlfidGroups
			# if forget (disconnect/reset) - remove cached entry:
			if nfflgs & 2:
				self.mlfidCache.unset(mlfid)
		elif not (nfflgs & 2): # not mlfforget
			# first line of a new multi-line failure - cache it under its id:
			users = self._updateUsers(fail, fail.get('user'))
			mlfidFail = [self.__lastDate, fail]
			self.mlfidCache.set(mlfid, mlfidFail)
		# check users in order to avoid reset failure by multiple logon-attempts:
		if fail.pop('mlfpending', 0) or users and len(users) > 1:
			# we've pending failures or new user, reset 'nofail' because of failures or multiple users attempts:
			fail.pop('nofail', None)
			fail.pop('mlfgained', None)
			nfflgs &= ~(8|1) # reset nofail and gained
		# merge matches:
		if (nfflgs & 1) == 0: # current nofail state (corresponding users)
			m = fail.pop("nofail-matches", [])
			m += fail.get("matches", [])
			if (nfflgs & 8) == 0: # no gain signaled
				m += failRegex.getMatchedTupleLines()
			fail["matches"] = m
		elif (nfflgs & 3) == 1: # not mlfforget and nofail:
			fail["nofail-matches"] = fail.get("nofail-matches", []) + failRegex.getMatchedTupleLines()
		# return merged:
		return fail
802
803
804	##
805	# Finds the failure in a line given split into time and log parts.
806	#
807	# Uses the failregex pattern to find it and timeregex in order
808	# to find the logging time.
809	# @return a dict with IP and timestamp.
810
	def findFailure(self, tupleLine, date, noDate=False):
		"""Finds the failure in a line given split into time and log parts.

		Uses the failregex patterns to find it (optionally pre-filtered by
		prefregex and reduced by ignoreregex).

		Returns a list of entries [failRegexIndex, ip, date, fail-data-dict],
		one per resolved IP (or failure-id).
		"""
		failList = list()

		ll = logSys.getEffectiveLevel()
		returnRawHost = self.returnRawHost
		cidr = IPAddr.CIDR_UNSPEC
		if self.__useDns == "raw":
			returnRawHost = True
			cidr = IPAddr.CIDR_RAW

		# maintain the (multi-)line buffer, trimmed to the configured size:
		if self.__lineBufferSize > 1:
			self.__lineBuffer.append(tupleLine)
			orgBuffer = self.__lineBuffer = self.__lineBuffer[-self.__lineBufferSize:]
		else:
			orgBuffer = self.__lineBuffer = [tupleLine]
		if ll <= 5: logSys.log(5, "Looking for match of %r", orgBuffer)
		buf = Regex._tupleLinesBuf(orgBuffer)

		# Checks if we must ignore this line (only if fewer ignoreregex than failregex).
		if self.__ignoreRegex and len(self.__ignoreRegex) < len(self.__failRegex) - 2:
			if self._ignoreLine(buf, orgBuffer) is not None:
				# The ignoreregex matched. Return.
				return failList

		# Pre-filter fail regex (if available):
		preGroups = {}
		if self.__prefRegex:
			if ll <= 5: logSys.log(5, "  Looking for prefregex %r", self.__prefRegex.getRegex())
			self.__prefRegex.search(buf, orgBuffer)
			if not self.__prefRegex.hasMatched():
				if ll <= 5: logSys.log(5, "  Prefregex not matched")
				return failList
			preGroups = self.__prefRegex.getGroups()
			if ll <= 7: logSys.log(7, "  Pre-filter matched %s", preGroups)
			repl = preGroups.pop('content', None)
			# Content replacement:
			if repl:
				self.__lineBuffer, buf = [('', '', repl)], None

		# Iterates over all the regular expressions.
		for failRegexIndex, failRegex in enumerate(self.__failRegex):
			try:
				# buffer from tuples if changed:
				if buf is None:
					buf = Regex._tupleLinesBuf(self.__lineBuffer)
				if ll <= 5: logSys.log(5, "  Looking for failregex %d - %r", failRegexIndex, failRegex.getRegex())
				failRegex.search(buf, orgBuffer)
				if not failRegex.hasMatched():
					continue
				# current failure data (matched group dict):
				fail = failRegex.getGroups()
				# The failregex matched.
				if ll <= 7: logSys.log(7, "  Matched failregex %d: %s", failRegexIndex, fail)
				# Checks if we must ignore this match.
				if self.__ignoreRegex and self._ignoreLine(buf, orgBuffer, failRegex) is not None:
					# The ignoreregex matched. Remove ignored match.
					buf = None
					if not self.checkAllRegex:
						break
					continue
				if noDate:
					self._logWarnOnce("_next_noTimeWarn",
						("Found a match but no valid date/time found for %r.", tupleLine[1]),
						("Match without a timestamp: %s", "\n".join(failRegex.getMatchedLines())),
						("Please try setting a custom date pattern (see man page jail.conf(5)).",)
					)
					if date is None and self.checkFindTime: continue
				# we should check all regex (bypass on multi-line, otherwise too complex):
				if not self.checkAllRegex or self.__lineBufferSize > 1:
					self.__lineBuffer, buf = failRegex.getUnmatchedTupleLines(), None
				# merge data if multi-line failure:
				raw = returnRawHost
				if preGroups:
					# prefregex groups are defaults, overridden by failregex groups:
					currFail, fail = fail, preGroups.copy()
					fail.update(currFail)
				# first try to check we have mlfid case (caching of connection id by multi-line):
				mlfid = fail.get('mlfid')
				if mlfid is not None:
					fail = self._mergeFailure(mlfid, fail, failRegex)
					# bypass if no-failure case:
					if fail.get('nofail'):
						if ll <= 7: logSys.log(7, "Nofail by mlfid %r in regex %s: %s",
							mlfid, failRegexIndex, fail.get('mlfforget', "waiting for failure"))
						if not self.checkAllRegex: return failList
				else:
					# matched lines:
					fail["matches"] = fail.get("matches", []) + failRegex.getMatchedTupleLines()
				# failure-id:
				fid = fail.get('fid')
				# ip-address or host:
				host = fail.get('ip4')
				if host is not None:
					cidr = int(fail.get('cidr') or IPAddr.FAM_IPv4)
					raw = True
				else:
					host = fail.get('ip6')
					if host is not None:
						cidr = int(fail.get('cidr') or IPAddr.FAM_IPv6)
						raw = True
				if host is None:
					host = fail.get('dns')
					if host is None:
						# first try to check we have mlfid case (cache connection id):
						if fid is None and mlfid is None:
								# if no failure-id also (obscure case, wrong regex), throw error inside getFailID:
								fid = failRegex.getFailID()
						host = fid
						cidr = IPAddr.CIDR_RAW
						raw = True
				# if mlfid case (not failure):
				if host is None:
					if ll <= 7: logSys.log(7, "No failure-id by mlfid %r in regex %s: %s",
						mlfid, failRegexIndex, fail.get('mlfforget', "waiting for identifier"))
					fail['mlfpending'] = 1; # mark failure is pending
					if not self.checkAllRegex and self.ignorePending: return failList
					ips = [None]
				# if raw - add single ip or failure-id,
				# otherwise expand host to multiple ips using dns (or ignore it if not valid):
				elif raw:
					ip = IPAddr(host, cidr)
					# check host equal failure-id, if not - failure with complex id:
					if fid is not None and fid != host:
						ip = IPAddr(fid, IPAddr.CIDR_RAW)
					ips = [ip]
				# otherwise, try to use dns conversion:
				else:
					ips = DNSUtils.textToIp(host, self.__useDns)
				# if checkAllRegex we must make a copy (to be sure next RE doesn't change merged/cached failure):
				if self.checkAllRegex and mlfid is not None:
					fail = fail.copy()
				# append failure with match to the list:
				for ip in ips:
					failList.append([failRegexIndex, ip, date, fail])
				if not self.checkAllRegex:
					break
			except RegexException as e: # pragma: no cover - unsure if reachable
				logSys.error(e)
		return failList
949
950	def status(self, flavor="basic"):
951		"""Status of failures detected by filter.
952		"""
953		ret = [("Currently failed", self.failManager.size()),
954		       ("Total failed", self.failManager.getFailTotal())]
955		return ret
956
957
class FileFilter(Filter):
	"""Filter that reads failures from log files.

	Keeps a registry of FileContainer objects (one per log path).  The
	concrete monitoring mechanism (polling, inotify, ...) is supplied by
	backend subclasses via the `_addLogPath`/`_delLogPath` hooks.
	"""

	def __init__(self, jail, **kwargs):
		Filter.__init__(self, jail, **kwargs)
		## The log file containers, indexed by path.
		self.__logs = dict()
		## Pending initial seek per path (True or an explicit start time),
		## consumed on the first read in getFailures().
		self.__autoSeek = dict()

	##
	# Add a log file path
	#
	# @param path log file path
	# @param tail if True, start reading at the current end of the file
	# @param autoSeek True (seek to "now - findtime") or an explicit start time

	def addLogPath(self, path, tail=False, autoSeek=True):
		if path in self.__logs:
			if hasattr(self, '_reload_logs') and path in self._reload_logs:
				# reload in progress: path is still wanted, keep it monitored:
				del self._reload_logs[path]
			else:
				# lazy logging args instead of eager string concatenation:
				logSys.error("%s already exists", path)
		else:
			log = FileContainer(path, self.getLogEncoding(), tail)
			db = self.jail.database
			if db is not None:
				# restore last known position from the persistence database:
				lastpos = db.addLog(self.jail, log)
				if lastpos and not tail:
					log.setPos(lastpos)
			self.__logs[path] = log
			logSys.info("Added logfile: %r (pos = %s, hash = %s)", path, log.getPos(), log.getHash())
			if autoSeek and not tail:
				self.__autoSeek[path] = autoSeek
			self._addLogPath(path)			# backend specific

	def _addLogPath(self, path):
		# nothing to do by default
		# to be overridden by backends
		pass

	##
	# Delete a log path
	#
	# @param path the log file to delete

	def delLogPath(self, path):
		try:
			log = self.__logs.pop(path)
		except KeyError:
			# not monitored - nothing to do:
			return
		db = self.jail.database
		if db is not None:
			# persist the last read position before forgetting the log:
			db.updateLog(self.jail, log)
		logSys.info("Removed logfile: %r", path)
		self._delLogPath(path)
		return

	def _delLogPath(self, path): # pragma: no cover - overwritten function
		# nothing to do by default
		# to be overridden by backends
		pass

	##
	# Get the log file names
	#
	# @return log paths

	def getLogPaths(self):
		return list(self.__logs.keys())

	##
	# Get the log containers
	#
	# @return log containers

	def getLogs(self):
		return list(self.__logs.values())

	##
	# Get the count of log containers
	#
	# @return count of log containers

	def getLogCount(self):
		return len(self.__logs)

	##
	# Check whether path is already monitored.
	#
	# @param path The path
	# @return True if the path is already monitored else False

	def containsLogPath(self, path):
		return path in self.__logs

	##
	# Set the log file encoding
	#
	# @param encoding the encoding used with log files

	def setLogEncoding(self, encoding):
		encoding = super(FileFilter, self).setLogEncoding(encoding)
		# propagate the (normalized) encoding to all monitored containers:
		for log in self.__logs.values():
			log.setEncoding(encoding)

	def getLog(self, path):
		return self.__logs.get(path, None)

	##
	# Gets all the failure in the log file.
	#
	# Gets all the failure in the log file which are newer than
	# MyTime.time()-self.findTime. When a failure is detected, a FailTicket
	# is created and is added to the FailManager.

	def getFailures(self, filename, inOperation=None):
		"""Read new lines of the given monitored log file and process them.

		@param filename path of a previously added log file
		@param inOperation overrides the per-log "in operation" flag if not None
		@return False if the file could not be opened, True otherwise
		"""
		log = self.getLog(filename)
		if log is None:
			logSys.error("Unable to get failures in %s", filename)
			return False
		# We should always close log (file), otherwise may be locked (log-rotate, etc.)
		try:
			# Try to open log file.
			try:
				has_content = log.open()
			# see http://python.org/dev/peps/pep-3151/
			except IOError as e:
				logSys.error("Unable to open %s", filename)
				if e.errno != 2: # errno.ENOENT
					logSys.exception(e)
				return False
			except OSError as e: # pragma: no cover - requires race condition to trigger this
				logSys.error("Error opening %s", filename)
				logSys.exception(e)
				return False
			except Exception as e: # pragma: no cover - Requires implementation error in FileContainer to generate
				logSys.error("Internal error in FileContainer open method - please report as a bug to https://github.com/fail2ban/fail2ban/issues")
				logSys.exception(e)
				return False

			# seek to find time for first usage only (prevent performance decline with polling of big files)
			if self.__autoSeek:
				startTime = self.__autoSeek.pop(filename, None)
				if startTime:
					# if default, seek to "current time" - "find time":
					if isinstance(startTime, bool):
						startTime = MyTime.time() - self.getFindTime()
					# prevent completely read of big files first time (after start of service),
					# initial seek to start time using half-interval search algorithm:
					try:
						self.seekToTime(log, startTime)
					except Exception as e: # pragma: no cover
						# log the failure before propagating it (previously the
						# logging statements here were unreachable dead code):
						logSys.error("Error during seek to start time in \"%s\"", filename)
						logSys.exception(e)
						raise

			if has_content:
				while not self.idle:
					line = log.readline()
					if not self.active: break # jail has been stopped
					if not line:
						# The jail reached the bottom, simply set in operation for this log
						# (since we are first time at end of file, growing is only possible after modifications):
						log.inOperation = True
						break
					# acquire in operation from log and process:
					self.inOperation = inOperation if inOperation is not None else log.inOperation
					self.processLineAndAdd(line.rstrip('\r\n'))
		finally:
			log.close()
		db = self.jail.database
		if db is not None:
			db.updateLog(self.jail, log)
		return True

	##
	# Seeks to line with date (search using half-interval search algorithm), to start polling from it
	#

	def seekToTime(self, container, date, accuracy=3):
		"""Seek the container near the first line with timestamp >= date.

		Uses a half-interval (binary) search over the file, probing up to
		five lines at each position for a parsable timestamp.

		@param container the FileContainer to seek in
		@param date the target unix time
		@param accuracy number of "stuck" iterations tolerated before stopping
		"""
		fs = container.getFileSize()
		if logSys.getEffectiveLevel() <= logging.DEBUG:
			logSys.debug("Seek to find time %s (%s), file size %s", date,
				MyTime.time2str(date), fs)
		minp = container.getPos()
		maxp = fs
		tryPos = minp
		lastPos = -1
		foundPos = 0
		foundTime = None
		cntr = 0
		unixTime = None
		movecntr = accuracy
		while maxp > minp:
			if tryPos is None:
				pos = int(minp + (maxp - minp) / 2)
			else:
				pos, tryPos = tryPos, None
			# because container seek will go to start of next line (minus CRLF):
			pos = max(0, pos-2)
			seekpos = pos = container.seek(pos)
			cntr += 1
			# within next 5 lines try to find any legal datetime:
			lncntr = 5
			dateTimeMatch = None
			nextp = None
			while True:
				line = container.readline()
				if not line:
					break
				(timeMatch, template) = self.dateDetector.matchTime(line)
				if timeMatch:
					dateTimeMatch = self.dateDetector.getTime(
						line[timeMatch.start():timeMatch.end()],
						(timeMatch, template))
				else:
					nextp = container.tell()
					if nextp > maxp:
						pos = seekpos
						break
					pos = nextp
				if not dateTimeMatch and lncntr:
					lncntr -= 1
					continue
				break
			# not found at this step - stop searching
			if dateTimeMatch:
				unixTime = dateTimeMatch[0]
				if unixTime >= date:
					if foundTime is None or unixTime <= foundTime:
						foundPos = pos
						foundTime = unixTime
					if pos == maxp:
						pos = seekpos
					if pos < maxp:
						maxp = pos
				else:
					if foundTime is None or unixTime >= foundTime:
						foundPos = pos
						foundTime = unixTime
					if nextp is None:
						nextp = container.tell()
					pos = nextp
					if pos > minp:
						minp = pos
			# if we can't move (position not changed)
			if pos == lastPos:
				movecntr -= 1
				if movecntr <= 0:
					break
				# we have found large area without any date matched
				# or end of search - try min position (because can be end of previous line):
				if minp != lastPos:
					lastPos = tryPos = minp
					continue
				break
			lastPos = pos
		# always use smallest pos, that could be found:
		foundPos = container.seek(minp, False)
		container.setPos(foundPos)
		if logSys.getEffectiveLevel() <= logging.DEBUG:
			logSys.debug("Position %s from %s, found time %s (%s) within %s seeks", lastPos, fs, foundTime,
				(MyTime.time2str(foundTime) if foundTime is not None else ''), cntr)

	def status(self, flavor="basic"):
		"""Status of Filter plus files being monitored.
		"""
		ret = super(FileFilter, self).status(flavor=flavor)
		path = list(self.__logs.keys())
		ret.append(("File list", path))
		return ret

	def stop(self):
		"""Stop monitoring of log-file(s)
		"""
		# stop files monitoring:
		for path in list(self.__logs.keys()):
			self.delLogPath(path)
		# stop thread:
		# NOTE(review): super(Filter, ...) deliberately skips Filter in the MRO
		# and resolves stop() on JailThread - presumably intentional; confirm.
		super(Filter, self).stop()
1236
1237##
1238# FileContainer class.
1239#
1240# This class manages a file handler and takes care of log rotation detection.
1241# In order to detect log rotation, the hash (MD5) of the first line of the file
1242# is computed and compared to the previous hash of this line.
1243
try:
	import hashlib
	try:
		md5sum = hashlib.md5
		# try to use it (several standards like FIPS forbid it); the probe
		# must be bytes - hashlib rejects str on Python 3, and passing ' '
		# here wrongly triggered the SHA-1 fallback on every Python 3 run:
		md5sum(b' ').hexdigest()
	except: # pragma: no cover
		# MD5 unavailable/forbidden (e.g. FIPS mode) - fall back to SHA-1:
		md5sum = hashlib.sha1
except ImportError: # pragma: no cover
	# hashlib was introduced in Python 2.5.  For compatibility with those
	# elderly Pythons, import from md5
	import md5
	md5sum = md5.new
1257
1258
class FileContainer:
	"""Wrapper around a single log file.

	Manages the file handle and remembers the read position between
	openings; detects log rotation by comparing the hash of the first
	line (and the inode) with the previously seen values.
	"""

	def __init__(self, filename, encoding, tail=False):
		self.__filename = filename
		self.setEncoding(encoding)
		self.__tail = tail
		self.__handler = None
		# Try to open the file. Raises an exception if an error occurred.
		handler = open(filename, 'rb')
		stats = os.fstat(handler.fileno())
		self.__ino = stats.st_ino
		try:
			firstLine = handler.readline()
			# Computes the MD5 of the first line (rotation-detection marker).
			self.__hash = md5sum(firstLine).hexdigest()
			# Start at the beginning of file if tail mode is off.
			if tail:
				handler.seek(0, 2)
				self.__pos = handler.tell()
			else:
				self.__pos = 0
		finally:
			handler.close()
		## shows that log is in operation mode (expecting new messages only from here):
		self.inOperation = tail

	def getFileName(self):
		return self.__filename

	def getFileSize(self):
		return os.path.getsize(self.__filename)

	def setEncoding(self, encoding):
		codecs.lookup(encoding) # Raises LookupError if invalid
		self.__encoding = encoding

	def getEncoding(self):
		return self.__encoding

	def getHash(self):
		# hash of the first line, used for log-rotation detection:
		return self.__hash

	def getPos(self):
		return self.__pos

	def setPos(self, value):
		self.__pos = value

	def open(self):
		"""Open the file and seek to the remembered position.

		Returns False if the file is (still) empty, True otherwise.
		Resets the position to 0 if log rotation is detected.
		"""
		self.__handler = open(self.__filename, 'rb')
		# Set the file descriptor to be FD_CLOEXEC (don't leak it to children)
		fd = self.__handler.fileno()
		flags = fcntl.fcntl(fd, fcntl.F_GETFD)
		fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
		# Stat the file before even attempting to read it
		stats = os.fstat(self.__handler.fileno())
		if not stats.st_size:
			# yoh: so it is still an empty file -- nothing should be
			#      read from it yet
			return False
		firstLine = self.__handler.readline()
		# Computes the MD5 of the first line.
		myHash = md5sum(firstLine).hexdigest()
		# Compare hash and inode; a change in either means rotation:
		if self.__hash != myHash or self.__ino != stats.st_ino:
			logSys.log(logging.MSG, "Log rotation detected for %s", self.__filename)
			self.__hash = myHash
			self.__ino = stats.st_ino
			self.__pos = 0
		# Sets the file pointer to the last position.
		self.__handler.seek(self.__pos)
		return True

	def seek(self, offs, endLine=True):
		"""Seek to the given offset, optionally advancing to the end of line.

		Returns the resulting real position.
		"""
		h = self.__handler
		# seek to given position
		h.seek(offs, 0)
		# goto end of next line
		if offs and endLine:
			h.readline()
		# get current real position
		return h.tell()

	def tell(self):
		# get current real position
		return self.__handler.tell()

	@staticmethod
	def decode_line(filename, enc, line):
		"""Decode a raw (bytes) line using `enc`.

		On a decode error, logs a warning (rate-limited per file name) and
		falls back to decoding with replacement characters.
		"""
		try:
			return line.decode(enc, 'strict')
		except (UnicodeDecodeError, UnicodeEncodeError) as e:
			# no `global` needed: _decode_line_warn is only used, never rebound
			lev = 7
			if not _decode_line_warn.get(filename, 0):
				lev = logging.WARNING
				_decode_line_warn.set(filename, 1)
			logSys.log(lev,
				"Error decoding line from '%s' with '%s'.", filename, enc)
			if logSys.getEffectiveLevel() <= lev:
				logSys.log(lev, "Consider setting logencoding=utf-8 (or another appropriate"
					" encoding) for this jail. Continuing"
					" to process line ignoring invalid characters: %r",
					line)
			# decode with replacing error chars:
			line = line.decode(enc, 'replace')
		return line

	def readline(self):
		if self.__handler is None:
			return ""
		return FileContainer.decode_line(
			self.getFileName(), self.getEncoding(), self.__handler.readline())

	def close(self):
		if self.__handler is not None:
			# Saves the last position.
			self.__pos = self.__handler.tell()
			# Closes the file.
			self.__handler.close()
			self.__handler = None
1387
# Per-filename warning cache used by FileContainer.decode_line: limits the
# "error decoding line" warning to once per file (entries expire after 24h)
# so a corrupt log cannot flood the fail2ban log.
_decode_line_warn = Utils.Cache(maxCount=1000, maxTime=24*60*60);
1389
1390
1391##
1392# JournalFilter class.
1393#
1394# Base interface class for systemd journal filters
1395
class JournalFilter(Filter): # pragma: systemd no cover
	"""Base interface class for systemd journal filters."""

	def clearAllParams(self):
		"""Reset all filter parameters, including the journal matches."""
		super(JournalFilter, self).clearAllParams()
		self.delJournalMatch()

	def addJournalMatch(self, match): # pragma: no cover - Base class, not used
		"""Add a journal match filter (no-op in the base class)."""

	def delJournalMatch(self, match=None): # pragma: no cover - Base class, not used
		"""Remove a journal match filter (no-op in the base class)."""

	def getJournalMatch(self, match): # pragma: no cover - Base class, not used
		"""Return the current journal matches (none in the base class)."""
		return []
1410
1411