######################## Filebeat Configuration ############################

# This file is a full configuration example documenting all non-deprecated
# options in comments. For a shorter configuration example, that contains only
# the most common options, please see filebeat.yml in the same directory.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html


#========================== Modules configuration ============================
filebeat.modules:

#------------------------------- System Module -------------------------------
#- module: system
  # Syslog
  #syslog:
    #enabled: true

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

    # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1.
    #var.convert_timezone: false

    # Input configuration (advanced). Any input configuration option
    # can be added under this section.
    #input:

  # Authorization logs
  #auth:
    #enabled: true

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

    # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1.
    #var.convert_timezone: false

    # Input configuration (advanced). Any input configuration option
    # can be added under this section.
    #input:

#------------------------------- Apache2 Module ------------------------------
#- module: apache2
  # Access logs
  #access:
    #enabled: true

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

    # Input configuration (advanced). Any input configuration option
    # can be added under this section.
    #input:

  # Error logs
  #error:
    #enabled: true

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

    # Input configuration (advanced). Any input configuration option
    # can be added under this section.
    #input:

#------------------------------- Auditd Module -------------------------------
#- module: auditd
  #log:
    #enabled: true

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

    # Input configuration (advanced). Any input configuration option
    # can be added under this section.
    #input:

#---------------------------- elasticsearch Module ---------------------------
- module: elasticsearch
  # Server log
  server:
    enabled: true

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

    # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1.
    #var.convert_timezone: false

  gc:
    enabled: true
    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

  audit:
    enabled: true
    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

    # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1.
    #var.convert_timezone: false

  slowlog:
    enabled: true
    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

    # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1.
    #var.convert_timezone: false

  deprecation:
    enabled: true
    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

    # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1.
129 #var.convert_timezone: false 130 131 132#------------------------------- haproxy Module ------------------------------ 133- module: haproxy 134 # All logs 135 log: 136 enabled: true 137 138 # Set which input to use between syslog (default) or file. 139 #var.input: 140 141 # Set custom paths for the log files. If left empty, 142 # Filebeat will choose the paths depending on your OS. 143 #var.paths: 144 145#------------------------------- Icinga Module ------------------------------- 146#- module: icinga 147 # Main logs 148 #main: 149 #enabled: true 150 151 # Set custom paths for the log files. If left empty, 152 # Filebeat will choose the paths depending on your OS. 153 #var.paths: 154 155 # Input configuration (advanced). Any input configuration option 156 # can be added under this section. 157 #input: 158 159 # Debug logs 160 #debug: 161 #enabled: true 162 163 # Set custom paths for the log files. If left empty, 164 # Filebeat will choose the paths depending on your OS. 165 #var.paths: 166 167 # Input configuration (advanced). Any input configuration option 168 # can be added under this section. 169 #input: 170 171 # Startup logs 172 #startup: 173 #enabled: true 174 175 # Set custom paths for the log files. If left empty, 176 # Filebeat will choose the paths depending on your OS. 177 #var.paths: 178 179 # Input configuration (advanced). Any input configuration option 180 # can be added under this section. 181 #input: 182 183#--------------------------------- IIS Module -------------------------------- 184#- module: iis 185 # Access logs 186 #access: 187 #enabled: true 188 189 # Set custom paths for the log files. If left empty, 190 # Filebeat will choose the paths depending on your OS. 191 #var.paths: 192 193 # Input configuration (advanced). Any input configuration option 194 # can be added under this section. 195 #input: 196 197 # Error logs 198 #error: 199 #enabled: true 200 201 # Set custom paths for the log files. 
If left empty, 202 # Filebeat will choose the paths depending on your OS. 203 #var.paths: 204 205 # Input configuration (advanced). Any input configuration option 206 # can be added under this section. 207 #input: 208 209#-------------------------------- Kafka Module ------------------------------- 210- module: kafka 211 # All logs 212 log: 213 enabled: true 214 215 # Set custom paths for Kafka. If left empty, 216 # Filebeat will look under /opt. 217 #var.kafka_home: 218 219 # Set custom paths for the log files. If left empty, 220 # Filebeat will choose the paths depending on your OS. 221 #var.paths: 222 223 # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. 224 #var.convert_timezone: false 225 226#------------------------------- kibana Module ------------------------------- 227- module: kibana 228 # All logs 229 log: 230 enabled: true 231 232 # Set custom paths for the log files. If left empty, 233 # Filebeat will choose the paths depending on your OS. 234 #var.paths: 235 236#------------------------------ logstash Module ------------------------------ 237#- module: logstash 238 # logs 239 #log: 240 #enabled: true 241 242 # Set custom paths for the log files. If left empty, 243 # Filebeat will choose the paths depending on your OS. 244 # var.paths: 245 246 # Slow logs 247 #slowlog: 248 #enabled: true 249 # Set custom paths for the log files. If left empty, 250 # Filebeat will choose the paths depending on your OS. 251 #var.paths: 252 253#------------------------------- mongodb Module ------------------------------ 254#- module: mongodb 255 # Logs 256 #log: 257 #enabled: true 258 259 # Set custom paths for the log files. If left empty, 260 # Filebeat will choose the paths depending on your OS. 261 #var.paths: 262 263 # Input configuration (advanced). Any input configuration option 264 # can be added under this section. 
265 #input: 266 267#-------------------------------- MySQL Module ------------------------------- 268#- module: mysql 269 # Error logs 270 #error: 271 #enabled: true 272 273 # Set custom paths for the log files. If left empty, 274 # Filebeat will choose the paths depending on your OS. 275 #var.paths: 276 277 # Input configuration (advanced). Any input configuration option 278 # can be added under this section. 279 #input: 280 281 # Slow logs 282 #slowlog: 283 #enabled: true 284 285 # Set custom paths for the log files. If left empty, 286 # Filebeat will choose the paths depending on your OS. 287 #var.paths: 288 289 # Input configuration (advanced). Any input configuration option 290 # can be added under this section. 291 #input: 292 293#-------------------------------- Nginx Module ------------------------------- 294#- module: nginx 295 # Access logs 296 #access: 297 #enabled: true 298 299 # Set custom paths for the log files. If left empty, 300 # Filebeat will choose the paths depending on your OS. 301 #var.paths: 302 303 # Input configuration (advanced). Any input configuration option 304 # can be added under this section. 305 #input: 306 307 # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. 308 #var.convert_timezone: false 309 310 # Error logs 311 #error: 312 #enabled: true 313 314 # Set custom paths for the log files. If left empty, 315 # Filebeat will choose the paths depending on your OS. 316 #var.paths: 317 318 # Input configuration (advanced). Any input configuration option 319 # can be added under this section. 320 #input: 321 322 # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. 323 #var.convert_timezone: false 324 325#------------------------------- Osquery Module ------------------------------ 326- module: osquery 327 result: 328 enabled: true 329 330 # Set custom paths for the log files. If left empty, 331 # Filebeat will choose the paths depending on your OS. 
332 #var.paths: 333 334 # If true, all fields created by this module are prefixed with 335 # `osquery.result`. Set to false to copy the fields in the root 336 # of the document. The default is true. 337 #var.use_namespace: true 338 339#----------------------------- PostgreSQL Module ----------------------------- 340#- module: postgresql 341 # Logs 342 #log: 343 #enabled: true 344 345 # Set custom paths for the log files. If left empty, 346 # Filebeat will choose the paths depending on your OS. 347 #var.paths: 348 349 # Input configuration (advanced). Any input configuration option 350 # can be added under this section. 351 #input: 352 353#-------------------------------- Redis Module ------------------------------- 354#- module: redis 355 # Main logs 356 #log: 357 #enabled: true 358 359 # Set custom paths for the log files. If left empty, 360 # Filebeat will choose the paths depending on your OS. 361 #var.paths: ["/var/log/redis/redis-server.log*"] 362 363 # Slow logs, retrieved via the Redis API (SLOWLOG) 364 #slowlog: 365 #enabled: true 366 367 # The Redis hosts to connect to. 368 #var.hosts: ["localhost:6379"] 369 370 # Optional, the password to use when connecting to Redis. 371 #var.password: 372 373#------------------------------- Traefik Module ------------------------------ 374#- module: traefik 375 # Access logs 376 #access: 377 #enabled: true 378 379 # Set custom paths for the log files. If left empty, 380 # Filebeat will choose the paths depending on your OS. 381 #var.paths: 382 383 # Input configuration (advanced). Any input configuration option 384 # can be added under this section. 385 #input: 386 387 388#=========================== Filebeat inputs ============================= 389 390# List of inputs to fetch data. 391filebeat.inputs: 392# Each - is an input. Most options can be set at the input level, so 393# you can use different inputs for various configurations. 394# Below are the input specific configurations. 395 396# Type of the files. 
# Based on this the way the file is read is decided.
# The different types cannot be mixed in one input
#
# Possible options are:
# * log: Reads every line of the log file (default)
# * stdin: Reads the standard in

#------------------------------ Log input --------------------------------
- type: log
  enabled: false
  paths:
    - /var/log/*.log

# NOTE(review): the four inputs below all harvest the same file
# (/var/log/grafana/grafana.log). Filebeat documentation warns against
# defining the same path in multiple inputs, as it can lead to duplicated
# events and unexpected registry/harvester behaviour -- consider
# consolidating into a single input and deriving the level from the line
# content instead. TODO confirm before changing.
# The patterns 'lvl=eror' and 'lvl=dbug' are intentional: Grafana
# abbreviates level names to four characters in its log output.
- type: log
  enabled: true
  paths:
    - /var/log/grafana/grafana.log
  include_lines: ['lvl=info']
  fields:
    app: grafana
    level: info
- type: log
  enabled: true
  paths:
    - /var/log/grafana/grafana.log
  include_lines: ['lvl=eror']
  fields:
    app: grafana
    level: error
- type: log
  enabled: true
  paths:
    - /var/log/grafana/grafana.log
  include_lines: ['lvl=warn']
  fields:
    app: grafana
    level: warning
- type: log
  enabled: true
  paths:
    - /var/log/grafana/grafana.log
  include_lines: ['lvl=dbug']
  fields:
    app: grafana
    level: debug

#- type: log

  # Change to true to enable this input configuration.
  #enabled: false

  # Paths that should be crawled and fetched. Glob based paths.
  # To fetch all ".log" files from a specific level of subdirectories
  # /var/log/*/*.log can be used.
  # For each file found under this path, a harvester is started.
  # Make sure no file is defined twice as this can lead to unexpected behaviour.
  #paths:
    #- /var/log/*.log
    #- c:\programdata\elasticsearch\logs\*

  # Configure the file encoding for reading files with international characters
  # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding).
  # Some sample encodings:
  #   plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk,
  #   hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ...
  #encoding: plain


  # Exclude lines. A list of regular expressions to match. It drops the lines that are
  # matching any regular expression from the list. The include_lines is called before
  # exclude_lines. By default, no lines are dropped.
  #exclude_lines: ['^DBG']

  # Include lines. A list of regular expressions to match. It exports the lines that are
  # matching any regular expression from the list. The include_lines is called before
  # exclude_lines. By default, all the lines are exported.
  #include_lines: ['^ERR', '^WARN']

  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
  # are matching any regular expression from the list. By default, no files are dropped.
  #exclude_files: ['.gz$']

  # Optional additional fields. These fields can be freely picked
  # to add additional information to the crawled log files for filtering
  #fields:
  #  level: debug
  #  review: 1

  # Set to true to store the additional fields as top level fields instead
  # of under the "fields" sub-dictionary. In case of name conflicts with the
  # fields added by Filebeat itself, the custom fields overwrite the default
  # fields.
  #fields_under_root: false

  # Ignore files which were modified more than the defined timespan in the past.
  # ignore_older is disabled by default, so no files are ignored by setting it to 0.
  # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
  #ignore_older: 0

  # How often the input checks for new files in the paths that are specified
  # for harvesting. Specify 1s to scan the directory as frequently as possible
  # without causing Filebeat to scan too frequently. Default: 10s.
  #scan_frequency: 10s

  # Defines the buffer size every harvester uses when fetching the file
  #harvester_buffer_size: 16384

  # Maximum number of bytes a single log event can have
  # All bytes after max_bytes are discarded and not sent. The default is 10MB.
504 # This is especially useful for multiline log messages which can get large. 505 #max_bytes: 10485760 506 507 ### Recursive glob configuration 508 509 # Expand "**" patterns into regular glob patterns. 510 #recursive_glob.enabled: true 511 512 ### JSON configuration 513 514 # Decode JSON options. Enable this if your logs are structured in JSON. 515 # JSON key on which to apply the line filtering and multiline settings. This key 516 # must be top level and its value must be string, otherwise it is ignored. If 517 # no text key is defined, the line filtering and multiline features cannot be used. 518 #json.message_key: 519 520 # By default, the decoded JSON is placed under a "json" key in the output document. 521 # If you enable this setting, the keys are copied top level in the output document. 522 #json.keys_under_root: false 523 524 # If keys_under_root and this setting are enabled, then the values from the decoded 525 # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) 526 # in case of conflicts. 527 #json.overwrite_keys: false 528 529 # If this setting is enabled, Filebeat adds a "error.message" and "error.key: json" key in case of JSON 530 # unmarshaling errors or when a text key is defined in the configuration but cannot 531 # be used. 532 #json.add_error_key: false 533 534 ### Multiline options 535 536 # Multiline can be used for log messages spanning multiple lines. This is common 537 # for Java Stack Traces or C-Line Continuation 538 539 # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ 540 #multiline.pattern: ^\[ 541 542 # Defines if the pattern set under pattern should be negated or not. Default is false. 543 #multiline.negate: false 544 545 # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern 546 # that was (not) matched before or after or as long as a pattern is not matched based on negate. 
547 # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash 548 #multiline.match: after 549 550 # The maximum number of lines that are combined to one event. 551 # In case there are more the max_lines the additional lines are discarded. 552 # Default is 500 553 #multiline.max_lines: 500 554 555 # After the defined timeout, an multiline event is sent even if no new pattern was found to start a new event 556 # Default is 5s. 557 #multiline.timeout: 5s 558 559 # Setting tail_files to true means filebeat starts reading new files at the end 560 # instead of the beginning. If this is used in combination with log rotation 561 # this can mean that the first entries of a new file are skipped. 562 #tail_files: false 563 564 # The Ingest Node pipeline ID associated with this input. If this is set, it 565 # overwrites the pipeline option from the Elasticsearch output. 566 #pipeline: 567 568 # If symlinks is enabled, symlinks are opened and harvested. The harvester is opening the 569 # original for harvesting but will report the symlink name as source. 570 #symlinks: false 571 572 # Backoff values define how aggressively filebeat crawls new files for updates 573 # The default values can be used in most cases. Backoff defines how long it is waited 574 # to check a file again after EOF is reached. Default is 1s which means the file 575 # is checked every second if new lines were added. This leads to a near real time crawling. 576 # Every time a new line appears, backoff is reset to the initial value. 577 #backoff: 1s 578 579 # Max backoff defines what the maximum backoff time is. After having backed off multiple times 580 # from checking the files, the waiting time will never exceed max_backoff independent of the 581 # backoff factor. 
Having it set to 10s means in the worst case a new line can be added to a log 582 # file after having backed off multiple times, it takes a maximum of 10s to read the new line 583 #max_backoff: 10s 584 585 # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor, 586 # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen. 587 # The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached 588 #backoff_factor: 2 589 590 # Max number of harvesters that are started in parallel. 591 # Default is 0 which means unlimited 592 #harvester_limit: 0 593 594 ### Harvester closing options 595 596 # Close inactive closes the file handler after the predefined period. 597 # The period starts when the last line of the file was, not the file ModTime. 598 # Time strings like 2h (2 hours), 5m (5 minutes) can be used. 599 #close_inactive: 5m 600 601 # Close renamed closes a file handler when the file is renamed or rotated. 602 # Note: Potential data loss. Make sure to read and understand the docs for this option. 603 #close_renamed: false 604 605 # When enabling this option, a file handler is closed immediately in case a file can't be found 606 # any more. In case the file shows up again later, harvesting will continue at the last known position 607 # after scan_frequency. 608 #close_removed: true 609 610 # Closes the file handler as soon as the harvesters reaches the end of the file. 611 # By default this option is disabled. 612 # Note: Potential data loss. Make sure to read and understand the docs for this option. 613 #close_eof: false 614 615 ### State options 616 617 # Files for the modification data is older then clean_inactive the state from the registry is removed 618 # By default this is disabled. 
619 #clean_inactive: 0 620 621 # Removes the state for file which cannot be found on disk anymore immediately 622 #clean_removed: true 623 624 # Close timeout closes the harvester after the predefined time. 625 # This is independent if the harvester did finish reading the file or not. 626 # By default this option is disabled. 627 # Note: Potential data loss. Make sure to read and understand the docs for this option. 628 #close_timeout: 0 629 630 # Defines if inputs is enabled 631 #enabled: true 632 633#----------------------------- Stdin input ------------------------------- 634# Configuration to use stdin input 635#- type: stdin 636 637#------------------------- Redis slowlog input --------------------------- 638# Experimental: Config options for the redis slow log input 639#- type: redis 640 #enabled: false 641 642 # List of hosts to pool to retrieve the slow log information. 643 #hosts: ["localhost:6379"] 644 645 # How often the input checks for redis slow log. 646 #scan_frequency: 10s 647 648 # Timeout after which time the input should return an error 649 #timeout: 1s 650 651 # Network type to be used for redis connection. Default: tcp 652 #network: tcp 653 654 # Max number of concurrent connections. Default: 10 655 #maxconn: 10 656 657 # Redis AUTH password. Empty by default. 
658 #password: foobared 659 660#------------------------------ Udp input -------------------------------- 661# Experimental: Config options for the udp input 662#- type: udp 663 #enabled: false 664 665 # Maximum size of the message received over UDP 666 #max_message_size: 10KiB 667 668#------------------------------ TCP input -------------------------------- 669# Experimental: Config options for the TCP input 670#- type: tcp 671 #enabled: false 672 673 # The host and port to receive the new event 674 #host: "localhost:9000" 675 676 # Character used to split new message 677 #line_delimiter: "\n" 678 679 # Maximum size in bytes of the message received over TCP 680 #max_message_size: 20MiB 681 682 # The number of seconds of inactivity before a remote connection is closed. 683 #timeout: 300s 684 685 # Use SSL settings for TCP. 686 #ssl.enabled: true 687 688 # List of supported/valid TLS versions. By default all TLS versions 1.0 up to 689 # 1.2 are enabled. 690 #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] 691 692 # SSL configuration. By default is off. 693 # List of root certificates for client verifications 694 #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] 695 696 # Certificate for SSL server authentication. 697 #ssl.certificate: "/etc/pki/client/cert.pem" 698 699 # Server Certificate Key, 700 #ssl.key: "/etc/pki/client/cert.key" 701 702 # Optional passphrase for decrypting the Certificate Key. 703 #ssl.key_passphrase: '' 704 705 # Configure cipher suites to be used for SSL connections. 706 #ssl.cipher_suites: [] 707 708 # Configure curve types for ECDHE based cipher suites. 709 #ssl.curve_types: [] 710 711 # Configure what types of client authentication are supported. Valid options 712 # are `none`, `optional`, and `required`. Default is required. 
713 #ssl.client_authentication: "required" 714 715#------------------------------ Syslog input -------------------------------- 716# Experimental: Config options for the Syslog input 717# Accept RFC3164 formatted syslog event via UDP. 718#- type: syslog 719 #enabled: false 720 #protocol.udp: 721 # The host and port to receive the new event 722 #host: "localhost:9000" 723 724 # Maximum size of the message received over UDP 725 #max_message_size: 10KiB 726 727# Accept RFC3164 formatted syslog event via TCP. 728#- type: syslog 729 #enabled: false 730 731 #protocol.tcp: 732 # The host and port to receive the new event 733 #host: "localhost:9000" 734 735 # Character used to split new message 736 #line_delimiter: "\n" 737 738 # Maximum size in bytes of the message received over TCP 739 #max_message_size: 20MiB 740 741 # The number of seconds of inactivity before a remote connection is closed. 742 #timeout: 300s 743 744 # Use SSL settings for TCP. 745 #ssl.enabled: true 746 747 # List of supported/valid TLS versions. By default all TLS versions 1.0 up to 748 # 1.2 are enabled. 749 #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] 750 751 # SSL configuration. By default is off. 752 # List of root certificates for client verifications 753 #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] 754 755 # Certificate for SSL server authentication. 756 #ssl.certificate: "/etc/pki/client/cert.pem" 757 758 # Server Certificate Key, 759 #ssl.key: "/etc/pki/client/cert.key" 760 761 # Optional passphrase for decrypting the Certificate Key. 762 #ssl.key_passphrase: '' 763 764 # Configure cipher suites to be used for SSL connections. 765 #ssl.cipher_suites: [] 766 767 # Configure curve types for ECDHE based cipher suites. 768 #ssl.curve_types: [] 769 770 # Configure what types of client authentication are supported. Valid options 771 # are `none`, `optional`, and `required`. Default is required. 
772 #ssl.client_authentication: "required" 773 774#------------------------------ Docker input -------------------------------- 775# Experimental: Docker input reads and parses `json-file` logs from Docker 776#- type: docker 777 #enabled: false 778 779 # Combine partial lines flagged by `json-file` format 780 #combine_partials: true 781 782 # Use this to read from all containers, replace * with a container id to read from one: 783 #containers: 784 # stream: all # can be all, stdout or stderr 785 # ids: 786 # - '*' 787 788#========================== Filebeat autodiscover ============================== 789 790# Autodiscover allows you to detect changes in the system and spawn new modules 791# or inputs as they happen. 792 793#filebeat.autodiscover: 794 # List of enabled autodiscover providers 795# providers: 796# - type: docker 797# templates: 798# - condition: 799# equals.docker.container.image: busybox 800# config: 801# - type: log 802# paths: 803# - /var/lib/docker/containers/${data.docker.container.id}/*.log 804 805#========================= Filebeat global options ============================ 806 807# Name of the registry file. If a relative path is used, it is considered relative to the 808# data path. 809#filebeat.registry_file: ${path.data}/registry 810 811# The permissions mask to apply on registry file. The default value is 0600. 812# Must be a valid Unix-style file permissions mask expressed in octal notation. 813# This option is not supported on Windows. 814#filebeat.registry_file_permissions: 0600 815 816# The timeout value that controls when registry entries are written to disk 817# (flushed). When an unwritten update exceeds this value, it triggers a write to 818# disk. When registry_flush is set to 0s, the registry is written to disk after 819# each batch of events has been published successfully. The default value is 0s. 
#filebeat.registry_flush: 0s

# By default Ingest pipelines are not updated if a pipeline with the same ID
# already exists. If this option is enabled Filebeat overwrites pipelines
# every time a new Elasticsearch connection is established.
#filebeat.overwrite_pipelines: false

# How long filebeat waits on shutdown for the publisher to finish.
# Default is 0, not waiting.
#filebeat.shutdown_timeout: 0

# Enable filebeat config reloading
#filebeat.config:
  #inputs:
    #enabled: false
    #path: inputs.d/*.yml
    #reload.enabled: true
    #reload.period: 10s
  #modules:
    #enabled: false
    #path: modules.d/*.yml
    #reload.enabled: true
    #reload.period: 10s

#================================ General ======================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
# If this option is not defined, the hostname is used.
#name:

# The tags of the shipper are included in their own field with each
# transaction published. Tags make it easy to group servers by different
# logical properties.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output. Fields can be scalar values, arrays, dictionaries, or any nested
# combination of these.
#fields:
#  env: staging

# If this option is set to true, the custom fields are stored as top-level
# fields in the output document instead of being grouped under a fields
# sub-dictionary. Default is false.
#fields_under_root: false

# Internal queue configuration for buffering events to be published.
#queue:
  # Queue type by name (default 'mem')
  # The memory queue will present all available events (up to the outputs
  # bulk_max_size) to the output, the moment the output is ready to serve
  # another batch of events.
  #mem:
    # Max number of events the queue can buffer.
    #events: 4096

    # Hints the minimum number of events stored in the queue,
    # before providing a batch of events to the outputs.
    # The default value is set to 2048.
    # A value of 0 ensures events are immediately available
    # to be sent to the outputs.
    #flush.min_events: 2048

    # Maximum duration after which events are available to the outputs,
    # if the number of events stored in the queue is < min_flush_events.
    #flush.timeout: 1s

  # The spool queue will store events in a local spool file, before
  # forwarding the events to the outputs.
  #
  # Beta: spooling to disk is currently a beta feature. Use with care.
  #
  # The spool file is a circular buffer, which blocks once the file/buffer is full.
  # Events are put into a write buffer and flushed once the write buffer
  # is full or the flush_timeout is triggered.
  # Once ACKed by the output, events are removed immediately from the queue,
  # making space for new events to be persisted.
  #spool:
    # The file namespace configures the file path and the file creation settings.
    # Once the file exists, the `size`, `page_size` and `prealloc` settings
    # will have no more effect.
    #file:
      # Location of spool file. The default value is ${path.data}/spool.dat.
      #path: "${path.data}/spool.dat"

      # Configure file permissions if file is created. The default value is 0600.
      #permissions: 0600

      # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB.
      #size: 100MiB

      # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB.
      #page_size: 4KiB

      # If prealloc is set, the required space for the file is reserved using
      # truncate. The default value is true.
      #prealloc: true

    # Spool writer settings
    # Events are serialized into a write buffer. The write buffer is flushed if:
    # - The buffer limit has been reached.
    # - The configured limit of buffered events is reached.
    # - The flush timeout is triggered.
    #write:
      # Sets the write buffer size.
      #buffer_size: 1MiB

      # Maximum duration after which events are flushed if the write buffer
      # is not full yet. The default value is 1s.
      #flush.timeout: 1s

      # Number of maximum buffered events. The write buffer is flushed once the
      # limit is reached.
      #flush.events: 16384

      # Configure the on-disk event encoding. The encoding can be changed
      # between restarts.
      # Valid encodings are: json, ubjson, and cbor.
      #codec: cbor
    #read:
      # Reader flush timeout, waiting for more events to become available, so
      # to fill a complete batch as required by the outputs.
      # If flush_timeout is 0, all available events are forwarded to the
      # outputs immediately.
      # The default value is 0s.
      #flush.timeout: 0s

# Sets the maximum number of CPUs that can be executing simultaneously. The
# default is the number of logical CPUs available in the system.
#max_procs:

#================================ Processors ===================================

# Processors are used to reduce the number of fields in the exported event or to
# enhance the event with external metadata. This section defines a list of
# processors that are applied one by one and the first one receives the initial
# event:
#
#   event -> filter1 -> event1 -> filter2 -> event2 ...
#
# The supported processors are drop_fields, drop_event, include_fields,
# decode_json_fields, and add_cloud_metadata.
963# 964# For example, you can use the following processors to keep the fields that 965# contain CPU load percentages, but remove the fields that contain CPU ticks 966# values: 967# 968#processors: 969#- include_fields: 970# fields: ["cpu"] 971#- drop_fields: 972# fields: ["cpu.user", "cpu.system"] 973# 974# The following example drops the events that have the HTTP response code 200: 975# 976#processors: 977#- drop_event: 978# when: 979# equals: 980# http.code: 200 981# 982# The following example renames the field a to b: 983# 984#processors: 985#- rename: 986# fields: 987# - from: "a" 988# to: "b" 989# 990# The following example tokenizes the string into fields: 991# 992#processors: 993#- dissect: 994# tokenizer: "%{key1} - %{key2}" 995# field: "message" 996# target_prefix: "dissect" 997# 998# The following example enriches each event with metadata from the cloud 999# provider about the host machine. It works on EC2, GCE, DigitalOcean, 1000# Tencent Cloud, and Alibaba Cloud. 1001# 1002#processors: 1003#- add_cloud_metadata: ~ 1004# 1005# The following example enriches each event with the machine's local time zone 1006# offset from UTC. 1007# 1008#processors: 1009#- add_locale: 1010# format: offset 1011# 1012# The following example enriches each event with docker metadata, it matches 1013# given fields to an existing container id and adds info from that container: 1014# 1015#processors: 1016#- add_docker_metadata: 1017# host: "unix:///var/run/docker.sock" 1018# match_fields: ["system.process.cgroup.id"] 1019# match_pids: ["process.pid", "process.ppid"] 1020# match_source: true 1021# match_source_index: 4 1022# match_short_id: false 1023# cleanup_timeout: 60 1024# labels.dedot: false 1025# # To connect to Docker over TLS you must specify a client and CA certificate. 
1026# #ssl: 1027# # certificate_authority: "/etc/pki/root/ca.pem" 1028# # certificate: "/etc/pki/client/cert.pem" 1029# # key: "/etc/pki/client/cert.key" 1030# 1031# The following example enriches each event with docker metadata, it matches 1032# container id from log path available in `source` field (by default it expects 1033# it to be /var/lib/docker/containers/*/*.log). 1034# 1035#processors: 1036#- add_docker_metadata: ~ 1037# 1038# The following example enriches each event with host metadata. 1039# 1040#processors: 1041#- add_host_metadata: 1042# netinfo.enabled: false 1043# 1044# The following example enriches each event with process metadata using 1045# process IDs included in the event. 1046# 1047#processors: 1048#- add_process_metadata: 1049# match_pids: ["system.process.ppid"] 1050# target: system.process.parent 1051# 1052# The following example decodes fields containing JSON strings 1053# and replaces the strings with valid JSON objects. 1054# 1055#processors: 1056#- decode_json_fields: 1057# fields: ["field1", "field2", ...] 1058# process_array: false 1059# max_depth: 1 1060# target: "" 1061# overwrite_keys: false 1062 1063#============================= Elastic Cloud ================================== 1064 1065# These settings simplify using filebeat with the Elastic Cloud (https://cloud.elastic.co/). 1066 1067# The cloud.id setting overwrites the `output.elasticsearch.hosts` and 1068# `setup.kibana.host` options. 1069# You can find the `cloud.id` in the Elastic Cloud web UI. 1070#cloud.id: 1071 1072# The cloud.auth setting overwrites the `output.elasticsearch.username` and 1073# `output.elasticsearch.password` settings. The format is `<user>:<pass>`. 1074#cloud.auth: 1075 1076#================================ Outputs ====================================== 1077 1078# Configure what output to use when sending the data collected by the beat. 
1079 1080#-------------------------- Elasticsearch output ------------------------------- 1081output.elasticsearch: 1082 # Boolean flag to enable or disable the output module. 1083 #enabled: true 1084 1085 # Array of hosts to connect to. 1086 # Scheme and port can be left out and will be set to the default (http and 9200) 1087 # In case you specify and additional path, the scheme is required: http://localhost:9200/path 1088 # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 1089 hosts: ["elasticsearch7:9200"] 1090 1091 # Enabled ilm (beta) to use index lifecycle management instead daily indices. 1092 #ilm.enabled: false 1093 #ilm.rollover_alias: "filebeat" 1094 #ilm.pattern: "{now/d}-000001" 1095 1096 # Set gzip compression level. 1097 #compression_level: 0 1098 1099 # Configure escaping HTML symbols in strings. 1100 #escape_html: true 1101 1102 # Optional protocol and basic auth credentials. 1103 #protocol: "https" 1104 #username: "elastic" 1105 #password: "changeme" 1106 1107 # Dictionary of HTTP parameters to pass within the URL with index operations. 1108 #parameters: 1109 #param1: value1 1110 #param2: value2 1111 1112 # Number of workers per Elasticsearch host. 1113 #worker: 1 1114 1115 # Optional index name. The default is "filebeat" plus date 1116 # and generates [filebeat-]YYYY.MM.DD keys. 1117 # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. 1118 index: "filebeat-%{+yyyy.MM.dd}" 1119 1120 # Optional ingest node pipeline. By default no pipeline will be used. 1121 #pipeline: "" 1122 1123 # Optional HTTP path 1124 #path: "/elasticsearch" 1125 1126 # Custom HTTP headers to add to each request 1127 #headers: 1128 # X-My-Header: Contents of the header 1129 1130 # Proxy server URL 1131 #proxy_url: http://proxy:3128 1132 1133 # The number of times a particular Elasticsearch index operation is attempted. 
If 1134 # the indexing operation doesn't succeed after this many retries, the events are 1135 # dropped. The default is 3. 1136 #max_retries: 3 1137 1138 # The maximum number of events to bulk in a single Elasticsearch bulk API index request. 1139 # The default is 50. 1140 #bulk_max_size: 50 1141 1142 # The number of seconds to wait before trying to reconnect to Elasticsearch 1143 # after a network error. After waiting backoff.init seconds, the Beat 1144 # tries to reconnect. If the attempt fails, the backoff timer is increased 1145 # exponentially up to backoff.max. After a successful connection, the backoff 1146 # timer is reset. The default is 1s. 1147 #backoff.init: 1s 1148 1149 # The maximum number of seconds to wait before attempting to connect to 1150 # Elasticsearch after a network error. The default is 60s. 1151 #backoff.max: 60s 1152 1153 # Configure HTTP request timeout before failing a request to Elasticsearch. 1154 #timeout: 90 1155 1156 # Use SSL settings for HTTPS. 1157 #ssl.enabled: true 1158 1159 # Configure SSL verification mode. If `none` is configured, all server hosts 1160 # and certificates will be accepted. In this mode, SSL-based connections are 1161 # susceptible to man-in-the-middle attacks. Use only for testing. Default is 1162 # `full`. 1163 #ssl.verification_mode: full 1164 1165 # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to 1166 # 1.2 are enabled. 1167 #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] 1168 1169 # List of root certificates for HTTPS server verifications 1170 #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] 1171 1172 # Certificate for SSL client authentication 1173 #ssl.certificate: "/etc/pki/client/cert.pem" 1174 1175 # Client certificate key 1176 #ssl.key: "/etc/pki/client/cert.key" 1177 1178 # Optional passphrase for decrypting the certificate key. 
1179 #ssl.key_passphrase: '' 1180 1181 # Configure cipher suites to be used for SSL connections 1182 #ssl.cipher_suites: [] 1183 1184 # Configure curve types for ECDHE-based cipher suites 1185 #ssl.curve_types: [] 1186 1187 # Configure what types of renegotiation are supported. Valid options are 1188 # never, once, and freely. Default is never. 1189 #ssl.renegotiation: never 1190 1191 1192#----------------------------- Logstash output --------------------------------- 1193#output.logstash: 1194 # Boolean flag to enable or disable the output module. 1195 #enabled: true 1196 1197 # The Logstash hosts 1198 #hosts: ["localhost:5044"] 1199 1200 # Number of workers per Logstash host. 1201 #worker: 1 1202 1203 # Set gzip compression level. 1204 #compression_level: 3 1205 1206 # Configure escaping HTML symbols in strings. 1207 #escape_html: true 1208 1209 # Optional maximum time to live for a connection to Logstash, after which the 1210 # connection will be re-established. A value of `0s` (the default) will 1211 # disable this feature. 1212 # 1213 # Not yet supported for async connections (i.e. with the "pipelining" option set) 1214 #ttl: 30s 1215 1216 # Optionally load-balance events between Logstash hosts. Default is false. 1217 #loadbalance: false 1218 1219 # Number of batches to be sent asynchronously to Logstash while processing 1220 # new batches. 1221 #pipelining: 2 1222 1223 # If enabled only a subset of events in a batch of events is transferred per 1224 # transaction. The number of events to be sent increases up to `bulk_max_size` 1225 # if no error is encountered. 1226 #slow_start: false 1227 1228 # The number of seconds to wait before trying to reconnect to Logstash 1229 # after a network error. After waiting backoff.init seconds, the Beat 1230 # tries to reconnect. If the attempt fails, the backoff timer is increased 1231 # exponentially up to backoff.max. After a successful connection, the backoff 1232 # timer is reset. The default is 1s. 
1233 #backoff.init: 1s 1234 1235 # The maximum number of seconds to wait before attempting to connect to 1236 # Logstash after a network error. The default is 60s. 1237 #backoff.max: 60s 1238 1239 # Optional index name. The default index name is set to filebeat 1240 # in all lowercase. 1241 #index: 'filebeat' 1242 1243 # SOCKS5 proxy server URL 1244 #proxy_url: socks5://user:password@socks5-server:2233 1245 1246 # Resolve names locally when using a proxy server. Defaults to false. 1247 #proxy_use_local_resolver: false 1248 1249 # Enable SSL support. SSL is automatically enabled if any SSL setting is set. 1250 #ssl.enabled: true 1251 1252 # Configure SSL verification mode. If `none` is configured, all server hosts 1253 # and certificates will be accepted. In this mode, SSL based connections are 1254 # susceptible to man-in-the-middle attacks. Use only for testing. Default is 1255 # `full`. 1256 #ssl.verification_mode: full 1257 1258 # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to 1259 # 1.2 are enabled. 1260 #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] 1261 1262 # Optional SSL configuration options. SSL is off by default. 1263 # List of root certificates for HTTPS server verifications 1264 #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] 1265 1266 # Certificate for SSL client authentication 1267 #ssl.certificate: "/etc/pki/client/cert.pem" 1268 1269 # Client certificate key 1270 #ssl.key: "/etc/pki/client/cert.key" 1271 1272 # Optional passphrase for decrypting the Certificate Key. 1273 #ssl.key_passphrase: '' 1274 1275 # Configure cipher suites to be used for SSL connections 1276 #ssl.cipher_suites: [] 1277 1278 # Configure curve types for ECDHE-based cipher suites 1279 #ssl.curve_types: [] 1280 1281 # Configure what types of renegotiation are supported. Valid options are 1282 # never, once, and freely. Default is never. 
1283 #ssl.renegotiation: never 1284 1285 # The number of times to retry publishing an event after a publishing failure. 1286 # After the specified number of retries, the events are typically dropped. 1287 # Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting 1288 # and retry until all events are published. Set max_retries to a value less 1289 # than 0 to retry until all events are published. The default is 3. 1290 #max_retries: 3 1291 1292 # The maximum number of events to bulk in a single Logstash request. The 1293 # default is 2048. 1294 #bulk_max_size: 2048 1295 1296 # The number of seconds to wait for responses from the Logstash server before 1297 # timing out. The default is 30s. 1298 #timeout: 30s 1299 1300#------------------------------- Kafka output ---------------------------------- 1301#output.kafka: 1302 # Boolean flag to enable or disable the output module. 1303 #enabled: true 1304 1305 # The list of Kafka broker addresses from which to fetch the cluster metadata. 1306 # The cluster metadata contain the actual Kafka brokers events are published 1307 # to. 1308 #hosts: ["localhost:9092"] 1309 1310 # The Kafka topic used for produced events. The setting can be a format string 1311 # using any event field. To set the topic from document type use `%{[type]}`. 1312 #topic: beats 1313 1314 # The Kafka event key setting. Use format string to create a unique event key. 1315 # By default no event key will be generated. 1316 #key: '' 1317 1318 # The Kafka event partitioning strategy. Default hashing strategy is `hash` 1319 # using the `output.kafka.key` setting or randomly distributes events if 1320 # `output.kafka.key` is not configured. 1321 #partition.hash: 1322 # If enabled, events will only be published to partitions with reachable 1323 # leaders. Default is false. 1324 #reachable_only: false 1325 1326 # Configure alternative event field names used to compute the hash value. 1327 # If empty `output.kafka.key` setting will be used. 
1328 # Default value is empty list. 1329 #hash: [] 1330 1331 # Authentication details. Password is required if username is set. 1332 #username: '' 1333 #password: '' 1334 1335 # Kafka version filebeat is assumed to run against. Defaults to the "1.0.0". 1336 #version: '1.0.0' 1337 1338 # Configure JSON encoding 1339 #codec.json: 1340 # Pretty-print JSON event 1341 #pretty: false 1342 1343 # Configure escaping HTML symbols in strings. 1344 #escape_html: true 1345 1346 # Metadata update configuration. Metadata contains leader information 1347 # used to decide which broker to use when publishing. 1348 #metadata: 1349 # Max metadata request retry attempts when cluster is in middle of leader 1350 # election. Defaults to 3 retries. 1351 #retry.max: 3 1352 1353 # Wait time between retries during leader elections. Default is 250ms. 1354 #retry.backoff: 250ms 1355 1356 # Refresh metadata interval. Defaults to every 10 minutes. 1357 #refresh_frequency: 10m 1358 1359 # The number of concurrent load-balanced Kafka output workers. 1360 #worker: 1 1361 1362 # The number of times to retry publishing an event after a publishing failure. 1363 # After the specified number of retries, events are typically dropped. 1364 # Some Beats, such as Filebeat, ignore the max_retries setting and retry until 1365 # all events are published. Set max_retries to a value less than 0 to retry 1366 # until all events are published. The default is 3. 1367 #max_retries: 3 1368 1369 # The maximum number of events to bulk in a single Kafka request. The default 1370 # is 2048. 1371 #bulk_max_size: 2048 1372 1373 # The number of seconds to wait for responses from the Kafka brokers before 1374 # timing out. The default is 30s. 1375 #timeout: 30s 1376 1377 # The maximum duration a broker will wait for number of required ACKs. The 1378 # default is 10s. 1379 #broker_timeout: 10s 1380 1381 # The number of messages buffered for each Kafka broker. The default is 256. 
1382 #channel_buffer_size: 256 1383 1384 # The keep-alive period for an active network connection. If 0s, keep-alives 1385 # are disabled. The default is 0 seconds. 1386 #keep_alive: 0 1387 1388 # Sets the output compression codec. Must be one of none, snappy and gzip. The 1389 # default is gzip. 1390 #compression: gzip 1391 1392 # Set the compression level. Currently only gzip provides a compression level 1393 # between 0 and 9. The default value is chosen by the compression algorithm. 1394 #compression_level: 4 1395 1396 # The maximum permitted size of JSON-encoded messages. Bigger messages will be 1397 # dropped. The default value is 1000000 (bytes). This value should be equal to 1398 # or less than the broker's message.max.bytes. 1399 #max_message_bytes: 1000000 1400 1401 # The ACK reliability level required from broker. 0=no response, 1=wait for 1402 # local commit, -1=wait for all replicas to commit. The default is 1. Note: 1403 # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently 1404 # on error. 1405 #required_acks: 1 1406 1407 # The configurable ClientID used for logging, debugging, and auditing 1408 # purposes. The default is "beats". 1409 #client_id: beats 1410 1411 # Enable SSL support. SSL is automatically enabled if any SSL setting is set. 1412 #ssl.enabled: true 1413 1414 # Optional SSL configuration options. SSL is off by default. 1415 # List of root certificates for HTTPS server verifications 1416 #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] 1417 1418 # Configure SSL verification mode. If `none` is configured, all server hosts 1419 # and certificates will be accepted. In this mode, SSL based connections are 1420 # susceptible to man-in-the-middle attacks. Use only for testing. Default is 1421 # `full`. 1422 #ssl.verification_mode: full 1423 1424 # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to 1425 # 1.2 are enabled. 
1426 #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] 1427 1428 # Certificate for SSL client authentication 1429 #ssl.certificate: "/etc/pki/client/cert.pem" 1430 1431 # Client Certificate Key 1432 #ssl.key: "/etc/pki/client/cert.key" 1433 1434 # Optional passphrase for decrypting the Certificate Key. 1435 #ssl.key_passphrase: '' 1436 1437 # Configure cipher suites to be used for SSL connections 1438 #ssl.cipher_suites: [] 1439 1440 # Configure curve types for ECDHE-based cipher suites 1441 #ssl.curve_types: [] 1442 1443 # Configure what types of renegotiation are supported. Valid options are 1444 # never, once, and freely. Default is never. 1445 #ssl.renegotiation: never 1446 1447#------------------------------- Redis output ---------------------------------- 1448#output.redis: 1449 # Boolean flag to enable or disable the output module. 1450 #enabled: true 1451 1452 # Configure JSON encoding 1453 #codec.json: 1454 # Pretty print json event 1455 #pretty: false 1456 1457 # Configure escaping HTML symbols in strings. 1458 #escape_html: true 1459 1460 # The list of Redis servers to connect to. If load-balancing is enabled, the 1461 # events are distributed to the servers in the list. If one server becomes 1462 # unreachable, the events are distributed to the reachable servers only. 1463 #hosts: ["localhost:6379"] 1464 1465 # The name of the Redis list or channel the events are published to. The 1466 # default is filebeat. 1467 #key: filebeat 1468 1469 # The password to authenticate to Redis with. The default is no authentication. 1470 #password: 1471 1472 # The Redis database number where the events are published. The default is 0. 1473 #db: 0 1474 1475 # The Redis data type to use for publishing events. If the data type is list, 1476 # the Redis RPUSH command is used. If the data type is channel, the Redis 1477 # PUBLISH command is used. The default value is list. 
1478 #datatype: list 1479 1480 # The number of workers to use for each host configured to publish events to 1481 # Redis. Use this setting along with the loadbalance option. For example, if 1482 # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each 1483 # host). 1484 #worker: 1 1485 1486 # If set to true and multiple hosts or workers are configured, the output 1487 # plugin load balances published events onto all Redis hosts. If set to false, 1488 # the output plugin sends all events to only one host (determined at random) 1489 # and will switch to another host if the currently selected one becomes 1490 # unreachable. The default value is true. 1491 #loadbalance: true 1492 1493 # The Redis connection timeout in seconds. The default is 5 seconds. 1494 #timeout: 5s 1495 1496 # The number of times to retry publishing an event after a publishing failure. 1497 # After the specified number of retries, the events are typically dropped. 1498 # Some Beats, such as Filebeat, ignore the max_retries setting and retry until 1499 # all events are published. Set max_retries to a value less than 0 to retry 1500 # until all events are published. The default is 3. 1501 #max_retries: 3 1502 1503 # The number of seconds to wait before trying to reconnect to Redis 1504 # after a network error. After waiting backoff.init seconds, the Beat 1505 # tries to reconnect. If the attempt fails, the backoff timer is increased 1506 # exponentially up to backoff.max. After a successful connection, the backoff 1507 # timer is reset. The default is 1s. 1508 #backoff.init: 1s 1509 1510 # The maximum number of seconds to wait before attempting to connect to 1511 # Redis after a network error. The default is 60s. 1512 #backoff.max: 60s 1513 1514 # The maximum number of events to bulk in a single Redis request or pipeline. 1515 # The default is 2048. 1516 #bulk_max_size: 2048 1517 1518 # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. 
The 1519 # value must be a URL with a scheme of socks5://. 1520 #proxy_url: 1521 1522 # This option determines whether Redis hostnames are resolved locally when 1523 # using a proxy. The default value is false, which means that name resolution 1524 # occurs on the proxy server. 1525 #proxy_use_local_resolver: false 1526 1527 # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. 1528 #ssl.enabled: true 1529 1530 # Configure SSL verification mode. If `none` is configured, all server hosts 1531 # and certificates will be accepted. In this mode, SSL based connections are 1532 # susceptible to man-in-the-middle attacks. Use only for testing. Default is 1533 # `full`. 1534 #ssl.verification_mode: full 1535 1536 # List of supported/valid TLS versions. By default all TLS versions 1.0 up to 1537 # 1.2 are enabled. 1538 #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] 1539 1540 # Optional SSL configuration options. SSL is off by default. 1541 # List of root certificates for HTTPS server verifications 1542 #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] 1543 1544 # Certificate for SSL client authentication 1545 #ssl.certificate: "/etc/pki/client/cert.pem" 1546 1547 # Client Certificate Key 1548 #ssl.key: "/etc/pki/client/cert.key" 1549 1550 # Optional passphrase for decrypting the Certificate Key. 1551 #ssl.key_passphrase: '' 1552 1553 # Configure cipher suites to be used for SSL connections 1554 #ssl.cipher_suites: [] 1555 1556 # Configure curve types for ECDHE based cipher suites 1557 #ssl.curve_types: [] 1558 1559 # Configure what types of renegotiation are supported. Valid options are 1560 # never, once, and freely. Default is never. 1561 #ssl.renegotiation: never 1562 1563#------------------------------- File output ----------------------------------- 1564#output.file: 1565 # Boolean flag to enable or disable the output module. 
1566 #enabled: true 1567 1568 # Configure JSON encoding 1569 #codec.json: 1570 # Pretty-print JSON event 1571 #pretty: false 1572 1573 # Configure escaping HTML symbols in strings. 1574 #escape_html: true 1575 1576 # Path to the directory where to save the generated files. The option is 1577 # mandatory. 1578 #path: "/tmp/filebeat" 1579 1580 # Name of the generated files. The default is `filebeat` and it generates 1581 # files: `filebeat`, `filebeat.1`, `filebeat.2`, etc. 1582 #filename: filebeat 1583 1584 # Maximum size in kilobytes of each file. When this size is reached, and on 1585 # every filebeat restart, the files are rotated. The default value is 10240 1586 # kB. 1587 #rotate_every_kb: 10000 1588 1589 # Maximum number of files under path. When this number of files is reached, 1590 # the oldest file is deleted and the rest are shifted from last to first. The 1591 # default is 7 files. 1592 #number_of_files: 7 1593 1594 # Permissions to use for file creation. The default is 0600. 1595 #permissions: 0600 1596 1597 1598#----------------------------- Console output --------------------------------- 1599#output.console: 1600 # Boolean flag to enable or disable the output module. 1601 #enabled: true 1602 1603 # Configure JSON encoding 1604 #codec.json: 1605 # Pretty-print JSON event 1606 #pretty: false 1607 1608 # Configure escaping HTML symbols in strings. 1609 #escape_html: true 1610 1611#================================= Paths ====================================== 1612 1613# The home path for the filebeat installation. This is the default base path 1614# for all other path settings and for miscellaneous files that come with the 1615# distribution (for example, the sample dashboards). 1616# If not set by a CLI flag or in the configuration file, the default for the 1617# home path is the location of the binary. 1618#path.home: 1619 1620# The configuration path for the filebeat installation. 
This is the default 1621# base path for configuration files, including the main YAML configuration file 1622# and the Elasticsearch template file. If not set by a CLI flag or in the 1623# configuration file, the default for the configuration path is the home path. 1624#path.config: ${path.home} 1625 1626# The data path for the filebeat installation. This is the default base path 1627# for all the files in which filebeat needs to store its data. If not set by a 1628# CLI flag or in the configuration file, the default for the data path is a data 1629# subdirectory inside the home path. 1630#path.data: ${path.home}/data 1631 1632# The logs path for a filebeat installation. This is the default location for 1633# the Beat's log files. If not set by a CLI flag or in the configuration file, 1634# the default for the logs path is a logs subdirectory inside the home path. 1635#path.logs: ${path.home}/logs 1636 1637#================================ Keystore ========================================== 1638# Location of the Keystore containing the keys and their sensitive values. 1639#keystore.path: "${path.config}/beats.keystore" 1640 1641#============================== Dashboards ===================================== 1642# These settings control loading the sample dashboards to the Kibana index. Loading 1643# the dashboards are disabled by default and can be enabled either by setting the 1644# options here, or by using the `-setup` CLI flag or the `setup` command. 1645#setup.dashboards.enabled: false 1646 1647# The directory from where to read the dashboards. The default is the `kibana` 1648# folder in the home path. 1649#setup.dashboards.directory: ${path.home}/kibana 1650 1651# The URL from where to download the dashboards archive. It is used instead of 1652# the directory if it has a value. 1653#setup.dashboards.url: 1654 1655# The file archive (zip file) from where to read the dashboards. It is used instead 1656# of the directory when it has a value. 
1657 #setup.dashboards.file: 1658 1659 # In case the archive contains the dashboards from multiple Beats, this lets you 1660 # select which one to load. You can load all the dashboards in the archive by 1661 # setting this to the empty string. 1662 #setup.dashboards.beat: filebeat 1663 1664 # The name of the Kibana index to use for setting the configuration. Default is ".kibana" 1665 #setup.dashboards.kibana_index: .kibana 1666 1667 # The Elasticsearch index name. This overwrites the index name defined in the 1668 # dashboards and index pattern. Example: testbeat-* 1669 #setup.dashboards.index: 1670 1671 # Always use the Kibana API for loading the dashboards instead of autodetecting 1672 # how to install the dashboards by first querying Elasticsearch. 1673 #setup.dashboards.always_kibana: false 1674 1675 # If true and Kibana is not reachable at the time when dashboards are loaded, 1676 # it will retry to reconnect to Kibana instead of exiting with an error. 1677 #setup.dashboards.retry.enabled: false 1678 1679 # Duration interval between Kibana connection retries. 1680 #setup.dashboards.retry.interval: 1s 1681 1682 # Maximum number of retries before exiting with an error, 0 for unlimited retrying. 1683 #setup.dashboards.retry.maximum: 0 1684 1685 1686 #============================== Template ===================================== 1687 1688 # A template is used to set the mapping in Elasticsearch 1689 # By default template loading is enabled and the template is loaded. 1690 # These settings can be adjusted to load your own template or overwrite existing ones. 1691 1692 # Set to false to disable template loading. 1693 #setup.template.enabled: true 1694 1695 # Template name. By default the template name is "filebeat-%{[beat.version]}" 1696 # The template name and pattern has to be set in case the Elasticsearch index pattern is modified. 1697 setup.template.name: "filebeat" 1698 1699 # Template pattern. By default the template pattern is "filebeat-%{[beat.version]}-*" to apply to the default index settings. 
1700# The first part is the version of the beat and then -* is used to match all daily indices. 1701# The template name and pattern has to be set in case the Elasticsearch index pattern is modified. 1702setup.template.pattern: "filebeat-*" 1703 1704# Path to fields.yml file to generate the template 1705#setup.template.fields: "${path.config}/fields.yml" 1706 1707# A list of fields to be added to the template and Kibana index pattern. Also 1708# specify setup.template.overwrite: true to overwrite the existing template. 1709# This setting is experimental. 1710#setup.template.append_fields: 1711#- name: field_name 1712# type: field_type 1713 1714# Enable JSON template loading. If this is enabled, the fields.yml is ignored. 1715#setup.template.json.enabled: false 1716 1717# Path to the JSON template file 1718#setup.template.json.path: "${path.config}/template.json" 1719 1720# Name under which the template is stored in Elasticsearch 1721#setup.template.json.name: "" 1722 1723# Overwrite existing template 1724#setup.template.overwrite: false 1725 1726# Elasticsearch template settings 1727setup.template.settings: 1728 1729 # A dictionary of settings to place into the settings.index dictionary 1730 # of the Elasticsearch template. For more details, please check 1731 # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html 1732 #index: 1733 #number_of_shards: 1 1734 #codec: best_compression 1735 #number_of_routing_shards: 30 1736 1737 # A dictionary of settings for the _source field. For more details, please check 1738 # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html 1739 #_source: 1740 #enabled: false 1741 1742#============================== Kibana ===================================== 1743 1744# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API. 1745# This requires a Kibana endpoint configuration. 
1746 setup.kibana: 1747 1748 # Kibana Host 1749 # Scheme and port can be left out and will be set to the default (http and 5601) 1750 # In case you specify an additional path, the scheme is required: http://localhost:5601/path 1751 # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601 1752 #host: "localhost:5601" 1753 1754 # Optional protocol and basic auth credentials. 1755 #protocol: "https" 1756 #username: "elastic" 1757 #password: "changeme" 1758 1759 # Optional HTTP path 1760 #path: "" 1761 1762 # Use SSL settings for HTTPS. Default is true. 1763 #ssl.enabled: true 1764 1765 # Configure SSL verification mode. If `none` is configured, all server hosts 1766 # and certificates will be accepted. In this mode, SSL based connections are 1767 # susceptible to man-in-the-middle attacks. Use only for testing. Default is 1768 # `full`. 1769 #ssl.verification_mode: full 1770 1771 # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to 1772 # 1.2 are enabled. 1773 #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] 1774 1775 # SSL configuration. The default is off. 1776 # List of root certificates for HTTPS server verifications 1777 #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] 1778 1779 # Certificate for SSL client authentication 1780 #ssl.certificate: "/etc/pki/client/cert.pem" 1781 1782 # Client certificate key 1783 #ssl.key: "/etc/pki/client/cert.key" 1784 1785 # Optional passphrase for decrypting the certificate key. 1786 #ssl.key_passphrase: '' 1787 1788 # Configure cipher suites to be used for SSL connections 1789 #ssl.cipher_suites: [] 1790 1791 # Configure curve types for ECDHE-based cipher suites 1792 #ssl.curve_types: [] 1793 1794 1795 1796 #================================ Logging ====================================== 1797 # There are four options for the log output: file, stderr, syslog, eventlog 1798 # The file output is the default. 1799 1800 # Sets log level. The default log level is info. 
1801# Available log levels are: error, warning, info, debug 1802#logging.level: info 1803 1804# Enable debug output for selected components. To enable all selectors use ["*"] 1805# Other available selectors are "beat", "publish", "service" 1806# Multiple selectors can be chained. 1807#logging.selectors: [ ] 1808 1809# Send all logging output to syslog. The default is false. 1810#logging.to_syslog: false 1811 1812# Send all logging output to Windows Event Logs. The default is false. 1813#logging.to_eventlog: false 1814 1815# If enabled, filebeat periodically logs its internal metrics that have changed 1816# in the last period. For each metric that changed, the delta from the value at 1817# the beginning of the period is logged. Also, the total values for 1818# all non-zero internal metrics are logged on shutdown. The default is true. 1819#logging.metrics.enabled: true 1820 1821# The period after which to log the internal metrics. The default is 30s. 1822#logging.metrics.period: 30s 1823 1824# Logging to rotating files. Set logging.to_files to false to disable logging to 1825# files. 1826logging.to_files: true 1827logging.files: 1828 # Configure the path where the logs are written. The default is the logs directory 1829 # under the home path (the binary location). 1830 #path: /var/log/filebeat 1831 1832 # The name of the files where the logs are written to. 1833 #name: filebeat 1834 1835 # Configure log file size limit. If limit is reached, log file will be 1836 # automatically rotated 1837 #rotateeverybytes: 10485760 # = 10MB 1838 1839 # Number of rotated log files to keep. Oldest files will be deleted first. 1840 #keepfiles: 7 1841 1842 # The permissions mask to apply when rotating log files. The default value is 0600. 1843 # Must be a valid Unix-style file permissions mask expressed in octal notation. 1844 #permissions: 0600 1845 1846 # Enable log file rotation on time intervals in addition to size-based rotation. 1847 # Intervals must be at least 1s. 
Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h
  # are boundary-aligned with minutes, hours, days, weeks, months, and years as
  # reported by the local system clock. All other intervals are calculated from the
  # Unix epoch. Defaults to disabled.
  #interval: 0

# Set to true to log messages in JSON format.
#logging.json: false


#============================== Xpack Monitoring =====================================
# filebeat can export internal metrics to a central Elasticsearch monitoring cluster.
# This requires xpack monitoring to be enabled in Elasticsearch.
# The reporting is disabled by default.

# Set to true to enable the monitoring reporter.
#xpack.monitoring.enabled: false

# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well. Any setting that is not set is
# automatically inherited from the Elasticsearch output configuration, so if you
# have the Elasticsearch output configured, you can simply uncomment the
# following line, and leave the rest commented out.
#xpack.monitoring.elasticsearch:

  # Array of hosts to connect to.
  # Scheme and port can be left out and will be set to the default (http and 9200)
  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
  #hosts: ["localhost:9200"]

  # Set gzip compression level.
  #compression_level: 0

  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "beats_system"
  #password: "changeme"

  # Dictionary of HTTP parameters to pass within the URL with index operations.
  #parameters:
    #param1: value1
    #param2: value2

  # Custom HTTP headers to add to each request
  #headers:
  #  X-My-Header: Contents of the header

  # Proxy server url
  #proxy_url: http://proxy:3128

  # The number of times a particular Elasticsearch index operation is attempted. If
  # the indexing operation doesn't succeed after this many retries, the events are
  # dropped. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
  # The default is 50.
  #bulk_max_size: 50

  # The number of seconds to wait before trying to reconnect to Elasticsearch
  # after a network error. After waiting backoff.init seconds, the Beat
  # tries to reconnect. If the attempt fails, the backoff timer is increased
  # exponentially up to backoff.max. After a successful connection, the backoff
  # timer is reset. The default is 1s.
  #backoff.init: 1s

  # The maximum number of seconds to wait before attempting to connect to
  # Elasticsearch after a network error. The default is 60s.
  #backoff.max: 60s

  # Configure HTTP request timeout before failing a request to Elasticsearch.
  #timeout: 90

  # Use SSL settings for HTTPS.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # SSL configuration. The default is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE-based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

  #metrics.period: 10s
  #state.period: 1m

#================================ HTTP Endpoint ======================================
# Each beat can expose internal metrics through a HTTP endpoint. For security
# reasons the endpoint is disabled by default. This feature is currently experimental.
# Stats can be accessed through http://localhost:5066/stats . For pretty JSON output
# append ?pretty to the URL.

# Defines if the HTTP endpoint is enabled.
#http.enabled: false

# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
#http.host: localhost

# Port on which the HTTP endpoint will bind. Default is 5066.
#http.port: 5066

#============================= Process Security ================================

# Enable or disable seccomp system call filtering on Linux. Default is enabled.
#seccomp.enabled: true