diff --git a/filebeat/etc/beat.full.yml b/filebeat/etc/beat.full.yml new file mode 100644 index 000000000000..d7a2e88f8ff8 --- /dev/null +++ b/filebeat/etc/beat.full.yml @@ -0,0 +1,203 @@ +##################$$$###### Filebeat Configuration ############################ + +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see filebeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/filebeat/index.html + +#=========================== Filebeat prospectors ============================= + +# List of prospectors to fetch data. +filebeat.prospectors: +# Each - is a prospector. Most options can be set at the prospector level, so +# you can use different prospectors for various configurations. +# Below are the prospector specific configurations. + +# Type of the files. Based on this the way the file is read is decided. +# The different types cannot be mixed in one prospector +# +# Possible options are: +# * log: Reads every line of the log file (default) +# * stdin: Reads the standard in + +#------------------------------ Log prospector -------------------------------- +- input_type: log + + # Paths that should be crawled and fetched. Glob based paths. + # To fetch all ".log" files from a specific level of subdirectories + # /var/log/*/*.log can be used. + # For each file found under this path, a harvester is started. + # Make sure not file is defined twice as this can lead to unexpected behaviour. + paths: + - /var/log/*.log + #- c:\programdata\elasticsearch\logs\* + + # Configure the file encoding for reading files with international characters + # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding). 
+ # Some sample encodings: + # plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk, + # hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ... + #encoding: plain + + + # Exclude lines. A list of regular expressions to match. It drops the lines that are + # matching any regular expression from the list. The include_lines is called before + # exclude_lines. By default, no lines are dropped. + #exclude_lines: ["^DBG"] + + # Include lines. A list of regular expressions to match. It exports the lines that are + # matching any regular expression from the list. The include_lines is called before + # exclude_lines. By default, all the lines are exported. + #include_lines: ["^ERR", "^WARN"] + + # Exclude files. A list of regular expressions to match. Filebeat drops the files that + # are matching any regular expression from the list. By default, no files are dropped. + #exclude_files: [".gz$"] + + # Optional additional fields. These field can be freely picked + # to add additional information to the crawled log files for filtering + #fields: + # level: debug + # review: 1 + + # Set to true to store the additional fields as top level fields instead + # of under the "fields" sub-dictionary. In case of name conflicts with the + # fields added by Filebeat itself, the custom fields overwrite the default + # fields. + #fields_under_root: false + + # Ignore files which were modified more then the defined timespan in the past. + # ignore_older is disabled by default, so no files are ignored by setting it to 0. + # Time strings like 2h (2 hours), 5m (5 minutes) can be used. + #ignore_older: 0 + + # Close older closes the file handler for which were not modified + # for longer then close_older + # Time strings like 2h (2 hours), 5m (5 minutes) can be used. + #close_older: 1h + + # Type to be published in the 'type' field. For Elasticsearch output, + # the type defines the document type these entries should be stored + # in. 
Default: log + #document_type: log + + # Scan frequency in seconds. + # How often these files should be checked for changes. In case it is set + # to 0s, it is done as often as possible. Default: 10s + #scan_frequency: 10s + + # Defines the buffer size every harvester uses when fetching the file + #harvester_buffer_size: 16384 + + # Maximum number of bytes a single log event can have + # All bytes after max_bytes are discarded and not sent. The default is 10MB. + # This is especially useful for multiline log messages which can get large. + #max_bytes: 10485760 + + ### JSON configuration + + # Decode JSON options. Enable this if your logs are structured in JSON. + # JSON key on which to apply the line filtering and multiline settings. This key + # must be top level and its value must be string, otherwise it is ignored. If + # no text key is defined, the line filtering and multiline features cannot be used. + #json.message_key: + + # By default, the decoded JSON is placed under a "json" key in the output document. + # If you enable this setting, the keys are copied top level in the output document. + #json.keys_under_root: false + + # If keys_under_root and this setting are enabled, then the values from the decoded + # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) + # in case of conflicts. + #json.overwrite_keys: false + + # If this setting is enabled, Filebeat adds a "json_error" key in case of JSON + # unmarshaling errors or when a text key is defined in the configuration but cannot + # be used. + #json.add_error_key: false + + ### Multiline options + + # Mutiline can be used for log messages spanning multiple lines. This is common + # for Java Stack Traces or C-Line Continuation + + # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ + #multiline.pattern: ^\[ + + # Defines if the pattern set under pattern should be negated or not. Default is false. 
+ #multiline.negate: false + + # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern + # that was (not) matched before or after or as long as a pattern is not matched based on negate. + # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash + #multiline.match: after + + # The maximum number of lines that are combined to one event. + # In case there are more the max_lines the additional lines are discarded. + # Default is 500 + #multiline.max_lines: 500 + + # After the defined timeout, an multiline event is sent even if no new pattern was found to start a new event + # Default is 5s. + #multiline.timeout: 5s + + # Setting tail_files to true means filebeat starts reading new files at the end + # instead of the beginning. If this is used in combination with log rotation + # this can mean that the first entries of a new file are skipped. + #tail_files: false + + # Backoff values define how aggressively filebeat crawls new files for updates + # The default values can be used in most cases. Backoff defines how long it is waited + # to check a file again after EOF is reached. Default is 1s which means the file + # is checked every second if new lines were added. This leads to a near real time crawling. + # Every time a new line appears, backoff is reset to the initial value. + #backoff: 1s + + # Max backoff defines what the maximum backoff time is. After having backed off multiple times + # from checking the files, the waiting time will never exceed max_backoff independent of the + # backoff factor. Having it set to 10s means in the worst case a new line can be added to a log + # file after having backed off multiple times, it takes a maximum of 10s to read the new line + #max_backoff: 10s + + # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor, + # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen. 
+ # The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached + #backoff_factor: 2 + + # This option closes a file, as soon as the file name changes. + # This config option is recommended on windows only. Filebeat keeps the files it's reading open. This can cause + # issues when the file is removed, as the file will not be fully removed until also Filebeat closes + # the reading. Filebeat closes the file handler after ignore_older. During this time no new file with the + # same name can be created. Turning this feature on the other hand can lead to loss of data + # on rotate files. It can happen that after file rotation the beginning of the new + # file is skipped, as the reading starts at the end. We recommend to leave this option on false + # but lower the ignore_older value to release files faster. + #force_close_files: false + +#----------------------------- Stdin prospector ------------------------------- +# Configuration to use stdin input +#- input_type: stdin + +#========================= Filebeat global options ============================ + +# Event count spool threshold - forces network flush if exceeded +#filebeat.spool_size: 2048 + +# Enable async publisher pipeline in filebeat (Experimental!) +#filebeat.publish_async: false + +# Defines how often the spooler is flushed. After idle_timeout the spooler is +# Flush even though spool_size is not reached. +#filebeat.idle_timeout: 5s + +# Name of the registry file. If a relative path is used, it is considered relative to the +# data path. +#filebeat.registry_file: registry + +# +# These config files must have the full filebeat config part inside, but only +# the prospector part is processed. All global options like spool_size are ignored. +# The config_dir MUST point to a different directory then where the main filebeat config file is in. 
+#filebeat.config_dir: diff --git a/filebeat/etc/beat.short.yml b/filebeat/etc/beat.short.yml deleted file mode 100644 index 8ac24e02c7ea..000000000000 --- a/filebeat/etc/beat.short.yml +++ /dev/null @@ -1,58 +0,0 @@ -###################### Filebeat Configuration Example ######################### - -# This file is an example configuration file highlighting only the most common -# options. The filebeat.full.yml file from the same directory contains all the -# supported options with more comments. You can use it as a reference. -# -# You can find the full configuration reference here: -# https://www.elastic.co/guide/en/beats/filebeat/index.html - -#=========================== Filebeat prospectors ============================= - -filebeat.prospectors: - -# Each - is a prospector. Most options can be set at the prospector level, so -# you can use different prospectors for various configurations. -# Below are the prospector specific configurations. - -- input_type: log - - # Paths that should be crawled and fetched. Glob based paths. - paths: - - /var/log/*.log - #- c:\programdata\elasticsearch\logs\* - - # Exclude lines. A list of regular expressions to match. It drops the lines that are - # matching any regular expression from the list. - #exclude_lines: ["^DBG"] - - # Include lines. A list of regular expressions to match. It exports the lines that are - # matching any regular expression from the list. - #include_lines: ["^ERR", "^WARN"] - - # Exclude files. A list of regular expressions to match. Filebeat drops the files that - # are matching any regular expression from the list. By default, no files are dropped. - #exclude_files: [".gz$"] - - # Optional additional fields. These field can be freely picked - # to add additional information to the crawled log files for filtering - #fields: - # level: debug - # review: 1 - - ### Multiline options - - # Mutiline can be used for log messages spanning multiple lines. 
This is common - # for Java Stack Traces or C-Line Continuation - - # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ - #multiline.pattern: ^\[ - - # Defines if the pattern set under pattern should be negated or not. Default is false. - #multiline.negate: false - - # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern - # that was (not) matched before or after or as long as a pattern is not matched based on negate. - # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash - #multiline.match: after - diff --git a/filebeat/etc/beat.yml b/filebeat/etc/beat.yml index 28aed2b32088..8ac24e02c7ea 100644 --- a/filebeat/etc/beat.yml +++ b/filebeat/etc/beat.yml @@ -1,55 +1,33 @@ -##################$$$###### Filebeat Configuration ############################ +###################### Filebeat Configuration Example ######################### -# This file is a full configuration example documenting all non-deprecated -# options in comments. For a shorter configuration example, that contains only -# the most common options, please see filebeat.short.yml in the same directory. +# This file is an example configuration file highlighting only the most common +# options. The filebeat.full.yml file from the same directory contains all the +# supported options with more comments. You can use it as a reference. # # You can find the full configuration reference here: # https://www.elastic.co/guide/en/beats/filebeat/index.html #=========================== Filebeat prospectors ============================= -# List of prospectors to fetch data. filebeat.prospectors: + # Each - is a prospector. Most options can be set at the prospector level, so # you can use different prospectors for various configurations. # Below are the prospector specific configurations. -# Type of the files. Based on this the way the file is read is decided. 
-# The different types cannot be mixed in one prospector -# -# Possible options are: -# * log: Reads every line of the log file (default) -# * stdin: Reads the standard in - -#------------------------------ Log prospector -------------------------------- - input_type: log # Paths that should be crawled and fetched. Glob based paths. - # To fetch all ".log" files from a specific level of subdirectories - # /var/log/*/*.log can be used. - # For each file found under this path, a harvester is started. - # Make sure not file is defined twice as this can lead to unexpected behaviour. paths: - /var/log/*.log #- c:\programdata\elasticsearch\logs\* - # Configure the file encoding for reading files with international characters - # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding). - # Some sample encodings: - # plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk, - # hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ... - #encoding: plain - - # Exclude lines. A list of regular expressions to match. It drops the lines that are - # matching any regular expression from the list. The include_lines is called before - # exclude_lines. By default, no lines are dropped. + # matching any regular expression from the list. #exclude_lines: ["^DBG"] # Include lines. A list of regular expressions to match. It exports the lines that are - # matching any regular expression from the list. The include_lines is called before - # exclude_lines. By default, all the lines are exported. + # matching any regular expression from the list. #include_lines: ["^ERR", "^WARN"] # Exclude files. A list of regular expressions to match. Filebeat drops the files that @@ -62,62 +40,6 @@ filebeat.prospectors: # level: debug # review: 1 - # Set to true to store the additional fields as top level fields instead - # of under the "fields" sub-dictionary. 
In case of name conflicts with the - # fields added by Filebeat itself, the custom fields overwrite the default - # fields. - #fields_under_root: false - - # Ignore files which were modified more then the defined timespan in the past. - # ignore_older is disabled by default, so no files are ignored by setting it to 0. - # Time strings like 2h (2 hours), 5m (5 minutes) can be used. - #ignore_older: 0 - - # Close older closes the file handler for which were not modified - # for longer then close_older - # Time strings like 2h (2 hours), 5m (5 minutes) can be used. - #close_older: 1h - - # Type to be published in the 'type' field. For Elasticsearch output, - # the type defines the document type these entries should be stored - # in. Default: log - #document_type: log - - # Scan frequency in seconds. - # How often these files should be checked for changes. In case it is set - # to 0s, it is done as often as possible. Default: 10s - #scan_frequency: 10s - - # Defines the buffer size every harvester uses when fetching the file - #harvester_buffer_size: 16384 - - # Maximum number of bytes a single log event can have - # All bytes after max_bytes are discarded and not sent. The default is 10MB. - # This is especially useful for multiline log messages which can get large. - #max_bytes: 10485760 - - ### JSON configuration - - # Decode JSON options. Enable this if your logs are structured in JSON. - # JSON key on which to apply the line filtering and multiline settings. This key - # must be top level and its value must be string, otherwise it is ignored. If - # no text key is defined, the line filtering and multiline features cannot be used. - #json.message_key: - - # By default, the decoded JSON is placed under a "json" key in the output document. - # If you enable this setting, the keys are copied top level in the output document. 
- #json.keys_under_root: false - - # If keys_under_root and this setting are enabled, then the values from the decoded - # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) - # in case of conflicts. - #json.overwrite_keys: false - - # If this setting is enabled, Filebeat adds a "json_error" key in case of JSON - # unmarshaling errors or when a text key is defined in the configuration but cannot - # be used. - #json.add_error_key: false - ### Multiline options # Mutiline can be used for log messages spanning multiple lines. This is common @@ -134,70 +56,3 @@ filebeat.prospectors: # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash #multiline.match: after - # The maximum number of lines that are combined to one event. - # In case there are more the max_lines the additional lines are discarded. - # Default is 500 - #multiline.max_lines: 500 - - # After the defined timeout, an multiline event is sent even if no new pattern was found to start a new event - # Default is 5s. - #multiline.timeout: 5s - - # Setting tail_files to true means filebeat starts reading new files at the end - # instead of the beginning. If this is used in combination with log rotation - # this can mean that the first entries of a new file are skipped. - #tail_files: false - - # Backoff values define how aggressively filebeat crawls new files for updates - # The default values can be used in most cases. Backoff defines how long it is waited - # to check a file again after EOF is reached. Default is 1s which means the file - # is checked every second if new lines were added. This leads to a near real time crawling. - # Every time a new line appears, backoff is reset to the initial value. - #backoff: 1s - - # Max backoff defines what the maximum backoff time is. After having backed off multiple times - # from checking the files, the waiting time will never exceed max_backoff independent of the - # backoff factor. 
Having it set to 10s means in the worst case a new line can be added to a log - # file after having backed off multiple times, it takes a maximum of 10s to read the new line - #max_backoff: 10s - - # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor, - # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen. - # The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached - #backoff_factor: 2 - - # This option closes a file, as soon as the file name changes. - # This config option is recommended on windows only. Filebeat keeps the files it's reading open. This can cause - # issues when the file is removed, as the file will not be fully removed until also Filebeat closes - # the reading. Filebeat closes the file handler after ignore_older. During this time no new file with the - # same name can be created. Turning this feature on the other hand can lead to loss of data - # on rotate files. It can happen that after file rotation the beginning of the new - # file is skipped, as the reading starts at the end. We recommend to leave this option on false - # but lower the ignore_older value to release files faster. - #force_close_files: false - -#----------------------------- Stdin prospector ------------------------------- -# Configuration to use stdin input -#- input_type: stdin - -#========================= Filebeat global options ============================ - -# Event count spool threshold - forces network flush if exceeded -#filebeat.spool_size: 2048 - -# Enable async publisher pipeline in filebeat (Experimental!) -#filebeat.publish_async: false - -# Defines how often the spooler is flushed. After idle_timeout the spooler is -# Flush even though spool_size is not reached. -#filebeat.idle_timeout: 5s - -# Name of the registry file. If a relative path is used, it is considered relative to the -# data path. 
-#filebeat.registry_file: registry - -# -# These config files must have the full filebeat config part inside, but only -# the prospector part is processed. All global options like spool_size are ignored. -# The config_dir MUST point to a different directory then where the main filebeat config file is in. -#filebeat.config_dir: diff --git a/filebeat/filebeat.full.yml b/filebeat/filebeat.full.yml new file mode 100644 index 000000000000..d2b311b73236 --- /dev/null +++ b/filebeat/filebeat.full.yml @@ -0,0 +1,467 @@ +##################$$$###### Filebeat Configuration ############################ + +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see filebeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/filebeat/index.html + +#=========================== Filebeat prospectors ============================= + +# List of prospectors to fetch data. +filebeat.prospectors: +# Each - is a prospector. Most options can be set at the prospector level, so +# you can use different prospectors for various configurations. +# Below are the prospector specific configurations. + +# Type of the files. Based on this the way the file is read is decided. +# The different types cannot be mixed in one prospector +# +# Possible options are: +# * log: Reads every line of the log file (default) +# * stdin: Reads the standard in + +#------------------------------ Log prospector -------------------------------- +- input_type: log + + # Paths that should be crawled and fetched. Glob based paths. + # To fetch all ".log" files from a specific level of subdirectories + # /var/log/*/*.log can be used. + # For each file found under this path, a harvester is started. + # Make sure not file is defined twice as this can lead to unexpected behaviour. 
+ paths: + - /var/log/*.log + #- c:\programdata\elasticsearch\logs\* + + # Configure the file encoding for reading files with international characters + # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding). + # Some sample encodings: + # plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk, + # hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ... + #encoding: plain + + + # Exclude lines. A list of regular expressions to match. It drops the lines that are + # matching any regular expression from the list. The include_lines is called before + # exclude_lines. By default, no lines are dropped. + #exclude_lines: ["^DBG"] + + # Include lines. A list of regular expressions to match. It exports the lines that are + # matching any regular expression from the list. The include_lines is called before + # exclude_lines. By default, all the lines are exported. + #include_lines: ["^ERR", "^WARN"] + + # Exclude files. A list of regular expressions to match. Filebeat drops the files that + # are matching any regular expression from the list. By default, no files are dropped. + #exclude_files: [".gz$"] + + # Optional additional fields. These field can be freely picked + # to add additional information to the crawled log files for filtering + #fields: + # level: debug + # review: 1 + + # Set to true to store the additional fields as top level fields instead + # of under the "fields" sub-dictionary. In case of name conflicts with the + # fields added by Filebeat itself, the custom fields overwrite the default + # fields. + #fields_under_root: false + + # Ignore files which were modified more then the defined timespan in the past. + # ignore_older is disabled by default, so no files are ignored by setting it to 0. + # Time strings like 2h (2 hours), 5m (5 minutes) can be used. 
+ #ignore_older: 0 + + # Close older closes the file handler for which were not modified + # for longer then close_older + # Time strings like 2h (2 hours), 5m (5 minutes) can be used. + #close_older: 1h + + # Type to be published in the 'type' field. For Elasticsearch output, + # the type defines the document type these entries should be stored + # in. Default: log + #document_type: log + + # Scan frequency in seconds. + # How often these files should be checked for changes. In case it is set + # to 0s, it is done as often as possible. Default: 10s + #scan_frequency: 10s + + # Defines the buffer size every harvester uses when fetching the file + #harvester_buffer_size: 16384 + + # Maximum number of bytes a single log event can have + # All bytes after max_bytes are discarded and not sent. The default is 10MB. + # This is especially useful for multiline log messages which can get large. + #max_bytes: 10485760 + + ### JSON configuration + + # Decode JSON options. Enable this if your logs are structured in JSON. + # JSON key on which to apply the line filtering and multiline settings. This key + # must be top level and its value must be string, otherwise it is ignored. If + # no text key is defined, the line filtering and multiline features cannot be used. + #json.message_key: + + # By default, the decoded JSON is placed under a "json" key in the output document. + # If you enable this setting, the keys are copied top level in the output document. + #json.keys_under_root: false + + # If keys_under_root and this setting are enabled, then the values from the decoded + # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) + # in case of conflicts. + #json.overwrite_keys: false + + # If this setting is enabled, Filebeat adds a "json_error" key in case of JSON + # unmarshaling errors or when a text key is defined in the configuration but cannot + # be used. 
+ #json.add_error_key: false + + ### Multiline options + + # Mutiline can be used for log messages spanning multiple lines. This is common + # for Java Stack Traces or C-Line Continuation + + # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ + #multiline.pattern: ^\[ + + # Defines if the pattern set under pattern should be negated or not. Default is false. + #multiline.negate: false + + # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern + # that was (not) matched before or after or as long as a pattern is not matched based on negate. + # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash + #multiline.match: after + + # The maximum number of lines that are combined to one event. + # In case there are more the max_lines the additional lines are discarded. + # Default is 500 + #multiline.max_lines: 500 + + # After the defined timeout, an multiline event is sent even if no new pattern was found to start a new event + # Default is 5s. + #multiline.timeout: 5s + + # Setting tail_files to true means filebeat starts reading new files at the end + # instead of the beginning. If this is used in combination with log rotation + # this can mean that the first entries of a new file are skipped. + #tail_files: false + + # Backoff values define how aggressively filebeat crawls new files for updates + # The default values can be used in most cases. Backoff defines how long it is waited + # to check a file again after EOF is reached. Default is 1s which means the file + # is checked every second if new lines were added. This leads to a near real time crawling. + # Every time a new line appears, backoff is reset to the initial value. + #backoff: 1s + + # Max backoff defines what the maximum backoff time is. 
After having backed off multiple times + # from checking the files, the waiting time will never exceed max_backoff independent of the + # backoff factor. Having it set to 10s means in the worst case a new line can be added to a log + # file after having backed off multiple times, it takes a maximum of 10s to read the new line + #max_backoff: 10s + + # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor, + # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen. + # The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached + #backoff_factor: 2 + + # This option closes a file, as soon as the file name changes. + # This config option is recommended on windows only. Filebeat keeps the files it's reading open. This can cause + # issues when the file is removed, as the file will not be fully removed until also Filebeat closes + # the reading. Filebeat closes the file handler after ignore_older. During this time no new file with the + # same name can be created. Turning this feature on the other hand can lead to loss of data + # on rotate files. It can happen that after file rotation the beginning of the new + # file is skipped, as the reading starts at the end. We recommend to leave this option on false + # but lower the ignore_older value to release files faster. + #force_close_files: false + +#----------------------------- Stdin prospector ------------------------------- +# Configuration to use stdin input +#- input_type: stdin + +#========================= Filebeat global options ============================ + +# Event count spool threshold - forces network flush if exceeded +#filebeat.spool_size: 2048 + +# Enable async publisher pipeline in filebeat (Experimental!) +#filebeat.publish_async: false + +# Defines how often the spooler is flushed. After idle_timeout the spooler is +# Flush even though spool_size is not reached. 
+#filebeat.idle_timeout: 5s + +# Name of the registry file. If a relative path is used, it is considered relative to the +# data path. +#filebeat.registry_file: registry + +# +# These config files must have the full filebeat config part inside, but only +# the prospector part is processed. All global options like spool_size are ignored. +# The config_dir MUST point to a different directory then where the main filebeat config file is in. +#filebeat.config_dir: + +#================================ General ===================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +# If this options is not defined, the hostname is used. +#name: + +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. +#fields_under_root: false + +# Uncomment the following if you want to ignore transactions created +# by the server on which the shipper is installed. This option is useful +# to remove duplicates if shippers are installed on multiple servers. +#ignore_outgoing: true + +# How often (in seconds) shippers are publishing their IPs to the topology map. +# The default is 10 seconds. +#refresh_topology_freq: 10 + +# Expiration time (in seconds) of the IPs published by a shipper to the topology map. +# All the IPs will be deleted afterwards. Note, that the value must be higher than +# refresh_topology_freq. The default is 15 seconds. 
+#topology_expire: 15 + +# Internal queue size for single events in processing pipeline +#queue_size: 1000 + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +#================================ Outputs ===================================== + +# Configure what outputs to use when sending the data collected by the beat. +# Multiple outputs may be used. + +#-------------------------- Elasticsearch output ------------------------------ +output.elasticsearch: + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + hosts: ["localhost:9200"] + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "admin" + #password: "s3cr3t" + + # Dictionary of HTTP parameters to pass within the url with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Number of workers per Elasticsearch host. + #worker: 1 + + # Optional index name. The default is "filebeat" and generates + # [filebeat-]YYYY.MM.DD keys. + #index: "filebeat" + + # Optional HTTP Path + #path: "/elasticsearch" + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # Configure http request timeout before failing an request to Elasticsearch. + #timeout: 90 + + # The number of seconds to wait for new events between two bulk API index requests. 
+  # If `bulk_max_size` is reached before this interval expires, additional bulk index
+  # requests are made.
+  #flush_interval: 1
+
+  # Boolean that sets if the topology is kept in Elasticsearch. The default is
+  # false. This option makes sense only for Packetbeat.
+  #save_topology: false
+
+  # The time to live in seconds for the topology information that is stored in
+  # Elasticsearch. The default is 15 seconds.
+  #topology_expire: 15
+
+  # A template is used to set the mapping in Elasticsearch
+  # By default template loading is enabled and the template is loaded.
+  # These settings can be adjusted to load your own template or overwrite existing ones
+
+  # Template name. By default the template name is filebeat.
+  template.name: "filebeat"
+
+  # Path to template file
+  template.path: "filebeat.template.json"
+
+  # Overwrite existing template
+  template.overwrite: false
+
+  # TLS configuration. By default is off.
+  # List of root certificates for HTTPS server verifications
+  #tls.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for TLS client authentication
+  #tls.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #tls.certificate_key: "/etc/pki/client/cert.key"
+
+  # Controls whether the client verifies server certificates and host name.
+  # If insecure is set to true, all server host names and certificates will be
+  # accepted. In this mode TLS based connections are susceptible to
+  # man-in-the-middle attacks. Use only for testing.
+ #tls.insecure: true + + # Configure cipher suites to be used for TLS connections + #tls.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #tls.curve_types: [] + + # Configure minimum TLS version allowed for connection to logstash + #tls.min_version: 1.0 + + # Configure maximum TLS version allowed for connection to logstash + #tls.max_version: 1.2 + + +#----------------------------- Logstash output -------------------------------- +#output.logstash: + # The Logstash hosts + #hosts: ["localhost:5044"] + + # Number of workers per Logstash host. + #worker: 1 + + # Set gzip compression level. + #compression_level: 3 + + # Optional load balance the events between the Logstash hosts + #loadbalance: true + + # Optional index name. The default index name is set to name of the beat + # in all lowercase. + #index: filebeat + + # SOCKS5 proxy server URL + #proxy_url: socks5://user:password@socks5-server:2233 + + # Resolve names locally when using a proxy server. Defaults to false. + #proxy_use_local_resolver: false + + # Optional TLS. By default is off. + # List of root certificates for HTTPS server verifications + #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for TLS client authentication + #tls.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #tls.certificate_key: "/etc/pki/client/cert.key" + + # Controls whether the client verifies server certificates and host name. + # If insecure is set to true, all server host names and certificates will be + # accepted. In this mode TLS based connections are susceptible to + # man-in-the-middle attacks. Use only for testing. 
+ #tls.insecure: true + + # Configure cipher suites to be used for TLS connections + #tls.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #tls.curve_types: [] + + +#------------------------------- File output ---------------------------------- +#output.file: + # Path to the directory where to save the generated files. The option is mandatory. + #path: "/tmp/filebeat" + + # Name of the generated files. The default is `filebeat` and it generates files: `filebeat`, `filebeat.1`, `filebeat.2`, etc. + #filename: filebeat + + # Maximum size in kilobytes of each file. When this size is reached, the files are + # rotated. The default value is 10240 kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, the + # oldest file is deleted and the rest are shifted from last to first. The default + # is 7 files. + #number_of_files: 7 + + +#----------------------------- Console output --------------------------------- +#output.console: + # Pretty print json event + #pretty: false + +#================================ Logging ===================================== +# There are three options for the log output: syslog, file, stderr. +# Under Windows systems, the log files are per default sent to the file output, +# under all other system per default to syslog. + +# Sets log level. The default log level is error. +# Available log levels are: critical, error, warning, info, debug +#logging.level: error + +# Enable debug output for selected components. To enable all selectors use ["*"] +# Other available selectors are beat, publish, service +# Multiple selectors can be chained. +#logging.selectors: [ ] + +# Send all logging output to syslog. The default is false. +#logging.to_syslog: true + +# Logging to rotating files files. Set logging.to_files to false to disable logging to +# files. +logging.to_files: true +logging.files: + # Configure the path where the logs are written. 
The default is the logs directory + # under the home path (the binary location). + #path: /var/log/mybeat + + # The name of the files where the logs are written to. + #name: mybeat + + # Configure log file size limit. If limit is reached, log file will be + # automatically rotated + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. Oldest files will be deleted first. + #keepfiles: 7 + +#================================ Filters ===================================== + +# This section defines a list of filtering rules that are applied one by one starting with the +# exported event: +# event -> filter1 -> event1 -> filter2 ->event2 ... +# Supported actions: drop_fields, drop_event, include_fields +#filters: +# - drop_fields: +# equals: +# status: OK +# fields: [ ] diff --git a/filebeat/filebeat.short.yml b/filebeat/filebeat.short.yml deleted file mode 100644 index 33dbac3be7b3..000000000000 --- a/filebeat/filebeat.short.yml +++ /dev/null @@ -1,113 +0,0 @@ -###################### Filebeat Configuration Example ######################### - -# This file is an example configuration file highlighting only the most common -# options. The filebeat.full.yml file from the same directory contains all the -# supported options with more comments. You can use it as a reference. -# -# You can find the full configuration reference here: -# https://www.elastic.co/guide/en/beats/filebeat/index.html - -#=========================== Filebeat prospectors ============================= - -filebeat.prospectors: - -# Each - is a prospector. Most options can be set at the prospector level, so -# you can use different prospectors for various configurations. -# Below are the prospector specific configurations. - -- input_type: log - - # Paths that should be crawled and fetched. Glob based paths. - paths: - - /var/log/*.log - #- c:\programdata\elasticsearch\logs\* - - # Exclude lines. A list of regular expressions to match. 
It drops the lines that are - # matching any regular expression from the list. - #exclude_lines: ["^DBG"] - - # Include lines. A list of regular expressions to match. It exports the lines that are - # matching any regular expression from the list. - #include_lines: ["^ERR", "^WARN"] - - # Exclude files. A list of regular expressions to match. Filebeat drops the files that - # are matching any regular expression from the list. By default, no files are dropped. - #exclude_files: [".gz$"] - - # Optional additional fields. These field can be freely picked - # to add additional information to the crawled log files for filtering - #fields: - # level: debug - # review: 1 - - ### Multiline options - - # Mutiline can be used for log messages spanning multiple lines. This is common - # for Java Stack Traces or C-Line Continuation - - # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ - #multiline.pattern: ^\[ - - # Defines if the pattern set under pattern should be negated or not. Default is false. - #multiline.negate: false - - # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern - # that was (not) matched before or after or as long as a pattern is not matched based on negate. - # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash - #multiline.match: after - - -#================================ General ===================================== - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -#name: - -# The tags of the shipper are included in their own field with each -# transaction published. -#tags: ["service-X", "web-tier"] - -# Optional fields that you can specify to add additional information to the -# output. 
-#fields: -# env: staging - -#================================ Outputs ===================================== - -# Configure what outputs to use when sending the data collected by the beat. -# Multiple outputs may be used. - -#-------------------------- Elasticsearch output ------------------------------ -output.elasticsearch: - # Array of hosts to connect to. - hosts: ["localhost:9200"] - - # Template name. By default the template name is filebeat. - template.name: "filebeat" - - # Path to template file - template.path: "filebeat.template.json" - - # Overwrite existing template - template.overwrite: false - -#----------------------------- Logstash output -------------------------------- -#output.logstash: - # The Logstash hosts - #hosts: ["localhost:5044"] - - # Optional TLS. By default is off. - # List of root certificates for HTTPS server verifications - #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for TLS client authentication - #tls.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #tls.certificate_key: "/etc/pki/client/cert.key" - -#================================ Logging ===================================== - -# Sets log level. The default log level is error. -# Available log levels are: critical, error, warning, info, debug -#logging.level: error diff --git a/filebeat/filebeat.yml b/filebeat/filebeat.yml index f862c48da130..33dbac3be7b3 100644 --- a/filebeat/filebeat.yml +++ b/filebeat/filebeat.yml @@ -1,55 +1,33 @@ -##################$$$###### Filebeat Configuration ############################ +###################### Filebeat Configuration Example ######################### -# This file is a full configuration example documenting all non-deprecated -# options in comments. For a shorter configuration example, that contains only -# the most common options, please see filebeat.short.yml in the same directory. +# This file is an example configuration file highlighting only the most common +# options. 
The filebeat.full.yml file from the same directory contains all the +# supported options with more comments. You can use it as a reference. # # You can find the full configuration reference here: # https://www.elastic.co/guide/en/beats/filebeat/index.html #=========================== Filebeat prospectors ============================= -# List of prospectors to fetch data. filebeat.prospectors: + # Each - is a prospector. Most options can be set at the prospector level, so # you can use different prospectors for various configurations. # Below are the prospector specific configurations. -# Type of the files. Based on this the way the file is read is decided. -# The different types cannot be mixed in one prospector -# -# Possible options are: -# * log: Reads every line of the log file (default) -# * stdin: Reads the standard in - -#------------------------------ Log prospector -------------------------------- - input_type: log # Paths that should be crawled and fetched. Glob based paths. - # To fetch all ".log" files from a specific level of subdirectories - # /var/log/*/*.log can be used. - # For each file found under this path, a harvester is started. - # Make sure not file is defined twice as this can lead to unexpected behaviour. paths: - /var/log/*.log #- c:\programdata\elasticsearch\logs\* - # Configure the file encoding for reading files with international characters - # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding). - # Some sample encodings: - # plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk, - # hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ... - #encoding: plain - - # Exclude lines. A list of regular expressions to match. It drops the lines that are - # matching any regular expression from the list. The include_lines is called before - # exclude_lines. By default, no lines are dropped. + # matching any regular expression from the list. #exclude_lines: ["^DBG"] # Include lines. 
A list of regular expressions to match. It exports the lines that are - # matching any regular expression from the list. The include_lines is called before - # exclude_lines. By default, all the lines are exported. + # matching any regular expression from the list. #include_lines: ["^ERR", "^WARN"] # Exclude files. A list of regular expressions to match. Filebeat drops the files that @@ -62,62 +40,6 @@ filebeat.prospectors: # level: debug # review: 1 - # Set to true to store the additional fields as top level fields instead - # of under the "fields" sub-dictionary. In case of name conflicts with the - # fields added by Filebeat itself, the custom fields overwrite the default - # fields. - #fields_under_root: false - - # Ignore files which were modified more then the defined timespan in the past. - # ignore_older is disabled by default, so no files are ignored by setting it to 0. - # Time strings like 2h (2 hours), 5m (5 minutes) can be used. - #ignore_older: 0 - - # Close older closes the file handler for which were not modified - # for longer then close_older - # Time strings like 2h (2 hours), 5m (5 minutes) can be used. - #close_older: 1h - - # Type to be published in the 'type' field. For Elasticsearch output, - # the type defines the document type these entries should be stored - # in. Default: log - #document_type: log - - # Scan frequency in seconds. - # How often these files should be checked for changes. In case it is set - # to 0s, it is done as often as possible. Default: 10s - #scan_frequency: 10s - - # Defines the buffer size every harvester uses when fetching the file - #harvester_buffer_size: 16384 - - # Maximum number of bytes a single log event can have - # All bytes after max_bytes are discarded and not sent. The default is 10MB. - # This is especially useful for multiline log messages which can get large. - #max_bytes: 10485760 - - ### JSON configuration - - # Decode JSON options. Enable this if your logs are structured in JSON. 
- # JSON key on which to apply the line filtering and multiline settings. This key - # must be top level and its value must be string, otherwise it is ignored. If - # no text key is defined, the line filtering and multiline features cannot be used. - #json.message_key: - - # By default, the decoded JSON is placed under a "json" key in the output document. - # If you enable this setting, the keys are copied top level in the output document. - #json.keys_under_root: false - - # If keys_under_root and this setting are enabled, then the values from the decoded - # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) - # in case of conflicts. - #json.overwrite_keys: false - - # If this setting is enabled, Filebeat adds a "json_error" key in case of JSON - # unmarshaling errors or when a text key is defined in the configuration but cannot - # be used. - #json.add_error_key: false - ### Multiline options # Mutiline can be used for log messages spanning multiple lines. This is common @@ -134,118 +56,22 @@ filebeat.prospectors: # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash #multiline.match: after - # The maximum number of lines that are combined to one event. - # In case there are more the max_lines the additional lines are discarded. - # Default is 500 - #multiline.max_lines: 500 - - # After the defined timeout, an multiline event is sent even if no new pattern was found to start a new event - # Default is 5s. - #multiline.timeout: 5s - - # Setting tail_files to true means filebeat starts reading new files at the end - # instead of the beginning. If this is used in combination with log rotation - # this can mean that the first entries of a new file are skipped. - #tail_files: false - - # Backoff values define how aggressively filebeat crawls new files for updates - # The default values can be used in most cases. 
Backoff defines how long it is waited - # to check a file again after EOF is reached. Default is 1s which means the file - # is checked every second if new lines were added. This leads to a near real time crawling. - # Every time a new line appears, backoff is reset to the initial value. - #backoff: 1s - - # Max backoff defines what the maximum backoff time is. After having backed off multiple times - # from checking the files, the waiting time will never exceed max_backoff independent of the - # backoff factor. Having it set to 10s means in the worst case a new line can be added to a log - # file after having backed off multiple times, it takes a maximum of 10s to read the new line - #max_backoff: 10s - - # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor, - # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen. - # The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached - #backoff_factor: 2 - - # This option closes a file, as soon as the file name changes. - # This config option is recommended on windows only. Filebeat keeps the files it's reading open. This can cause - # issues when the file is removed, as the file will not be fully removed until also Filebeat closes - # the reading. Filebeat closes the file handler after ignore_older. During this time no new file with the - # same name can be created. Turning this feature on the other hand can lead to loss of data - # on rotate files. It can happen that after file rotation the beginning of the new - # file is skipped, as the reading starts at the end. We recommend to leave this option on false - # but lower the ignore_older value to release files faster. 
- #force_close_files: false - -#----------------------------- Stdin prospector ------------------------------- -# Configuration to use stdin input -#- input_type: stdin - -#========================= Filebeat global options ============================ - -# Event count spool threshold - forces network flush if exceeded -#filebeat.spool_size: 2048 - -# Enable async publisher pipeline in filebeat (Experimental!) -#filebeat.publish_async: false - -# Defines how often the spooler is flushed. After idle_timeout the spooler is -# Flush even though spool_size is not reached. -#filebeat.idle_timeout: 5s - -# Name of the registry file. If a relative path is used, it is considered relative to the -# data path. -#filebeat.registry_file: registry - -# -# These config files must have the full filebeat config part inside, but only -# the prospector part is processed. All global options like spool_size are ignored. -# The config_dir MUST point to a different directory then where the main filebeat config file is in. -#filebeat.config_dir: #================================ General ===================================== # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. #name: # The tags of the shipper are included in their own field with each -# transaction published. Tags make it easy to group servers by different -# logical properties. +# transaction published. #tags: ["service-X", "web-tier"] # Optional fields that you can specify to add additional information to the -# output. Fields can be scalar values, arrays, dictionaries, or any nested -# combination of these. +# output. #fields: # env: staging -# If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields -# sub-dictionary. Default is false. 
-#fields_under_root: false - -# Uncomment the following if you want to ignore transactions created -# by the server on which the shipper is installed. This option is useful -# to remove duplicates if shippers are installed on multiple servers. -#ignore_outgoing: true - -# How often (in seconds) shippers are publishing their IPs to the topology map. -# The default is 10 seconds. -#refresh_topology_freq: 10 - -# Expiration time (in seconds) of the IPs published by a shipper to the topology map. -# All the IPs will be deleted afterwards. Note, that the value must be higher than -# refresh_topology_freq. The default is 15 seconds. -#topology_expire: 15 - -# Internal queue size for single events in processing pipeline -#queue_size: 1000 - -# Sets the maximum number of CPUs that can be executing simultaneously. The -# default is the number of logical CPUs available in the system. -#max_procs: - #================================ Outputs ===================================== # Configure what outputs to use when sending the data collected by the beat. @@ -254,63 +80,8 @@ filebeat.prospectors: #-------------------------- Elasticsearch output ------------------------------ output.elasticsearch: # Array of hosts to connect to. - # Scheme and port can be left out and will be set to the default (http and 9200) - # In case you specify and additional path, the scheme is required: http://localhost:9200/path - # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 hosts: ["localhost:9200"] - # Optional protocol and basic auth credentials. - #protocol: "https" - #username: "admin" - #password: "s3cr3t" - - # Dictionary of HTTP parameters to pass within the url with index operations. - #parameters: - #param1: value1 - #param2: value2 - - # Number of workers per Elasticsearch host. - #worker: 1 - - # Optional index name. The default is "filebeat" and generates - # [filebeat-]YYYY.MM.DD keys. 
- #index: "filebeat" - - # Optional HTTP Path - #path: "/elasticsearch" - - # Proxy server url - #proxy_url: http://proxy:3128 - - # The number of times a particular Elasticsearch index operation is attempted. If - # the indexing operation doesn't succeed after this many retries, the events are - # dropped. The default is 3. - #max_retries: 3 - - # The maximum number of events to bulk in a single Elasticsearch bulk API index request. - # The default is 50. - #bulk_max_size: 50 - - # Configure http request timeout before failing an request to Elasticsearch. - #timeout: 90 - - # The number of seconds to wait for new events between two bulk API index requests. - # If `bulk_max_size` is reached before this interval expires, addition bulk index - # requests are made. - #flush_interval: 1 - - # Boolean that sets if the topology is kept in Elasticsearch. The default is - # false. This option makes sense only for Packetbeat. - #save_topology: false - - # The time to live in seconds for the topology information that is stored in - # Elasticsearch. The default is 15 seconds. - #topology_expire: 15 - - # A template is used to set the mapping in Elasticsearch - # By default template loading is enabled and the template is loaded. - # These settings can be adjusted to load your own template or overwrite existing ones - # Template name. By default the template name is filebeat. template.name: "filebeat" @@ -320,59 +91,11 @@ output.elasticsearch: # Overwrite existing template template.overwrite: false - # TLS configuration. By default is off. - # List of root certificates for HTTPS server verifications - #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for TLS client authentication - #tls.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #tls.certificate_key: "/etc/pki/client/cert.key" - - # Controls whether the client verifies server certificates and host name. 
- # If insecure is set to true, all server host names and certificates will be - # accepted. In this mode TLS based connections are susceptible to - # man-in-the-middle attacks. Use only for testing. - #tls.insecure: true - - # Configure cipher suites to be used for TLS connections - #tls.cipher_suites: [] - - # Configure curve types for ECDHE based cipher suites - #tls.curve_types: [] - - # Configure minimum TLS version allowed for connection to logstash - #tls.min_version: 1.0 - - # Configure maximum TLS version allowed for connection to logstash - #tls.max_version: 1.2 - - #----------------------------- Logstash output -------------------------------- #output.logstash: # The Logstash hosts #hosts: ["localhost:5044"] - # Number of workers per Logstash host. - #worker: 1 - - # Set gzip compression level. - #compression_level: 3 - - # Optional load balance the events between the Logstash hosts - #loadbalance: true - - # Optional index name. The default index name is set to name of the beat - # in all lowercase. - #index: filebeat - - # SOCKS5 proxy server URL - #proxy_url: socks5://user:password@socks5-server:2233 - - # Resolve names locally when using a proxy server. Defaults to false. - #proxy_use_local_resolver: false - # Optional TLS. By default is off. # List of root certificates for HTTPS server verifications #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] @@ -383,85 +106,8 @@ output.elasticsearch: # Client Certificate Key #tls.certificate_key: "/etc/pki/client/cert.key" - # Controls whether the client verifies server certificates and host name. - # If insecure is set to true, all server host names and certificates will be - # accepted. In this mode TLS based connections are susceptible to - # man-in-the-middle attacks. Use only for testing. 
- #tls.insecure: true - - # Configure cipher suites to be used for TLS connections - #tls.cipher_suites: [] - - # Configure curve types for ECDHE based cipher suites - #tls.curve_types: [] - - -#------------------------------- File output ---------------------------------- -#output.file: - # Path to the directory where to save the generated files. The option is mandatory. - #path: "/tmp/filebeat" - - # Name of the generated files. The default is `filebeat` and it generates files: `filebeat`, `filebeat.1`, `filebeat.2`, etc. - #filename: filebeat - - # Maximum size in kilobytes of each file. When this size is reached, the files are - # rotated. The default value is 10240 kB. - #rotate_every_kb: 10000 - - # Maximum number of files under path. When this number of files is reached, the - # oldest file is deleted and the rest are shifted from last to first. The default - # is 7 files. - #number_of_files: 7 - - -#----------------------------- Console output --------------------------------- -#output.console: - # Pretty print json event - #pretty: false - #================================ Logging ===================================== -# There are three options for the log output: syslog, file, stderr. -# Under Windows systems, the log files are per default sent to the file output, -# under all other system per default to syslog. # Sets log level. The default log level is error. # Available log levels are: critical, error, warning, info, debug #logging.level: error - -# Enable debug output for selected components. To enable all selectors use ["*"] -# Other available selectors are beat, publish, service -# Multiple selectors can be chained. -#logging.selectors: [ ] - -# Send all logging output to syslog. The default is false. -#logging.to_syslog: true - -# Logging to rotating files files. Set logging.to_files to false to disable logging to -# files. -logging.to_files: true -logging.files: - # Configure the path where the logs are written. 
The default is the logs directory - # under the home path (the binary location). - #path: /var/log/mybeat - - # The name of the files where the logs are written to. - #name: mybeat - - # Configure log file size limit. If limit is reached, log file will be - # automatically rotated - #rotateeverybytes: 10485760 # = 10MB - - # Number of rotated log files to keep. Oldest files will be deleted first. - #keepfiles: 7 - -#================================ Filters ===================================== - -# This section defines a list of filtering rules that are applied one by one starting with the -# exported event: -# event -> filter1 -> event1 -> filter2 ->event2 ... -# Supported actions: drop_fields, drop_event, include_fields -#filters: -# - drop_fields: -# equals: -# status: OK -# fields: [ ] diff --git a/libbeat/_beat/config.full.yml b/libbeat/_beat/config.full.yml new file mode 100644 index 000000000000..acfc0b51fc78 --- /dev/null +++ b/libbeat/_beat/config.full.yml @@ -0,0 +1,264 @@ + +#================================ General ===================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +# If this options is not defined, the hostname is used. +#name: + +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. 
+#fields_under_root: false
+
+# Uncomment the following if you want to ignore transactions created
+# by the server on which the shipper is installed. This option is useful
+# to remove duplicates if shippers are installed on multiple servers.
+#ignore_outgoing: true
+
+# How often (in seconds) shippers are publishing their IPs to the topology map.
+# The default is 10 seconds.
+#refresh_topology_freq: 10
+
+# Expiration time (in seconds) of the IPs published by a shipper to the topology map.
+# All the IPs will be deleted afterwards. Note, that the value must be higher than
+# refresh_topology_freq. The default is 15 seconds.
+#topology_expire: 15
+
+# Internal queue size for single events in processing pipeline
+#queue_size: 1000
+
+# Sets the maximum number of CPUs that can be executing simultaneously. The
+# default is the number of logical CPUs available in the system.
+#max_procs:
+
+#================================ Outputs =====================================
+
+# Configure what outputs to use when sending the data collected by the beat.
+# Multiple outputs may be used.
+
+#-------------------------- Elasticsearch output ------------------------------
+output.elasticsearch:
+  # Array of hosts to connect to.
+  # Scheme and port can be left out and will be set to the default (http and 9200)
+  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
+  hosts: ["localhost:9200"]
+
+  # Optional protocol and basic auth credentials.
+  #protocol: "https"
+  #username: "admin"
+  #password: "s3cr3t"
+
+  # Dictionary of HTTP parameters to pass within the url with index operations.
+  #parameters:
+    #param1: value1
+    #param2: value2
+
+  # Number of workers per Elasticsearch host.
+  #worker: 1
+
+  # Optional index name. The default is "beatname" and generates
+  # [beatname-]YYYY.MM.DD keys.
+  #index: "beatname"
+
+  # Optional HTTP Path
+  #path: "/elasticsearch"
+
+  # Proxy server url
+  #proxy_url: http://proxy:3128
+
+  # The number of times a particular Elasticsearch index operation is attempted. If
+  # the indexing operation doesn't succeed after this many retries, the events are
+  # dropped. The default is 3.
+  #max_retries: 3
+
+  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
+  # The default is 50.
+  #bulk_max_size: 50
+
+  # Configure http request timeout before failing a request to Elasticsearch.
+  #timeout: 90
+
+  # The number of seconds to wait for new events between two bulk API index requests.
+  # If `bulk_max_size` is reached before this interval expires, additional bulk index
+  # requests are made.
+  #flush_interval: 1
+
+  # Boolean that sets if the topology is kept in Elasticsearch. The default is
+  # false. This option makes sense only for Packetbeat.
+  #save_topology: false
+
+  # The time to live in seconds for the topology information that is stored in
+  # Elasticsearch. The default is 15 seconds.
+  #topology_expire: 15
+
+  # A template is used to set the mapping in Elasticsearch
+  # By default template loading is enabled and the template is loaded.
+  # These settings can be adjusted to load your own template or overwrite existing ones
+
+  # Template name. By default the template name is beatname.
+  template.name: "beatname"
+
+  # Path to template file
+  template.path: "beatname.template.json"
+
+  # Overwrite existing template
+  template.overwrite: false
+
+  # TLS configuration. By default is off.
+  # List of root certificates for HTTPS server verifications
+  #tls.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for TLS client authentication
+  #tls.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #tls.certificate_key: "/etc/pki/client/cert.key"
+
+  # Controls whether the client verifies server certificates and host name.
+ # If insecure is set to true, all server host names and certificates will be + # accepted. In this mode TLS based connections are susceptible to + # man-in-the-middle attacks. Use only for testing. + #tls.insecure: true + + # Configure cipher suites to be used for TLS connections + #tls.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #tls.curve_types: [] + + # Configure minimum TLS version allowed for connection to logstash + #tls.min_version: 1.0 + + # Configure maximum TLS version allowed for connection to logstash + #tls.max_version: 1.2 + + +#----------------------------- Logstash output -------------------------------- +#output.logstash: + # The Logstash hosts + #hosts: ["localhost:5044"] + + # Number of workers per Logstash host. + #worker: 1 + + # Set gzip compression level. + #compression_level: 3 + + # Optional load balance the events between the Logstash hosts + #loadbalance: true + + # Optional index name. The default index name is set to name of the beat + # in all lowercase. + #index: beatname + + # SOCKS5 proxy server URL + #proxy_url: socks5://user:password@socks5-server:2233 + + # Resolve names locally when using a proxy server. Defaults to false. + #proxy_use_local_resolver: false + + # Optional TLS. By default is off. + # List of root certificates for HTTPS server verifications + #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for TLS client authentication + #tls.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #tls.certificate_key: "/etc/pki/client/cert.key" + + # Controls whether the client verifies server certificates and host name. + # If insecure is set to true, all server host names and certificates will be + # accepted. In this mode TLS based connections are susceptible to + # man-in-the-middle attacks. Use only for testing. 
+  #tls.insecure: true
+
+  # Configure cipher suites to be used for TLS connections
+  #tls.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #tls.curve_types: []
+
+
+#------------------------------- File output ----------------------------------
+#output.file:
+  # Path to the directory where to save the generated files. The option is mandatory.
+  #path: "/tmp/beatname"
+
+  # Name of the generated files. The default is `beatname` and it generates files: `beatname`, `beatname.1`, `beatname.2`, etc.
+  #filename: beatname
+
+  # Maximum size in kilobytes of each file. When this size is reached, the files are
+  # rotated. The default value is 10240 kB.
+  #rotate_every_kb: 10000
+
+  # Maximum number of files under path. When this number of files is reached, the
+  # oldest file is deleted and the rest are shifted from last to first. The default
+  # is 7 files.
+  #number_of_files: 7
+
+
+#----------------------------- Console output ---------------------------------
+#output.console:
+  # Pretty print json event
+  #pretty: false
+
+#================================ Logging =====================================
+# There are three options for the log output: syslog, file, stderr.
+# Under Windows systems, the log files are per default sent to the file output,
+# under all other systems per default to syslog.
+
+# Sets log level. The default log level is error.
+# Available log levels are: critical, error, warning, info, debug
+#logging.level: error
+
+# Enable debug output for selected components. To enable all selectors use ["*"]
+# Other available selectors are beat, publish, service
+# Multiple selectors can be chained.
+#logging.selectors: [ ]
+
+# Send all logging output to syslog. The default is false.
+#logging.to_syslog: true
+
+# Logging to rotating files. Set logging.to_files to false to disable logging to
+# files.
+logging.to_files: true
+logging.files:
+  # Configure the path where the logs are written.
The default is the logs directory + # under the home path (the binary location). + #path: /var/log/mybeat + + # The name of the files where the logs are written to. + #name: mybeat + + # Configure log file size limit. If limit is reached, log file will be + # automatically rotated + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. Oldest files will be deleted first. + #keepfiles: 7 + +#================================ Filters ===================================== + +# This section defines a list of filtering rules that are applied one by one starting with the +# exported event: +# event -> filter1 -> event1 -> filter2 ->event2 ... +# Supported actions: drop_fields, drop_event, include_fields +#filters: +# - drop_fields: +# equals: +# status: OK +# fields: [ ] diff --git a/libbeat/_beat/config.short.yml b/libbeat/_beat/config.short.yml deleted file mode 100644 index 75037838d9b6..000000000000 --- a/libbeat/_beat/config.short.yml +++ /dev/null @@ -1,55 +0,0 @@ - -#================================ General ===================================== - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -#name: - -# The tags of the shipper are included in their own field with each -# transaction published. -#tags: ["service-X", "web-tier"] - -# Optional fields that you can specify to add additional information to the -# output. -#fields: -# env: staging - -#================================ Outputs ===================================== - -# Configure what outputs to use when sending the data collected by the beat. -# Multiple outputs may be used. - -#-------------------------- Elasticsearch output ------------------------------ -output.elasticsearch: - # Array of hosts to connect to. - hosts: ["localhost:9200"] - - # Template name. By default the template name is beatname. 
- template.name: "beatname" - - # Path to template file - template.path: "beatname.template.json" - - # Overwrite existing template - template.overwrite: false - -#----------------------------- Logstash output -------------------------------- -#output.logstash: - # The Logstash hosts - #hosts: ["localhost:5044"] - - # Optional TLS. By default is off. - # List of root certificates for HTTPS server verifications - #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for TLS client authentication - #tls.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #tls.certificate_key: "/etc/pki/client/cert.key" - -#================================ Logging ===================================== - -# Sets log level. The default log level is error. -# Available log levels are: critical, error, warning, info, debug -#logging.level: error diff --git a/libbeat/_beat/config.yml b/libbeat/_beat/config.yml index acfc0b51fc78..75037838d9b6 100644 --- a/libbeat/_beat/config.yml +++ b/libbeat/_beat/config.yml @@ -3,46 +3,17 @@ # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. #name: # The tags of the shipper are included in their own field with each -# transaction published. Tags make it easy to group servers by different -# logical properties. +# transaction published. #tags: ["service-X", "web-tier"] # Optional fields that you can specify to add additional information to the -# output. Fields can be scalar values, arrays, dictionaries, or any nested -# combination of these. +# output. #fields: # env: staging -# If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields -# sub-dictionary. Default is false. 
-#fields_under_root: false - -# Uncomment the following if you want to ignore transactions created -# by the server on which the shipper is installed. This option is useful -# to remove duplicates if shippers are installed on multiple servers. -#ignore_outgoing: true - -# How often (in seconds) shippers are publishing their IPs to the topology map. -# The default is 10 seconds. -#refresh_topology_freq: 10 - -# Expiration time (in seconds) of the IPs published by a shipper to the topology map. -# All the IPs will be deleted afterwards. Note, that the value must be higher than -# refresh_topology_freq. The default is 15 seconds. -#topology_expire: 15 - -# Internal queue size for single events in processing pipeline -#queue_size: 1000 - -# Sets the maximum number of CPUs that can be executing simultaneously. The -# default is the number of logical CPUs available in the system. -#max_procs: - #================================ Outputs ===================================== # Configure what outputs to use when sending the data collected by the beat. @@ -51,63 +22,8 @@ #-------------------------- Elasticsearch output ------------------------------ output.elasticsearch: # Array of hosts to connect to. - # Scheme and port can be left out and will be set to the default (http and 9200) - # In case you specify and additional path, the scheme is required: http://localhost:9200/path - # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 hosts: ["localhost:9200"] - # Optional protocol and basic auth credentials. - #protocol: "https" - #username: "admin" - #password: "s3cr3t" - - # Dictionary of HTTP parameters to pass within the url with index operations. - #parameters: - #param1: value1 - #param2: value2 - - # Number of workers per Elasticsearch host. - #worker: 1 - - # Optional index name. The default is "beatname" and generates - # [beatname-]YYYY.MM.DD keys. 
- #index: "beatname" - - # Optional HTTP Path - #path: "/elasticsearch" - - # Proxy server url - #proxy_url: http://proxy:3128 - - # The number of times a particular Elasticsearch index operation is attempted. If - # the indexing operation doesn't succeed after this many retries, the events are - # dropped. The default is 3. - #max_retries: 3 - - # The maximum number of events to bulk in a single Elasticsearch bulk API index request. - # The default is 50. - #bulk_max_size: 50 - - # Configure http request timeout before failing an request to Elasticsearch. - #timeout: 90 - - # The number of seconds to wait for new events between two bulk API index requests. - # If `bulk_max_size` is reached before this interval expires, addition bulk index - # requests are made. - #flush_interval: 1 - - # Boolean that sets if the topology is kept in Elasticsearch. The default is - # false. This option makes sense only for Packetbeat. - #save_topology: false - - # The time to live in seconds for the topology information that is stored in - # Elasticsearch. The default is 15 seconds. - #topology_expire: 15 - - # A template is used to set the mapping in Elasticsearch - # By default template loading is enabled and the template is loaded. - # These settings can be adjusted to load your own template or overwrite existing ones - # Template name. By default the template name is beatname. template.name: "beatname" @@ -117,59 +33,11 @@ output.elasticsearch: # Overwrite existing template template.overwrite: false - # TLS configuration. By default is off. - # List of root certificates for HTTPS server verifications - #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for TLS client authentication - #tls.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #tls.certificate_key: "/etc/pki/client/cert.key" - - # Controls whether the client verifies server certificates and host name. 
- # If insecure is set to true, all server host names and certificates will be - # accepted. In this mode TLS based connections are susceptible to - # man-in-the-middle attacks. Use only for testing. - #tls.insecure: true - - # Configure cipher suites to be used for TLS connections - #tls.cipher_suites: [] - - # Configure curve types for ECDHE based cipher suites - #tls.curve_types: [] - - # Configure minimum TLS version allowed for connection to logstash - #tls.min_version: 1.0 - - # Configure maximum TLS version allowed for connection to logstash - #tls.max_version: 1.2 - - #----------------------------- Logstash output -------------------------------- #output.logstash: # The Logstash hosts #hosts: ["localhost:5044"] - # Number of workers per Logstash host. - #worker: 1 - - # Set gzip compression level. - #compression_level: 3 - - # Optional load balance the events between the Logstash hosts - #loadbalance: true - - # Optional index name. The default index name is set to name of the beat - # in all lowercase. - #index: beatname - - # SOCKS5 proxy server URL - #proxy_url: socks5://user:password@socks5-server:2233 - - # Resolve names locally when using a proxy server. Defaults to false. - #proxy_use_local_resolver: false - # Optional TLS. By default is off. # List of root certificates for HTTPS server verifications #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] @@ -180,85 +48,8 @@ output.elasticsearch: # Client Certificate Key #tls.certificate_key: "/etc/pki/client/cert.key" - # Controls whether the client verifies server certificates and host name. - # If insecure is set to true, all server host names and certificates will be - # accepted. In this mode TLS based connections are susceptible to - # man-in-the-middle attacks. Use only for testing. 
- #tls.insecure: true - - # Configure cipher suites to be used for TLS connections - #tls.cipher_suites: [] - - # Configure curve types for ECDHE based cipher suites - #tls.curve_types: [] - - -#------------------------------- File output ---------------------------------- -#output.file: - # Path to the directory where to save the generated files. The option is mandatory. - #path: "/tmp/beatname" - - # Name of the generated files. The default is `beatname` and it generates files: `beatname`, `beatname.1`, `beatname.2`, etc. - #filename: beatname - - # Maximum size in kilobytes of each file. When this size is reached, the files are - # rotated. The default value is 10240 kB. - #rotate_every_kb: 10000 - - # Maximum number of files under path. When this number of files is reached, the - # oldest file is deleted and the rest are shifted from last to first. The default - # is 7 files. - #number_of_files: 7 - - -#----------------------------- Console output --------------------------------- -#output.console: - # Pretty print json event - #pretty: false - #================================ Logging ===================================== -# There are three options for the log output: syslog, file, stderr. -# Under Windows systems, the log files are per default sent to the file output, -# under all other system per default to syslog. # Sets log level. The default log level is error. # Available log levels are: critical, error, warning, info, debug #logging.level: error - -# Enable debug output for selected components. To enable all selectors use ["*"] -# Other available selectors are beat, publish, service -# Multiple selectors can be chained. -#logging.selectors: [ ] - -# Send all logging output to syslog. The default is false. -#logging.to_syslog: true - -# Logging to rotating files files. Set logging.to_files to false to disable logging to -# files. -logging.to_files: true -logging.files: - # Configure the path where the logs are written. 
The default is the logs directory - # under the home path (the binary location). - #path: /var/log/mybeat - - # The name of the files where the logs are written to. - #name: mybeat - - # Configure log file size limit. If limit is reached, log file will be - # automatically rotated - #rotateeverybytes: 10485760 # = 10MB - - # Number of rotated log files to keep. Oldest files will be deleted first. - #keepfiles: 7 - -#================================ Filters ===================================== - -# This section defines a list of filtering rules that are applied one by one starting with the -# exported event: -# event -> filter1 -> event1 -> filter2 ->event2 ... -# Supported actions: drop_fields, drop_event, include_fields -#filters: -# - drop_fields: -# equals: -# status: OK -# fields: [ ] diff --git a/libbeat/scripts/Makefile b/libbeat/scripts/Makefile index a0acafee17b3..8f1f5422feaa 100755 --- a/libbeat/scripts/Makefile +++ b/libbeat/scripts/Makefile @@ -221,8 +221,8 @@ update: python-env echo "Update config file" -rm -f etc/${BEATNAME}.yml cat etc/beat.yml ${ES_BEATS}/libbeat/_beat/config.yml | sed -e "s/beatname/${BEATNAME}/g" > ${BEATNAME}.yml - -rm -f etc/${BEATNAME}.short.yml - cat etc/beat.short.yml ${ES_BEATS}/libbeat/_beat/config.short.yml | sed -e "s/beatname/${BEATNAME}/g" > ${BEATNAME}.short.yml + -rm -f etc/${BEATNAME}.full.yml + cat etc/beat.full.yml ${ES_BEATS}/libbeat/_beat/config.full.yml | sed -e "s/beatname/${BEATNAME}/g" > ${BEATNAME}.full.yml # Update fields echo "Update fields" diff --git a/metricbeat/Makefile b/metricbeat/Makefile index 125226ee60d4..e885fff05548 100644 --- a/metricbeat/Makefile +++ b/metricbeat/Makefile @@ -39,7 +39,7 @@ docs: .PHONY: configs configs: python ${ES_BEATS}/metricbeat/scripts/config_collector.py $(PWD) > etc/beat.yml - python ${ES_BEATS}/metricbeat/scripts/config_collector.py --short $(PWD) > etc/beat.short.yml + python ${ES_BEATS}/metricbeat/scripts/config_collector.py --full $(PWD) > etc/beat.full.yml # This 
is called by the beats packer before building starts .PHONY: before-build diff --git a/metricbeat/docs/modules/apache.asciidoc b/metricbeat/docs/modules/apache.asciidoc index c3e2f9f3fd98..3b6dccd5c602 100644 --- a/metricbeat/docs/modules/apache.asciidoc +++ b/metricbeat/docs/modules/apache.asciidoc @@ -25,15 +25,6 @@ metricbeat.modules: # Apache hosts hosts: ["http://127.0.0.1/"] - - # Path to server status. Default server-status - #server_status_path: "server-status" - - # Username of hosts. Empty by default - #username: test - - # Password of hosts. Empty by default - #password: test123 ---- === MetricSets diff --git a/metricbeat/docs/modules/mysql.asciidoc b/metricbeat/docs/modules/mysql.asciidoc index cc4bc1e7d95d..29e54ca24130 100644 --- a/metricbeat/docs/modules/mysql.asciidoc +++ b/metricbeat/docs/modules/mysql.asciidoc @@ -35,12 +35,6 @@ metricbeat.modules: # Host DSN should be defined as "tcp(127.0.0.1:3306)/" # The username and password can either be set in the DSN or for all hosts in username and password config option hosts: ["root@tcp(127.0.0.1:3306)/"] - - # Username of hosts. Empty by default - #username: root - - # Password of hosts. Empty by default - #password: test ---- === MetricSets diff --git a/metricbeat/docs/modules/nginx.asciidoc b/metricbeat/docs/modules/nginx.asciidoc index 19e68d0a0894..5f4ad7a73f08 100644 --- a/metricbeat/docs/modules/nginx.asciidoc +++ b/metricbeat/docs/modules/nginx.asciidoc @@ -25,8 +25,6 @@ metricbeat.modules: # Nginx hosts hosts: ["http://127.0.0.1/"] - # Path to server status. Default server-status - #server_status_path: "server-status" ---- === MetricSets diff --git a/metricbeat/docs/modules/redis.asciidoc b/metricbeat/docs/modules/redis.asciidoc index 4959daeacf46..d442a64e6602 100644 --- a/metricbeat/docs/modules/redis.asciidoc +++ b/metricbeat/docs/modules/redis.asciidoc @@ -37,32 +37,6 @@ metricbeat.modules: # Redis hosts hosts: ["127.0.0.1:6379"] - - # Enabled defines if the module is enabled. 
Default: true - #enabled: true - - # Timeout after which time a metricset should return an error - # Timeout is by default defined as period, as a fetch of a metricset - # should never take longer then period, as otherwise calls can pile up. - #timeout: 1s - - # Optional fields to be added to each event - #fields: - # datacenter: west - - # Network type to be used for redis connection. Default: tcp - #network: tcp - - # Max number of concurrent connections. Default: 10 - #maxconn: 10 - - # Filters can be used to reduce the number of fields sent. - #filters: - # - include_fields: - # fields: ["stats"] - - # Redis AUTH password. Empty by default. - #password: foobared ---- === MetricSets diff --git a/metricbeat/etc/beat.full.yml b/metricbeat/etc/beat.full.yml new file mode 100644 index 000000000000..3f149a88dda2 --- /dev/null +++ b/metricbeat/etc/beat.full.yml @@ -0,0 +1,108 @@ +########################## Metricbeat Configuration ########################### + +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see metricbeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/metricbeat/index.html + +#========================== Modules configuration ============================ +metricbeat.modules: + +#---------------------------- Apache Status Module --------------------------- +- module: apache + metricsets: ["status"] + enabled: true + period: 1s + + # Apache hosts + hosts: ["http://127.0.0.1/"] + + # Path to server status. Default server-status + #server_status_path: "server-status" + + # Username of hosts. Empty by default + #username: test + + # Password of hosts. 
Empty by default + #password: test123 + +#---------------------------- MySQL Status Module ---------------------------- +- module: mysql + metricsets: ["status"] + enabled: true + period: 2s + + # Host DSN should be defined as "tcp(127.0.0.1:3306)/" + # The username and password can either be set in the DSN or for all hosts in username and password config option + hosts: ["root@tcp(127.0.0.1:3306)/"] + + # Username of hosts. Empty by default + #username: root + + # Password of hosts. Empty by default + #password: test + +#---------------------------- Nginx Status Module ---------------------------- +- module: nginx + metricsets: ["stubstatus"] + enabled: true + period: 1s + + # Nginx hosts + hosts: ["http://127.0.0.1/"] + + # Path to server status. Default server-status + #server_status_path: "server-status" + +#---------------------------- Redis Status Module ---------------------------- +- module: redis + metricsets: ["info"] + enabled: true + period: 1s + + # Redis hosts + hosts: ["127.0.0.1:6379"] + + # Enabled defines if the module is enabled. Default: true + #enabled: true + + # Timeout after which time a metricset should return an error + # Timeout is by default defined as period, as a fetch of a metricset + # should never take longer then period, as otherwise calls can pile up. + #timeout: 1s + + # Optional fields to be added to each event + #fields: + # datacenter: west + + # Network type to be used for redis connection. Default: tcp + #network: tcp + + # Max number of concurrent connections. Default: 10 + #maxconn: 10 + + # Filters can be used to reduce the number of fields sent. + #filters: + # - include_fields: + # fields: ["stats"] + + # Redis AUTH password. Empty by default. 
+ #password: foobared + +#---------------------------- System Status Module --------------------------- +- module: system + metricsets: ["cpu", "cores", "filesystem", "fsstats", "memory", "process"] + enabled: true + period: 2s + processes: ['.*'] + +#-------------------------- Zookeeper Status Module -------------------------- +- module: zookeeper + metricsets: ["mntr"] + enabled: true + period: 5s + hosts: ["localhost:2181"] + + diff --git a/metricbeat/etc/beat.yml b/metricbeat/etc/beat.yml index 389919cc9fc3..334bbde7497f 100644 --- a/metricbeat/etc/beat.yml +++ b/metricbeat/etc/beat.yml @@ -1,8 +1,8 @@ -########################## Metricbeat Configuration ########################### +###################### Metricbeat Configuration Example ####################### -# This file is a full configuration example documenting all non-deprecated -# options in comments. For a shorter configuration example, that contains only -# the most common options, please see metricbeat.short.yml in the same directory. +# This file is an example configuration file highlighting only the most common +# options. The metricbeat.full.yml file from the same directory contains all the +# supported options with more comments. You can use it as a reference. # # You can find the full configuration reference here: # https://www.elastic.co/guide/en/beats/metricbeat/index.html @@ -19,15 +19,6 @@ metricbeat.modules: # Apache hosts hosts: ["http://127.0.0.1/"] - # Path to server status. Default server-status - #server_status_path: "server-status" - - # Username of hosts. Empty by default - #username: test - - # Password of hosts. Empty by default - #password: test123 - #---------------------------- MySQL Status Module ---------------------------- - module: mysql metricsets: ["status"] @@ -38,12 +29,6 @@ metricbeat.modules: # The username and password can either be set in the DSN or for all hosts in username and password config option hosts: ["root@tcp(127.0.0.1:3306)/"] - # Username of hosts. 
Empty by default - #username: root - - # Password of hosts. Empty by default - #password: test - #---------------------------- Nginx Status Module ---------------------------- - module: nginx metricsets: ["stubstatus"] @@ -53,8 +38,6 @@ metricbeat.modules: # Nginx hosts hosts: ["http://127.0.0.1/"] - # Path to server status. Default server-status - #server_status_path: "server-status" #---------------------------- Redis Status Module ---------------------------- - module: redis @@ -65,32 +48,6 @@ metricbeat.modules: # Redis hosts hosts: ["127.0.0.1:6379"] - # Enabled defines if the module is enabled. Default: true - #enabled: true - - # Timeout after which time a metricset should return an error - # Timeout is by default defined as period, as a fetch of a metricset - # should never take longer then period, as otherwise calls can pile up. - #timeout: 1s - - # Optional fields to be added to each event - #fields: - # datacenter: west - - # Network type to be used for redis connection. Default: tcp - #network: tcp - - # Max number of concurrent connections. Default: 10 - #maxconn: 10 - - # Filters can be used to reduce the number of fields sent. - #filters: - # - include_fields: - # fields: ["stats"] - - # Redis AUTH password. Empty by default. 
- #password: foobared - #---------------------------- System Status Module --------------------------- - module: system metricsets: ["cpu", "cores", "filesystem", "fsstats", "memory", "process"] @@ -98,11 +55,4 @@ metricbeat.modules: period: 2s processes: ['.*'] -#-------------------------- Zookeeper Status Module -------------------------- -- module: zookeeper - metricsets: ["mntr"] - enabled: true - period: 5s - hosts: ["localhost:2181"] - diff --git a/metricbeat/metricbeat.full.yml b/metricbeat/metricbeat.full.yml new file mode 100644 index 000000000000..d0fa53a21ed8 --- /dev/null +++ b/metricbeat/metricbeat.full.yml @@ -0,0 +1,372 @@ +########################## Metricbeat Configuration ########################### + +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see metricbeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/metricbeat/index.html + +#========================== Modules configuration ============================ +metricbeat.modules: + +#---------------------------- Apache Status Module --------------------------- +- module: apache + metricsets: ["status"] + enabled: true + period: 1s + + # Apache hosts + hosts: ["http://127.0.0.1/"] + + # Path to server status. Default server-status + #server_status_path: "server-status" + + # Username of hosts. Empty by default + #username: test + + # Password of hosts. Empty by default + #password: test123 + +#---------------------------- MySQL Status Module ---------------------------- +- module: mysql + metricsets: ["status"] + enabled: true + period: 2s + + # Host DSN should be defined as "tcp(127.0.0.1:3306)/" + # The username and password can either be set in the DSN or for all hosts in username and password config option + hosts: ["root@tcp(127.0.0.1:3306)/"] + + # Username of hosts. 
Empty by default + #username: root + + # Password of hosts. Empty by default + #password: test + +#---------------------------- Nginx Status Module ---------------------------- +- module: nginx + metricsets: ["stubstatus"] + enabled: true + period: 1s + + # Nginx hosts + hosts: ["http://127.0.0.1/"] + + # Path to server status. Default server-status + #server_status_path: "server-status" + +#---------------------------- Redis Status Module ---------------------------- +- module: redis + metricsets: ["info"] + enabled: true + period: 1s + + # Redis hosts + hosts: ["127.0.0.1:6379"] + + # Enabled defines if the module is enabled. Default: true + #enabled: true + + # Timeout after which time a metricset should return an error + # Timeout is by default defined as period, as a fetch of a metricset + # should never take longer then period, as otherwise calls can pile up. + #timeout: 1s + + # Optional fields to be added to each event + #fields: + # datacenter: west + + # Network type to be used for redis connection. Default: tcp + #network: tcp + + # Max number of concurrent connections. Default: 10 + #maxconn: 10 + + # Filters can be used to reduce the number of fields sent. + #filters: + # - include_fields: + # fields: ["stats"] + + # Redis AUTH password. Empty by default. + #password: foobared + +#---------------------------- System Status Module --------------------------- +- module: system + metricsets: ["cpu", "cores", "filesystem", "fsstats", "memory", "process"] + enabled: true + period: 2s + processes: ['.*'] + +#-------------------------- Zookeeper Status Module -------------------------- +- module: zookeeper + metricsets: ["mntr"] + enabled: true + period: 5s + hosts: ["localhost:2181"] + + + +#================================ General ===================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. 
+# If this options is not defined, the hostname is used. +#name: + +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. +#fields_under_root: false + +# Uncomment the following if you want to ignore transactions created +# by the server on which the shipper is installed. This option is useful +# to remove duplicates if shippers are installed on multiple servers. +#ignore_outgoing: true + +# How often (in seconds) shippers are publishing their IPs to the topology map. +# The default is 10 seconds. +#refresh_topology_freq: 10 + +# Expiration time (in seconds) of the IPs published by a shipper to the topology map. +# All the IPs will be deleted afterwards. Note, that the value must be higher than +# refresh_topology_freq. The default is 15 seconds. +#topology_expire: 15 + +# Internal queue size for single events in processing pipeline +#queue_size: 1000 + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +#================================ Outputs ===================================== + +# Configure what outputs to use when sending the data collected by the beat. +# Multiple outputs may be used. + +#-------------------------- Elasticsearch output ------------------------------ +output.elasticsearch: + # Array of hosts to connect to. 
+ # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + hosts: ["localhost:9200"] + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "admin" + #password: "s3cr3t" + + # Dictionary of HTTP parameters to pass within the url with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Number of workers per Elasticsearch host. + #worker: 1 + + # Optional index name. The default is "metricbeat" and generates + # [metricbeat-]YYYY.MM.DD keys. + #index: "metricbeat" + + # Optional HTTP Path + #path: "/elasticsearch" + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # Configure http request timeout before failing an request to Elasticsearch. + #timeout: 90 + + # The number of seconds to wait for new events between two bulk API index requests. + # If `bulk_max_size` is reached before this interval expires, addition bulk index + # requests are made. + #flush_interval: 1 + + # Boolean that sets if the topology is kept in Elasticsearch. The default is + # false. This option makes sense only for Packetbeat. + #save_topology: false + + # The time to live in seconds for the topology information that is stored in + # Elasticsearch. The default is 15 seconds. + #topology_expire: 15 + + # A template is used to set the mapping in Elasticsearch + # By default template loading is enabled and the template is loaded. 
+ # These settings can be adjusted to load your own template or overwrite existing ones + + # Template name. By default the template name is metricbeat. + template.name: "metricbeat" + + # Path to template file + template.path: "metricbeat.template.json" + + # Overwrite existing template + template.overwrite: false + + # TLS configuration. By default is off. + # List of root certificates for HTTPS server verifications + #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for TLS client authentication + #tls.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #tls.certificate_key: "/etc/pki/client/cert.key" + + # Controls whether the client verifies server certificates and host name. + # If insecure is set to true, all server host names and certificates will be + # accepted. In this mode TLS based connections are susceptible to + # man-in-the-middle attacks. Use only for testing. + #tls.insecure: true + + # Configure cipher suites to be used for TLS connections + #tls.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #tls.curve_types: [] + + # Configure minimum TLS version allowed for connection to logstash + #tls.min_version: 1.0 + + # Configure maximum TLS version allowed for connection to logstash + #tls.max_version: 1.2 + + +#----------------------------- Logstash output -------------------------------- +#output.logstash: + # The Logstash hosts + #hosts: ["localhost:5044"] + + # Number of workers per Logstash host. + #worker: 1 + + # Set gzip compression level. + #compression_level: 3 + + # Optional load balance the events between the Logstash hosts + #loadbalance: true + + # Optional index name. The default index name is set to name of the beat + # in all lowercase. + #index: metricbeat + + # SOCKS5 proxy server URL + #proxy_url: socks5://user:password@socks5-server:2233 + + # Resolve names locally when using a proxy server. Defaults to false. 
+ #proxy_use_local_resolver: false + + # Optional TLS. By default is off. + # List of root certificates for HTTPS server verifications + #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for TLS client authentication + #tls.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #tls.certificate_key: "/etc/pki/client/cert.key" + + # Controls whether the client verifies server certificates and host name. + # If insecure is set to true, all server host names and certificates will be + # accepted. In this mode TLS based connections are susceptible to + # man-in-the-middle attacks. Use only for testing. + #tls.insecure: true + + # Configure cipher suites to be used for TLS connections + #tls.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #tls.curve_types: [] + + +#------------------------------- File output ---------------------------------- +#output.file: + # Path to the directory where to save the generated files. The option is mandatory. + #path: "/tmp/metricbeat" + + # Name of the generated files. The default is `metricbeat` and it generates files: `metricbeat`, `metricbeat.1`, `metricbeat.2`, etc. + #filename: metricbeat + + # Maximum size in kilobytes of each file. When this size is reached, the files are + # rotated. The default value is 10240 kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, the + # oldest file is deleted and the rest are shifted from last to first. The default + # is 7 files. + #number_of_files: 7 + + +#----------------------------- Console output --------------------------------- +#output.console: + # Pretty print json event + #pretty: false + +#================================ Logging ===================================== +# There are three options for the log output: syslog, file, stderr. 
+# Under Windows systems, the log files are per default sent to the file output, +# under all other system per default to syslog. + +# Sets log level. The default log level is error. +# Available log levels are: critical, error, warning, info, debug +#logging.level: error + +# Enable debug output for selected components. To enable all selectors use ["*"] +# Other available selectors are beat, publish, service +# Multiple selectors can be chained. +#logging.selectors: [ ] + +# Send all logging output to syslog. The default is false. +#logging.to_syslog: true + +# Logging to rotating files files. Set logging.to_files to false to disable logging to +# files. +logging.to_files: true +logging.files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/mybeat + + # The name of the files where the logs are written to. + #name: mybeat + + # Configure log file size limit. If limit is reached, log file will be + # automatically rotated + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. Oldest files will be deleted first. + #keepfiles: 7 + +#================================ Filters ===================================== + +# This section defines a list of filtering rules that are applied one by one starting with the +# exported event: +# event -> filter1 -> event1 -> filter2 ->event2 ... +# Supported actions: drop_fields, drop_event, include_fields +#filters: +# - drop_fields: +# equals: +# status: OK +# fields: [ ] diff --git a/metricbeat/metricbeat.short.yml b/metricbeat/metricbeat.short.yml deleted file mode 100644 index 2da6c8c244bf..000000000000 --- a/metricbeat/metricbeat.short.yml +++ /dev/null @@ -1,113 +0,0 @@ -########################## Metricbeat Configuration ########################### - -# This file is a full configuration example documenting all non-deprecated -# options in comments. 
For a shorter configuration example, that contains only -# the most common options, please see metricbeat.short.yml in the same directory. -# -# You can find the full configuration reference here: -# https://www.elastic.co/guide/en/beats/metricbeat/index.html - -#========================== Modules configuration ============================ -metricbeat.modules: - -#---------------------------- Apache Status Module --------------------------- -- module: apache - metricsets: ["status"] - enabled: true - period: 1s - - # Apache hosts - hosts: ["http://127.0.0.1/"] - -#---------------------------- MySQL Status Module ---------------------------- -- module: mysql - metricsets: ["status"] - enabled: true - period: 2s - - # Host DSN should be defined as "tcp(127.0.0.1:3306)/" - # The username and password can either be set in the DSN or for all hosts in username and password config option - hosts: ["root@tcp(127.0.0.1:3306)/"] - -#---------------------------- Nginx Status Module ---------------------------- -- module: nginx - metricsets: ["stubstatus"] - enabled: true - period: 1s - - # Nginx hosts - hosts: ["http://127.0.0.1/"] - - -#---------------------------- Redis Status Module ---------------------------- -- module: redis - metricsets: ["info"] - enabled: true - period: 1s - - # Redis hosts - hosts: ["127.0.0.1:6379"] - -#---------------------------- System Status Module --------------------------- -- module: system - metricsets: ["cpu", "cores", "filesystem", "fsstats", "memory", "process"] - enabled: true - period: 2s - processes: ['.*'] - - - -#================================ General ===================================== - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -#name: - -# The tags of the shipper are included in their own field with each -# transaction published. 
-#tags: ["service-X", "web-tier"] - -# Optional fields that you can specify to add additional information to the -# output. -#fields: -# env: staging - -#================================ Outputs ===================================== - -# Configure what outputs to use when sending the data collected by the beat. -# Multiple outputs may be used. - -#-------------------------- Elasticsearch output ------------------------------ -output.elasticsearch: - # Array of hosts to connect to. - hosts: ["localhost:9200"] - - # Template name. By default the template name is metricbeat. - template.name: "metricbeat" - - # Path to template file - template.path: "metricbeat.template.json" - - # Overwrite existing template - template.overwrite: false - -#----------------------------- Logstash output -------------------------------- -#output.logstash: - # The Logstash hosts - #hosts: ["localhost:5044"] - - # Optional TLS. By default is off. - # List of root certificates for HTTPS server verifications - #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for TLS client authentication - #tls.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #tls.certificate_key: "/etc/pki/client/cert.key" - -#================================ Logging ===================================== - -# Sets log level. The default log level is error. -# Available log levels are: critical, error, warning, info, debug -#logging.level: error diff --git a/metricbeat/metricbeat.yml b/metricbeat/metricbeat.yml index 2b7064403bc5..5393d98b2c23 100644 --- a/metricbeat/metricbeat.yml +++ b/metricbeat/metricbeat.yml @@ -1,8 +1,8 @@ -########################## Metricbeat Configuration ########################### +###################### Metricbeat Configuration Example ####################### -# This file is a full configuration example documenting all non-deprecated -# options in comments. 
For a shorter configuration example, that contains only -# the most common options, please see metricbeat.short.yml in the same directory. +# This file is an example configuration file highlighting only the most common +# options. The metricbeat.full.yml file from the same directory contains all the +# supported options with more comments. You can use it as a reference. # # You can find the full configuration reference here: # https://www.elastic.co/guide/en/beats/metricbeat/index.html @@ -19,15 +19,6 @@ metricbeat.modules: # Apache hosts hosts: ["http://127.0.0.1/"] - # Path to server status. Default server-status - #server_status_path: "server-status" - - # Username of hosts. Empty by default - #username: test - - # Password of hosts. Empty by default - #password: test123 - #---------------------------- MySQL Status Module ---------------------------- - module: mysql metricsets: ["status"] @@ -38,12 +29,6 @@ metricbeat.modules: # The username and password can either be set in the DSN or for all hosts in username and password config option hosts: ["root@tcp(127.0.0.1:3306)/"] - # Username of hosts. Empty by default - #username: root - - # Password of hosts. Empty by default - #password: test - #---------------------------- Nginx Status Module ---------------------------- - module: nginx metricsets: ["stubstatus"] @@ -53,8 +38,6 @@ metricbeat.modules: # Nginx hosts hosts: ["http://127.0.0.1/"] - # Path to server status. Default server-status - #server_status_path: "server-status" #---------------------------- Redis Status Module ---------------------------- - module: redis @@ -65,32 +48,6 @@ metricbeat.modules: # Redis hosts hosts: ["127.0.0.1:6379"] - # Enabled defines if the module is enabled. Default: true - #enabled: true - - # Timeout after which time a metricset should return an error - # Timeout is by default defined as period, as a fetch of a metricset - # should never take longer then period, as otherwise calls can pile up. 
- #timeout: 1s - - # Optional fields to be added to each event - #fields: - # datacenter: west - - # Network type to be used for redis connection. Default: tcp - #network: tcp - - # Max number of concurrent connections. Default: 10 - #maxconn: 10 - - # Filters can be used to reduce the number of fields sent. - #filters: - # - include_fields: - # fields: ["stats"] - - # Redis AUTH password. Empty by default. - #password: foobared - #---------------------------- System Status Module --------------------------- - module: system metricsets: ["cpu", "cores", "filesystem", "fsstats", "memory", "process"] @@ -98,59 +55,23 @@ metricbeat.modules: period: 2s processes: ['.*'] -#-------------------------- Zookeeper Status Module -------------------------- -- module: zookeeper - metricsets: ["mntr"] - enabled: true - period: 5s - hosts: ["localhost:2181"] - #================================ General ===================================== # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. #name: # The tags of the shipper are included in their own field with each -# transaction published. Tags make it easy to group servers by different -# logical properties. +# transaction published. #tags: ["service-X", "web-tier"] # Optional fields that you can specify to add additional information to the -# output. Fields can be scalar values, arrays, dictionaries, or any nested -# combination of these. +# output. #fields: # env: staging -# If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields -# sub-dictionary. Default is false. -#fields_under_root: false - -# Uncomment the following if you want to ignore transactions created -# by the server on which the shipper is installed. 
This option is useful -# to remove duplicates if shippers are installed on multiple servers. -#ignore_outgoing: true - -# How often (in seconds) shippers are publishing their IPs to the topology map. -# The default is 10 seconds. -#refresh_topology_freq: 10 - -# Expiration time (in seconds) of the IPs published by a shipper to the topology map. -# All the IPs will be deleted afterwards. Note, that the value must be higher than -# refresh_topology_freq. The default is 15 seconds. -#topology_expire: 15 - -# Internal queue size for single events in processing pipeline -#queue_size: 1000 - -# Sets the maximum number of CPUs that can be executing simultaneously. The -# default is the number of logical CPUs available in the system. -#max_procs: - #================================ Outputs ===================================== # Configure what outputs to use when sending the data collected by the beat. @@ -159,63 +80,8 @@ metricbeat.modules: #-------------------------- Elasticsearch output ------------------------------ output.elasticsearch: # Array of hosts to connect to. - # Scheme and port can be left out and will be set to the default (http and 9200) - # In case you specify and additional path, the scheme is required: http://localhost:9200/path - # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 hosts: ["localhost:9200"] - # Optional protocol and basic auth credentials. - #protocol: "https" - #username: "admin" - #password: "s3cr3t" - - # Dictionary of HTTP parameters to pass within the url with index operations. - #parameters: - #param1: value1 - #param2: value2 - - # Number of workers per Elasticsearch host. - #worker: 1 - - # Optional index name. The default is "metricbeat" and generates - # [metricbeat-]YYYY.MM.DD keys. - #index: "metricbeat" - - # Optional HTTP Path - #path: "/elasticsearch" - - # Proxy server url - #proxy_url: http://proxy:3128 - - # The number of times a particular Elasticsearch index operation is attempted. 
If - # the indexing operation doesn't succeed after this many retries, the events are - # dropped. The default is 3. - #max_retries: 3 - - # The maximum number of events to bulk in a single Elasticsearch bulk API index request. - # The default is 50. - #bulk_max_size: 50 - - # Configure http request timeout before failing an request to Elasticsearch. - #timeout: 90 - - # The number of seconds to wait for new events between two bulk API index requests. - # If `bulk_max_size` is reached before this interval expires, addition bulk index - # requests are made. - #flush_interval: 1 - - # Boolean that sets if the topology is kept in Elasticsearch. The default is - # false. This option makes sense only for Packetbeat. - #save_topology: false - - # The time to live in seconds for the topology information that is stored in - # Elasticsearch. The default is 15 seconds. - #topology_expire: 15 - - # A template is used to set the mapping in Elasticsearch - # By default template loading is enabled and the template is loaded. - # These settings can be adjusted to load your own template or overwrite existing ones - # Template name. By default the template name is metricbeat. template.name: "metricbeat" @@ -225,59 +91,11 @@ output.elasticsearch: # Overwrite existing template template.overwrite: false - # TLS configuration. By default is off. - # List of root certificates for HTTPS server verifications - #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for TLS client authentication - #tls.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #tls.certificate_key: "/etc/pki/client/cert.key" - - # Controls whether the client verifies server certificates and host name. - # If insecure is set to true, all server host names and certificates will be - # accepted. In this mode TLS based connections are susceptible to - # man-in-the-middle attacks. Use only for testing. 
- #tls.insecure: true - - # Configure cipher suites to be used for TLS connections - #tls.cipher_suites: [] - - # Configure curve types for ECDHE based cipher suites - #tls.curve_types: [] - - # Configure minimum TLS version allowed for connection to logstash - #tls.min_version: 1.0 - - # Configure maximum TLS version allowed for connection to logstash - #tls.max_version: 1.2 - - #----------------------------- Logstash output -------------------------------- #output.logstash: # The Logstash hosts #hosts: ["localhost:5044"] - # Number of workers per Logstash host. - #worker: 1 - - # Set gzip compression level. - #compression_level: 3 - - # Optional load balance the events between the Logstash hosts - #loadbalance: true - - # Optional index name. The default index name is set to name of the beat - # in all lowercase. - #index: metricbeat - - # SOCKS5 proxy server URL - #proxy_url: socks5://user:password@socks5-server:2233 - - # Resolve names locally when using a proxy server. Defaults to false. - #proxy_use_local_resolver: false - # Optional TLS. By default is off. # List of root certificates for HTTPS server verifications #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] @@ -288,85 +106,8 @@ output.elasticsearch: # Client Certificate Key #tls.certificate_key: "/etc/pki/client/cert.key" - # Controls whether the client verifies server certificates and host name. - # If insecure is set to true, all server host names and certificates will be - # accepted. In this mode TLS based connections are susceptible to - # man-in-the-middle attacks. Use only for testing. - #tls.insecure: true - - # Configure cipher suites to be used for TLS connections - #tls.cipher_suites: [] - - # Configure curve types for ECDHE based cipher suites - #tls.curve_types: [] - - -#------------------------------- File output ---------------------------------- -#output.file: - # Path to the directory where to save the generated files. The option is mandatory. 
- #path: "/tmp/metricbeat" - - # Name of the generated files. The default is `metricbeat` and it generates files: `metricbeat`, `metricbeat.1`, `metricbeat.2`, etc. - #filename: metricbeat - - # Maximum size in kilobytes of each file. When this size is reached, the files are - # rotated. The default value is 10240 kB. - #rotate_every_kb: 10000 - - # Maximum number of files under path. When this number of files is reached, the - # oldest file is deleted and the rest are shifted from last to first. The default - # is 7 files. - #number_of_files: 7 - - -#----------------------------- Console output --------------------------------- -#output.console: - # Pretty print json event - #pretty: false - #================================ Logging ===================================== -# There are three options for the log output: syslog, file, stderr. -# Under Windows systems, the log files are per default sent to the file output, -# under all other system per default to syslog. # Sets log level. The default log level is error. # Available log levels are: critical, error, warning, info, debug #logging.level: error - -# Enable debug output for selected components. To enable all selectors use ["*"] -# Other available selectors are beat, publish, service -# Multiple selectors can be chained. -#logging.selectors: [ ] - -# Send all logging output to syslog. The default is false. -#logging.to_syslog: true - -# Logging to rotating files files. Set logging.to_files to false to disable logging to -# files. -logging.to_files: true -logging.files: - # Configure the path where the logs are written. The default is the logs directory - # under the home path (the binary location). - #path: /var/log/mybeat - - # The name of the files where the logs are written to. - #name: mybeat - - # Configure log file size limit. If limit is reached, log file will be - # automatically rotated - #rotateeverybytes: 10485760 # = 10MB - - # Number of rotated log files to keep. 
Oldest files will be deleted first. - #keepfiles: 7 - -#================================ Filters ===================================== - -# This section defines a list of filtering rules that are applied one by one starting with the -# exported event: -# event -> filter1 -> event1 -> filter2 ->event2 ... -# Supported actions: drop_fields, drop_event, include_fields -#filters: -# - drop_fields: -# equals: -# status: OK -# fields: [ ] diff --git a/metricbeat/module/apache/_beat/config.full.yml b/metricbeat/module/apache/_beat/config.full.yml new file mode 100644 index 000000000000..a70e43dce5c3 --- /dev/null +++ b/metricbeat/module/apache/_beat/config.full.yml @@ -0,0 +1,16 @@ +- module: apache + metricsets: ["status"] + enabled: true + period: 1s + + # Apache hosts + hosts: ["http://127.0.0.1/"] + + # Path to server status. Default server-status + #server_status_path: "server-status" + + # Username of hosts. Empty by default + #username: test + + # Password of hosts. Empty by default + #password: test123 diff --git a/metricbeat/module/apache/_beat/config.short.yml b/metricbeat/module/apache/_beat/config.short.yml deleted file mode 100644 index 2697d6df2452..000000000000 --- a/metricbeat/module/apache/_beat/config.short.yml +++ /dev/null @@ -1,7 +0,0 @@ -- module: apache - metricsets: ["status"] - enabled: true - period: 1s - - # Apache hosts - hosts: ["http://127.0.0.1/"] diff --git a/metricbeat/module/apache/_beat/config.yml b/metricbeat/module/apache/_beat/config.yml index a70e43dce5c3..2697d6df2452 100644 --- a/metricbeat/module/apache/_beat/config.yml +++ b/metricbeat/module/apache/_beat/config.yml @@ -5,12 +5,3 @@ # Apache hosts hosts: ["http://127.0.0.1/"] - - # Path to server status. Default server-status - #server_status_path: "server-status" - - # Username of hosts. Empty by default - #username: test - - # Password of hosts. 
Empty by default - #password: test123 diff --git a/metricbeat/module/mysql/_beat/config.short.yml b/metricbeat/module/mysql/_beat/config.full.yml similarity index 70% rename from metricbeat/module/mysql/_beat/config.short.yml rename to metricbeat/module/mysql/_beat/config.full.yml index 9926d504a562..5013b5869e33 100644 --- a/metricbeat/module/mysql/_beat/config.short.yml +++ b/metricbeat/module/mysql/_beat/config.full.yml @@ -6,3 +6,9 @@ # Host DSN should be defined as "tcp(127.0.0.1:3306)/" # The username and password can either be set in the DSN or for all hosts in username and password config option hosts: ["root@tcp(127.0.0.1:3306)/"] + + # Username of hosts. Empty by default + #username: root + + # Password of hosts. Empty by default + #password: test diff --git a/metricbeat/module/mysql/_beat/config.yml b/metricbeat/module/mysql/_beat/config.yml index 5013b5869e33..9926d504a562 100644 --- a/metricbeat/module/mysql/_beat/config.yml +++ b/metricbeat/module/mysql/_beat/config.yml @@ -6,9 +6,3 @@ # Host DSN should be defined as "tcp(127.0.0.1:3306)/" # The username and password can either be set in the DSN or for all hosts in username and password config option hosts: ["root@tcp(127.0.0.1:3306)/"] - - # Username of hosts. Empty by default - #username: root - - # Password of hosts. Empty by default - #password: test diff --git a/metricbeat/module/nginx/_beat/config.short.yml b/metricbeat/module/nginx/_beat/config.full.yml similarity index 58% rename from metricbeat/module/nginx/_beat/config.short.yml rename to metricbeat/module/nginx/_beat/config.full.yml index 80c6c586a048..900ea34b7712 100644 --- a/metricbeat/module/nginx/_beat/config.short.yml +++ b/metricbeat/module/nginx/_beat/config.full.yml @@ -6,3 +6,5 @@ # Nginx hosts hosts: ["http://127.0.0.1/"] + # Path to server status. 
Default server-status + #server_status_path: "server-status" diff --git a/metricbeat/module/nginx/_beat/config.yml b/metricbeat/module/nginx/_beat/config.yml index 900ea34b7712..80c6c586a048 100644 --- a/metricbeat/module/nginx/_beat/config.yml +++ b/metricbeat/module/nginx/_beat/config.yml @@ -6,5 +6,3 @@ # Nginx hosts hosts: ["http://127.0.0.1/"] - # Path to server status. Default server-status - #server_status_path: "server-status" diff --git a/metricbeat/module/redis/_beat/config.full.yml b/metricbeat/module/redis/_beat/config.full.yml new file mode 100644 index 000000000000..157b7fa0b1f9 --- /dev/null +++ b/metricbeat/module/redis/_beat/config.full.yml @@ -0,0 +1,33 @@ +- module: redis + metricsets: ["info"] + enabled: true + period: 1s + + # Redis hosts + hosts: ["127.0.0.1:6379"] + + # Enabled defines if the module is enabled. Default: true + #enabled: true + + # Timeout after which time a metricset should return an error + # Timeout is by default defined as period, as a fetch of a metricset + # should never take longer then period, as otherwise calls can pile up. + #timeout: 1s + + # Optional fields to be added to each event + #fields: + # datacenter: west + + # Network type to be used for redis connection. Default: tcp + #network: tcp + + # Max number of concurrent connections. Default: 10 + #maxconn: 10 + + # Filters can be used to reduce the number of fields sent. + #filters: + # - include_fields: + # fields: ["stats"] + + # Redis AUTH password. Empty by default. 
+ #password: foobared diff --git a/metricbeat/module/redis/_beat/config.short.yml b/metricbeat/module/redis/_beat/config.short.yml deleted file mode 100644 index 927480f37cf9..000000000000 --- a/metricbeat/module/redis/_beat/config.short.yml +++ /dev/null @@ -1,7 +0,0 @@ -- module: redis - metricsets: ["info"] - enabled: true - period: 1s - - # Redis hosts - hosts: ["127.0.0.1:6379"] diff --git a/metricbeat/module/redis/_beat/config.yml b/metricbeat/module/redis/_beat/config.yml index 157b7fa0b1f9..927480f37cf9 100644 --- a/metricbeat/module/redis/_beat/config.yml +++ b/metricbeat/module/redis/_beat/config.yml @@ -5,29 +5,3 @@ # Redis hosts hosts: ["127.0.0.1:6379"] - - # Enabled defines if the module is enabled. Default: true - #enabled: true - - # Timeout after which time a metricset should return an error - # Timeout is by default defined as period, as a fetch of a metricset - # should never take longer then period, as otherwise calls can pile up. - #timeout: 1s - - # Optional fields to be added to each event - #fields: - # datacenter: west - - # Network type to be used for redis connection. Default: tcp - #network: tcp - - # Max number of concurrent connections. Default: 10 - #maxconn: 10 - - # Filters can be used to reduce the number of fields sent. - #filters: - # - include_fields: - # fields: ["stats"] - - # Redis AUTH password. Empty by default. - #password: foobared diff --git a/metricbeat/scripts/config_collector.py b/metricbeat/scripts/config_collector.py index 9953bbca9e4a..00d929f9bda4 100644 --- a/metricbeat/scripts/config_collector.py +++ b/metricbeat/scripts/config_collector.py @@ -4,11 +4,11 @@ # Collects config for all modules -header = """########################## Metricbeat Configuration ########################### +header_full = """########################## Metricbeat Configuration ########################### # This file is a full configuration example documenting all non-deprecated # options in comments. 
For a shorter configuration example, that contains only -# the most common options, please see metricbeat.short.yml in the same directory. +# the most common options, please see metricbeat.yml in the same directory. # # You can find the full configuration reference here: # https://www.elastic.co/guide/en/beats/metricbeat/index.html @@ -21,7 +21,7 @@ header_short = """###################### Metricbeat Configuration Example ####################### # This file is an example configuration file highlighting only the most common -# options. The metricbeat.yml file from the same directory contains all the +# options. The metricbeat.full.yml file from the same directory contains all the # supported options with more comments. You can use it as a reference. # # You can find the full configuration reference here: @@ -33,13 +33,17 @@ """ -def collect(beat_path, short=False): +def collect(beat_path, full=False): base_dir = beat_path + "/module" path = os.path.abspath(base_dir) # yml file - config_yml = header + + if full: + config_yml = header_full + else: + config_yml = header_short # Iterate over all modules for module in os.listdir(base_dir): @@ -51,27 +55,26 @@ def collect(beat_path, short=False): # By default, short config is read if short is set short_config = True - # Check if short config exists - if short: - short_module_config = beat_path + "/config.short.yml" - if os.path.isfile(short_module_config): - module_configs = short_module_config + # Check if full config exists + if full: + full_module_config = beat_path + "/config.full.yml" + if os.path.isfile(full_module_config): + module_configs = full_module_config # Only check folders where config exists if not os.path.isfile(module_configs): continue - # Load title from fields.yml with open(beat_path + "/fields.yml") as f: fields = yaml.load(f.read()) title = fields[0]["title"] # Check if short config was disabled in fields.yml - if short and "short_config" in fields[0]: + if not full and "short_config" in fields[0]: 
short_config = fields[0]["short_config"] - if short and short_config == False: + if not full and short_config is False: continue config_yml += get_title_line(title) @@ -85,6 +88,7 @@ def collect(beat_path, short=False): # output string so it can be concatenated print config_yml + # Makes sure every title line is 79 + newline chars long def get_title_line(title): dashes = (79 - 10 - len(title)) / 2 @@ -101,10 +105,10 @@ def get_title_line(title): parser = argparse.ArgumentParser( description="Collects modules config") parser.add_argument("path", help="Path to the beat folder") - parser.add_argument("--short", action="store_true", - help="Collect the short versions") + parser.add_argument("--full", action="store_true", + help="Collect the full versions") args = parser.parse_args() beat_path = args.path - collect(beat_path, args.short) + collect(beat_path, args.full) diff --git a/packetbeat/etc/beat.full.yml b/packetbeat/etc/beat.full.yml new file mode 100644 index 000000000000..952139cb1be1 --- /dev/null +++ b/packetbeat/etc/beat.full.yml @@ -0,0 +1,176 @@ +###################### Packetbeat Configuration Example ####################### + +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see packetbeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/packetbeat/index.html + +#============================== Network device ================================ + +# Select the network interface to sniff the data. You can use the "any" +# keyword to sniff on all connected interfaces. +packetbeat.interfaces.device: any + +#================================== Flows ===================================== + +# Set network flow timeout. Flow is killed if no packet is received before being +# timed out. +packetbeat.flows.timeout: 30s + +# Configure reporting period. 
If set to -1, only killed flows will be reported +packetbeat.flows.period: 10s + +#========================== Transaction protocols ============================= + +packetbeat.protocols.icmp: + # Enable ICMPv4 and ICMPv6 monitoring. Default: false + enabled: true + +packetbeat.protocols.amqp: + # Configure the ports where to listen for AMQP traffic. You can disable + # the AMQP protocol by commenting out the list of ports. + ports: [5672] + # Truncate messages that are published and avoid huge messages being + # indexed. + # Default: 1000 + #max_body_length: 1000 + + # Hide the header fields in header frames. + # Default: false + #parse_headers: false + + # Hide the additional arguments of method frames. + # Default: false + #parse_arguments: false + + # Hide all methods relative to connection negociation between server and + # client. + # Default: true + #hide_connection_information: true + +packetbeat.protocols.dns: + # Configure the ports where to listen for DNS traffic. You can disable + # the DNS protocol by commenting out the list of ports. + ports: [53] + + # include_authorities controls whether or not the dns.authorities field + # (authority resource records) is added to messages. + # Default: false + include_authorities: true + # include_additionals controls whether or not the dns.additionals field + # (additional resource records) is added to messages. + # Default: false + include_additionals: true + + # send_request and send_response control whether or not the stringified DNS + # request and response message are added to the result. + # Nearly all data about the request/response is available in the dns.* + # fields, but this can be useful if you need visibility specifically + # into the request or the response. + # Default: false + # send_request: true + # send_response: true + +packetbeat.protocols.http: + # Configure the ports where to listen for HTTP traffic. You can disable + # the HTTP protocol by commenting out the list of ports. 
+ ports: [80, 8080, 8000, 5000, 8002] + + # Uncomment the following to hide certain parameters in URL or forms attached + # to HTTP requests. The names of the parameters are case insensitive. + # The value of the parameters will be replaced with the 'xxxxx' string. + # This is generally useful for avoiding storing user passwords or other + # sensitive information. + # Only query parameters and top level form parameters are replaced. + # hide_keywords: ['pass', 'password', 'passwd'] + +packetbeat.protocols.memcache: + # Configure the ports where to listen for memcache traffic. You can disable + # the Memcache protocol by commenting out the list of ports. + ports: [11211] + + # Uncomment the parseunknown option to force the memcache text protocol parser + # to accept unknown commands. + # Note: All unknown commands MUST not contain any data parts! + # Default: false + # parseunknown: true + + # Update the maxvalue option to store the values - base64 encoded - in the + # json output. + # possible values: + # maxvalue: -1 # store all values (text based protocol multi-get) + # maxvalue: 0 # store no values at all + # maxvalue: N # store up to N values + # Default: 0 + # maxvalues: -1 + + # Use maxbytespervalue to limit the number of bytes to be copied per value element. + # Note: Values will be base64 encoded, so actual size in json document + # will be 4 times maxbytespervalue. + # Default: unlimited + # maxbytespervalue: 100 + + # UDP transaction timeout in milliseconds. + # Note: Quiet messages in UDP binary protocol will get response only in error case. + # The memcached analyzer will wait for udptransactiontimeout milliseconds + # before publishing quiet messages. Non quiet messages or quiet requests with + # error response will not have to wait for the timeout. + # Default: 200 + # udptransactiontimeout: 1000 + +packetbeat.protocols.mysql: + # Configure the ports where to listen for MySQL traffic. 
You can disable + # the MySQL protocol by commenting out the list of ports. + ports: [3306] + +packetbeat.protocols.pgsql: + # Configure the ports where to listen for Pgsql traffic. You can disable + # the Pgsql protocol by commenting out the list of ports. + ports: [5432] + +packetbeat.protocols.redis: + # Configure the ports where to listen for Redis traffic. You can disable + # the Redis protocol by commenting out the list of ports. + ports: [6379] + +packetbeat.protocols.thrift: + # Configure the ports where to listen for Thrift-RPC traffic. You can disable + # the Thrift-RPC protocol by commenting out the list of ports. + ports: [9090] + +packetbeat.protocols.mongodb: + # Configure the ports where to listen for MongoDB traffic. You can disable + # the MongoDB protocol by commenting out the list of ports. + ports: [27017] + +packetbeat.protocols.nfs: + # Configure the ports where to listen for NFS traffic. You can disable + # the NFS protocol by commenting out the list of ports. + ports: [2049] + +#=========================== Monitored processes ============================== + +# Configure the processes to be monitored and how to find them. If a process is +# monitored then Packetbeat attempts to use it's name to fill in the `proc` and +# `client_proc` fields. +# The processes can be found by searching their command line by a given string. +# +# Process matching is optional and can be enabled by uncommenting the following +# lines. 
+# +#packetbeat.procs: +# enabled: false +# monitored: +# - process: mysqld +# cmdline_grep: mysqld +# +# - process: pgsql +# cmdline_grep: postgres +# +# - process: nginx +# cmdline_grep: nginx +# +# - process: app +# cmdline_grep: gunicorn diff --git a/packetbeat/etc/beat.short.yml b/packetbeat/etc/beat.short.yml deleted file mode 100644 index af442db6a705..000000000000 --- a/packetbeat/etc/beat.short.yml +++ /dev/null @@ -1,89 +0,0 @@ -#################### Packetbeat Configuration Example ######################### - -# This file is an example configuration file highlighting only the most common -# options. The packetbeat.yml file from the same directory contains all the -# supported options with more comments. You can use it as a reference. -# -# You can find the full configuration reference here: -# https://www.elastic.co/guide/en/beats/packetbeat/index.html - -#============================== Network device ================================ - -# Select the network interface to sniff the data. You can use the "any" -# keyword to sniff on all connected interfaces. -packetbeat.interfaces.device: any - -#================================== Flows ===================================== - -# Comment out all options to disable flows reporting. - -# Set network flow timeout. Flow is killed if no packet is received before being -# timed out. -packetbeat.flows.timeout: 30s - -# Configure reporting period. If set to -1, only killed flows will be reported -packetbeat.flows.period: 10s - -#========================== Transaction protocols ============================= - -packetbeat.protocols.icmp: - # Enable ICMPv4 and ICMPv6 monitoring. Default: false - enabled: true - -packetbeat.protocols.amqp: - # Configure the ports where to listen for AMQP traffic. You can disable - # the AMQP protocol by commenting out the list of ports. - ports: [5672] - -packetbeat.protocols.dns: - # Configure the ports where to listen for DNS traffic. 
You can disable - # the DNS protocol by commenting out the list of ports. - ports: [53] - - # include_authorities controls whether or not the dns.authorities field - # (authority resource records) is added to messages. - include_authorities: true - - # include_additionals controls whether or not the dns.additionals field - # (additional resource records) is added to messages. - include_additionals: true - -packetbeat.protocols.http: - # Configure the ports where to listen for HTTP traffic. You can disable - # the HTTP protocol by commenting out the list of ports. - ports: [80, 8080, 8000, 5000, 8002] - -packetbeat.protocols.memcache: - # Configure the ports where to listen for memcache traffic. You can disable - # the Memcache protocol by commenting out the list of ports. - ports: [11211] - -packetbeat.protocols.mysql: - # Configure the ports where to listen for MySQL traffic. You can disable - # the MySQL protocol by commenting out the list of ports. - ports: [3306] - -packetbeat.protocols.pgsql: - # Configure the ports where to listen for Pgsql traffic. You can disable - # the Pgsql protocol by commenting out the list of ports. - ports: [5432] - -packetbeat.protocols.redis: - # Configure the ports where to listen for Redis traffic. You can disable - # the Redis protocol by commenting out the list of ports. - ports: [6379] - -packetbeat.protocols.thrift: - # Configure the ports where to listen for Thrift-RPC traffic. You can disable - # the Thrift-RPC protocol by commenting out the list of ports. - ports: [9090] - -packetbeat.protocols.mongodb: - # Configure the ports where to listen for MongoDB traffic. You can disable - # the MongoDB protocol by commenting out the list of ports. - ports: [27017] - -packetbeat.protocols.nfs: - # Configure the ports where to listen for NFS traffic. You can disable - # the NFS protocol by commenting out the list of ports. 
- ports: [2049] diff --git a/packetbeat/etc/beat.yml b/packetbeat/etc/beat.yml index 63c571426266..a055bb7c0d27 100644 --- a/packetbeat/etc/beat.yml +++ b/packetbeat/etc/beat.yml @@ -1,8 +1,8 @@ -###################### Packetbeat Configuration Example ####################### +#################### Packetbeat Configuration Example ######################### -# This file is a full configuration example documenting all non-deprecated -# options in comments. For a shorter configuration example, that contains only -# the most common options, please see packetbeat.short.yml in the same directory. +# This file is an example configuration file highlighting only the most common +# options. The packetbeat.full.yml file from the same directory contains all the +# supported options with more comments. You can use it as a reference. # # You can find the full configuration reference here: # https://www.elastic.co/guide/en/beats/packetbeat/index.html @@ -15,6 +15,8 @@ packetbeat.interfaces.device: any #================================== Flows ===================================== +# Comment out all options to disable flows reporting. + # Set network flow timeout. Flow is killed if no packet is received before being # timed out. packetbeat.flows.timeout: 30s @@ -32,23 +34,6 @@ packetbeat.protocols.amqp: # Configure the ports where to listen for AMQP traffic. You can disable # the AMQP protocol by commenting out the list of ports. ports: [5672] - # Truncate messages that are published and avoid huge messages being - # indexed. - # Default: 1000 - #max_body_length: 1000 - - # Hide the header fields in header frames. - # Default: false - #parse_headers: false - - # Hide the additional arguments of method frames. - # Default: false - #parse_arguments: false - - # Hide all methods relative to connection negociation between server and - # client. - # Default: true - #hide_connection_information: true packetbeat.protocols.dns: # Configure the ports where to listen for DNS traffic. 
You can disable @@ -57,69 +42,22 @@ packetbeat.protocols.dns: # include_authorities controls whether or not the dns.authorities field # (authority resource records) is added to messages. - # Default: false include_authorities: true + # include_additionals controls whether or not the dns.additionals field # (additional resource records) is added to messages. - # Default: false include_additionals: true - # send_request and send_response control whether or not the stringified DNS - # request and response message are added to the result. - # Nearly all data about the request/response is available in the dns.* - # fields, but this can be useful if you need visibility specifically - # into the request or the response. - # Default: false - # send_request: true - # send_response: true - packetbeat.protocols.http: # Configure the ports where to listen for HTTP traffic. You can disable # the HTTP protocol by commenting out the list of ports. ports: [80, 8080, 8000, 5000, 8002] - # Uncomment the following to hide certain parameters in URL or forms attached - # to HTTP requests. The names of the parameters are case insensitive. - # The value of the parameters will be replaced with the 'xxxxx' string. - # This is generally useful for avoiding storing user passwords or other - # sensitive information. - # Only query parameters and top level form parameters are replaced. - # hide_keywords: ['pass', 'password', 'passwd'] - packetbeat.protocols.memcache: # Configure the ports where to listen for memcache traffic. You can disable # the Memcache protocol by commenting out the list of ports. ports: [11211] - # Uncomment the parseunknown option to force the memcache text protocol parser - # to accept unknown commands. - # Note: All unknown commands MUST not contain any data parts! - # Default: false - # parseunknown: true - - # Update the maxvalue option to store the values - base64 encoded - in the - # json output. 
- # possible values: - # maxvalue: -1 # store all values (text based protocol multi-get) - # maxvalue: 0 # store no values at all - # maxvalue: N # store up to N values - # Default: 0 - # maxvalues: -1 - - # Use maxbytespervalue to limit the number of bytes to be copied per value element. - # Note: Values will be base64 encoded, so actual size in json document - # will be 4 times maxbytespervalue. - # Default: unlimited - # maxbytespervalue: 100 - - # UDP transaction timeout in milliseconds. - # Note: Quiet messages in UDP binary protocol will get response only in error case. - # The memcached analyzer will wait for udptransactiontimeout milliseconds - # before publishing quiet messages. Non quiet messages or quiet requests with - # error response will not have to wait for the timeout. - # Default: 200 - # udptransactiontimeout: 1000 - packetbeat.protocols.mysql: # Configure the ports where to listen for MySQL traffic. You can disable # the MySQL protocol by commenting out the list of ports. @@ -149,28 +87,3 @@ packetbeat.protocols.nfs: # Configure the ports where to listen for NFS traffic. You can disable # the NFS protocol by commenting out the list of ports. ports: [2049] - -#=========================== Monitored processes ============================== - -# Configure the processes to be monitored and how to find them. If a process is -# monitored then Packetbeat attempts to use it's name to fill in the `proc` and -# `client_proc` fields. -# The processes can be found by searching their command line by a given string. -# -# Process matching is optional and can be enabled by uncommenting the following -# lines. 
-# -#packetbeat.procs: -# enabled: false -# monitored: -# - process: mysqld -# cmdline_grep: mysqld -# -# - process: pgsql -# cmdline_grep: postgres -# -# - process: nginx -# cmdline_grep: nginx -# -# - process: app -# cmdline_grep: gunicorn diff --git a/packetbeat/packetbeat.full.yml b/packetbeat/packetbeat.full.yml new file mode 100644 index 000000000000..ce3a8eb4b5ea --- /dev/null +++ b/packetbeat/packetbeat.full.yml @@ -0,0 +1,440 @@ +###################### Packetbeat Configuration Example ####################### + +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see packetbeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/packetbeat/index.html + +#============================== Network device ================================ + +# Select the network interface to sniff the data. You can use the "any" +# keyword to sniff on all connected interfaces. +packetbeat.interfaces.device: any + +#================================== Flows ===================================== + +# Set network flow timeout. Flow is killed if no packet is received before being +# timed out. +packetbeat.flows.timeout: 30s + +# Configure reporting period. If set to -1, only killed flows will be reported +packetbeat.flows.period: 10s + +#========================== Transaction protocols ============================= + +packetbeat.protocols.icmp: + # Enable ICMPv4 and ICMPv6 monitoring. Default: false + enabled: true + +packetbeat.protocols.amqp: + # Configure the ports where to listen for AMQP traffic. You can disable + # the AMQP protocol by commenting out the list of ports. + ports: [5672] + # Truncate messages that are published and avoid huge messages being + # indexed. + # Default: 1000 + #max_body_length: 1000 + + # Hide the header fields in header frames. 
+ # Default: false + #parse_headers: false + + # Hide the additional arguments of method frames. + # Default: false + #parse_arguments: false + + # Hide all methods relative to connection negociation between server and + # client. + # Default: true + #hide_connection_information: true + +packetbeat.protocols.dns: + # Configure the ports where to listen for DNS traffic. You can disable + # the DNS protocol by commenting out the list of ports. + ports: [53] + + # include_authorities controls whether or not the dns.authorities field + # (authority resource records) is added to messages. + # Default: false + include_authorities: true + # include_additionals controls whether or not the dns.additionals field + # (additional resource records) is added to messages. + # Default: false + include_additionals: true + + # send_request and send_response control whether or not the stringified DNS + # request and response message are added to the result. + # Nearly all data about the request/response is available in the dns.* + # fields, but this can be useful if you need visibility specifically + # into the request or the response. + # Default: false + # send_request: true + # send_response: true + +packetbeat.protocols.http: + # Configure the ports where to listen for HTTP traffic. You can disable + # the HTTP protocol by commenting out the list of ports. + ports: [80, 8080, 8000, 5000, 8002] + + # Uncomment the following to hide certain parameters in URL or forms attached + # to HTTP requests. The names of the parameters are case insensitive. + # The value of the parameters will be replaced with the 'xxxxx' string. + # This is generally useful for avoiding storing user passwords or other + # sensitive information. + # Only query parameters and top level form parameters are replaced. + # hide_keywords: ['pass', 'password', 'passwd'] + +packetbeat.protocols.memcache: + # Configure the ports where to listen for memcache traffic. 
You can disable + # the Memcache protocol by commenting out the list of ports. + ports: [11211] + + # Uncomment the parseunknown option to force the memcache text protocol parser + # to accept unknown commands. + # Note: All unknown commands MUST not contain any data parts! + # Default: false + # parseunknown: true + + # Update the maxvalue option to store the values - base64 encoded - in the + # json output. + # possible values: + # maxvalue: -1 # store all values (text based protocol multi-get) + # maxvalue: 0 # store no values at all + # maxvalue: N # store up to N values + # Default: 0 + # maxvalues: -1 + + # Use maxbytespervalue to limit the number of bytes to be copied per value element. + # Note: Values will be base64 encoded, so actual size in json document + # will be 4 times maxbytespervalue. + # Default: unlimited + # maxbytespervalue: 100 + + # UDP transaction timeout in milliseconds. + # Note: Quiet messages in UDP binary protocol will get response only in error case. + # The memcached analyzer will wait for udptransactiontimeout milliseconds + # before publishing quiet messages. Non quiet messages or quiet requests with + # error response will not have to wait for the timeout. + # Default: 200 + # udptransactiontimeout: 1000 + +packetbeat.protocols.mysql: + # Configure the ports where to listen for MySQL traffic. You can disable + # the MySQL protocol by commenting out the list of ports. + ports: [3306] + +packetbeat.protocols.pgsql: + # Configure the ports where to listen for Pgsql traffic. You can disable + # the Pgsql protocol by commenting out the list of ports. + ports: [5432] + +packetbeat.protocols.redis: + # Configure the ports where to listen for Redis traffic. You can disable + # the Redis protocol by commenting out the list of ports. + ports: [6379] + +packetbeat.protocols.thrift: + # Configure the ports where to listen for Thrift-RPC traffic. You can disable + # the Thrift-RPC protocol by commenting out the list of ports. 
+ ports: [9090] + +packetbeat.protocols.mongodb: + # Configure the ports where to listen for MongoDB traffic. You can disable + # the MongoDB protocol by commenting out the list of ports. + ports: [27017] + +packetbeat.protocols.nfs: + # Configure the ports where to listen for NFS traffic. You can disable + # the NFS protocol by commenting out the list of ports. + ports: [2049] + +#=========================== Monitored processes ============================== + +# Configure the processes to be monitored and how to find them. If a process is +# monitored then Packetbeat attempts to use it's name to fill in the `proc` and +# `client_proc` fields. +# The processes can be found by searching their command line by a given string. +# +# Process matching is optional and can be enabled by uncommenting the following +# lines. +# +#packetbeat.procs: +# enabled: false +# monitored: +# - process: mysqld +# cmdline_grep: mysqld +# +# - process: pgsql +# cmdline_grep: postgres +# +# - process: nginx +# cmdline_grep: nginx +# +# - process: app +# cmdline_grep: gunicorn + +#================================ General ===================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +# If this options is not defined, the hostname is used. +#name: + +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. 
+#fields_under_root: false + +# Uncomment the following if you want to ignore transactions created +# by the server on which the shipper is installed. This option is useful +# to remove duplicates if shippers are installed on multiple servers. +#ignore_outgoing: true + +# How often (in seconds) shippers are publishing their IPs to the topology map. +# The default is 10 seconds. +#refresh_topology_freq: 10 + +# Expiration time (in seconds) of the IPs published by a shipper to the topology map. +# All the IPs will be deleted afterwards. Note, that the value must be higher than +# refresh_topology_freq. The default is 15 seconds. +#topology_expire: 15 + +# Internal queue size for single events in processing pipeline +#queue_size: 1000 + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +#================================ Outputs ===================================== + +# Configure what outputs to use when sending the data collected by the beat. +# Multiple outputs may be used. + +#-------------------------- Elasticsearch output ------------------------------ +output.elasticsearch: + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + hosts: ["localhost:9200"] + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "admin" + #password: "s3cr3t" + + # Dictionary of HTTP parameters to pass within the url with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Number of workers per Elasticsearch host. + #worker: 1 + + # Optional index name. The default is "packetbeat" and generates + # [packetbeat-]YYYY.MM.DD keys. 
+ #index: "packetbeat" + + # Optional HTTP Path + #path: "/elasticsearch" + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # Configure http request timeout before failing an request to Elasticsearch. + #timeout: 90 + + # The number of seconds to wait for new events between two bulk API index requests. + # If `bulk_max_size` is reached before this interval expires, addition bulk index + # requests are made. + #flush_interval: 1 + + # Boolean that sets if the topology is kept in Elasticsearch. The default is + # false. This option makes sense only for Packetbeat. + #save_topology: false + + # The time to live in seconds for the topology information that is stored in + # Elasticsearch. The default is 15 seconds. + #topology_expire: 15 + + # A template is used to set the mapping in Elasticsearch + # By default template loading is enabled and the template is loaded. + # These settings can be adjusted to load your own template or overwrite existing ones + + # Template name. By default the template name is packetbeat. + template.name: "packetbeat" + + # Path to template file + template.path: "packetbeat.template.json" + + # Overwrite existing template + template.overwrite: false + + # TLS configuration. By default is off. + # List of root certificates for HTTPS server verifications + #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for TLS client authentication + #tls.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #tls.certificate_key: "/etc/pki/client/cert.key" + + # Controls whether the client verifies server certificates and host name. 
+ # If insecure is set to true, all server host names and certificates will be + # accepted. In this mode TLS based connections are susceptible to + # man-in-the-middle attacks. Use only for testing. + #tls.insecure: true + + # Configure cipher suites to be used for TLS connections + #tls.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #tls.curve_types: [] + + # Configure minimum TLS version allowed for connection to logstash + #tls.min_version: 1.0 + + # Configure maximum TLS version allowed for connection to logstash + #tls.max_version: 1.2 + + +#----------------------------- Logstash output -------------------------------- +#output.logstash: + # The Logstash hosts + #hosts: ["localhost:5044"] + + # Number of workers per Logstash host. + #worker: 1 + + # Set gzip compression level. + #compression_level: 3 + + # Optional load balance the events between the Logstash hosts + #loadbalance: true + + # Optional index name. The default index name is set to name of the beat + # in all lowercase. + #index: packetbeat + + # SOCKS5 proxy server URL + #proxy_url: socks5://user:password@socks5-server:2233 + + # Resolve names locally when using a proxy server. Defaults to false. + #proxy_use_local_resolver: false + + # Optional TLS. By default is off. + # List of root certificates for HTTPS server verifications + #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for TLS client authentication + #tls.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #tls.certificate_key: "/etc/pki/client/cert.key" + + # Controls whether the client verifies server certificates and host name. + # If insecure is set to true, all server host names and certificates will be + # accepted. In this mode TLS based connections are susceptible to + # man-in-the-middle attacks. Use only for testing. 
+ #tls.insecure: true
+
+ # Configure cipher suites to be used for TLS connections
+ #tls.cipher_suites: []
+
+ # Configure curve types for ECDHE based cipher suites
+ #tls.curve_types: []
+
+
+#------------------------------- File output ----------------------------------
+#output.file:
+ # Path to the directory where to save the generated files. The option is mandatory.
+ #path: "/tmp/filebeat"
+
+ # Name of the generated files. The default is `filebeat` and it generates files: `filebeat`, `filebeat.1`, `filebeat.2`, etc.
+ #filename: filebeat
+
+ # Maximum size in kilobytes of each file. When this size is reached, the files are
+ # rotated. The default value is 10240 kB.
+ #rotate_every_kb: 10000
+
+ # Maximum number of files under path. When this number of files is reached, the
+ # oldest file is deleted and the rest are shifted from last to first. The default
+ # is 7 files.
+ #number_of_files: 7
+
+
+#----------------------------- Console output ---------------------------------
+#output.console:
+ # Pretty print json event
+ #pretty: false
+
+#================================ Logging =====================================
+# There are three options for the log output: syslog, file, stderr.
+# Under Windows systems, the log files are per default sent to the file output,
+# under all other systems per default to syslog.
+
+# Sets log level. The default log level is error.
+# Available log levels are: critical, error, warning, info, debug
+#logging.level: error
+
+# Enable debug output for selected components. To enable all selectors use ["*"]
+# Other available selectors are beat, publish, service
+# Multiple selectors can be chained.
+#logging.selectors: [ ]
+
+# Send all logging output to syslog. The default is false.
+#logging.to_syslog: true
+
+# Logging to rotating files. Set logging.to_files to false to disable logging to
+# files.
+logging.to_files: true
+logging.files:
+ # Configure the path where the logs are written. 
The default is the logs directory + # under the home path (the binary location). + #path: /var/log/mybeat + + # The name of the files where the logs are written to. + #name: mybeat + + # Configure log file size limit. If limit is reached, log file will be + # automatically rotated + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. Oldest files will be deleted first. + #keepfiles: 7 + +#================================ Filters ===================================== + +# This section defines a list of filtering rules that are applied one by one starting with the +# exported event: +# event -> filter1 -> event1 -> filter2 ->event2 ... +# Supported actions: drop_fields, drop_event, include_fields +#filters: +# - drop_fields: +# equals: +# status: OK +# fields: [ ] diff --git a/packetbeat/packetbeat.short.yml b/packetbeat/packetbeat.short.yml deleted file mode 100644 index a7ed37b7544d..000000000000 --- a/packetbeat/packetbeat.short.yml +++ /dev/null @@ -1,144 +0,0 @@ -#################### Packetbeat Configuration Example ######################### - -# This file is an example configuration file highlighting only the most common -# options. The packetbeat.yml file from the same directory contains all the -# supported options with more comments. You can use it as a reference. -# -# You can find the full configuration reference here: -# https://www.elastic.co/guide/en/beats/packetbeat/index.html - -#============================== Network device ================================ - -# Select the network interface to sniff the data. You can use the "any" -# keyword to sniff on all connected interfaces. -packetbeat.interfaces.device: any - -#================================== Flows ===================================== - -# Comment out all options to disable flows reporting. - -# Set network flow timeout. Flow is killed if no packet is received before being -# timed out. -packetbeat.flows.timeout: 30s - -# Configure reporting period. 
If set to -1, only killed flows will be reported -packetbeat.flows.period: 10s - -#========================== Transaction protocols ============================= - -packetbeat.protocols.icmp: - # Enable ICMPv4 and ICMPv6 monitoring. Default: false - enabled: true - -packetbeat.protocols.amqp: - # Configure the ports where to listen for AMQP traffic. You can disable - # the AMQP protocol by commenting out the list of ports. - ports: [5672] - -packetbeat.protocols.dns: - # Configure the ports where to listen for DNS traffic. You can disable - # the DNS protocol by commenting out the list of ports. - ports: [53] - - # include_authorities controls whether or not the dns.authorities field - # (authority resource records) is added to messages. - include_authorities: true - - # include_additionals controls whether or not the dns.additionals field - # (additional resource records) is added to messages. - include_additionals: true - -packetbeat.protocols.http: - # Configure the ports where to listen for HTTP traffic. You can disable - # the HTTP protocol by commenting out the list of ports. - ports: [80, 8080, 8000, 5000, 8002] - -packetbeat.protocols.memcache: - # Configure the ports where to listen for memcache traffic. You can disable - # the Memcache protocol by commenting out the list of ports. - ports: [11211] - -packetbeat.protocols.mysql: - # Configure the ports where to listen for MySQL traffic. You can disable - # the MySQL protocol by commenting out the list of ports. - ports: [3306] - -packetbeat.protocols.pgsql: - # Configure the ports where to listen for Pgsql traffic. You can disable - # the Pgsql protocol by commenting out the list of ports. - ports: [5432] - -packetbeat.protocols.redis: - # Configure the ports where to listen for Redis traffic. You can disable - # the Redis protocol by commenting out the list of ports. - ports: [6379] - -packetbeat.protocols.thrift: - # Configure the ports where to listen for Thrift-RPC traffic. 
You can disable - # the Thrift-RPC protocol by commenting out the list of ports. - ports: [9090] - -packetbeat.protocols.mongodb: - # Configure the ports where to listen for MongoDB traffic. You can disable - # the MongoDB protocol by commenting out the list of ports. - ports: [27017] - -packetbeat.protocols.nfs: - # Configure the ports where to listen for NFS traffic. You can disable - # the NFS protocol by commenting out the list of ports. - ports: [2049] - -#================================ General ===================================== - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -#name: - -# The tags of the shipper are included in their own field with each -# transaction published. -#tags: ["service-X", "web-tier"] - -# Optional fields that you can specify to add additional information to the -# output. -#fields: -# env: staging - -#================================ Outputs ===================================== - -# Configure what outputs to use when sending the data collected by the beat. -# Multiple outputs may be used. - -#-------------------------- Elasticsearch output ------------------------------ -output.elasticsearch: - # Array of hosts to connect to. - hosts: ["localhost:9200"] - - # Template name. By default the template name is packetbeat. - template.name: "packetbeat" - - # Path to template file - template.path: "packetbeat.template.json" - - # Overwrite existing template - template.overwrite: false - -#----------------------------- Logstash output -------------------------------- -#output.logstash: - # The Logstash hosts - #hosts: ["localhost:5044"] - - # Optional TLS. By default is off. 
- # List of root certificates for HTTPS server verifications - #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for TLS client authentication - #tls.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #tls.certificate_key: "/etc/pki/client/cert.key" - -#================================ Logging ===================================== - -# Sets log level. The default log level is error. -# Available log levels are: critical, error, warning, info, debug -#logging.level: error diff --git a/packetbeat/packetbeat.yml b/packetbeat/packetbeat.yml index 4b9161b79b6d..6d35beb828b7 100644 --- a/packetbeat/packetbeat.yml +++ b/packetbeat/packetbeat.yml @@ -1,8 +1,8 @@ -###################### Packetbeat Configuration Example ####################### +#################### Packetbeat Configuration Example ######################### -# This file is a full configuration example documenting all non-deprecated -# options in comments. For a shorter configuration example, that contains only -# the most common options, please see packetbeat.short.yml in the same directory. +# This file is an example configuration file highlighting only the most common +# options. The packetbeat.full.yml file from the same directory contains all the +# supported options with more comments. You can use it as a reference. # # You can find the full configuration reference here: # https://www.elastic.co/guide/en/beats/packetbeat/index.html @@ -15,6 +15,8 @@ packetbeat.interfaces.device: any #================================== Flows ===================================== +# Comment out all options to disable flows reporting. + # Set network flow timeout. Flow is killed if no packet is received before being # timed out. packetbeat.flows.timeout: 30s @@ -32,23 +34,6 @@ packetbeat.protocols.amqp: # Configure the ports where to listen for AMQP traffic. You can disable # the AMQP protocol by commenting out the list of ports. 
ports: [5672] - # Truncate messages that are published and avoid huge messages being - # indexed. - # Default: 1000 - #max_body_length: 1000 - - # Hide the header fields in header frames. - # Default: false - #parse_headers: false - - # Hide the additional arguments of method frames. - # Default: false - #parse_arguments: false - - # Hide all methods relative to connection negociation between server and - # client. - # Default: true - #hide_connection_information: true packetbeat.protocols.dns: # Configure the ports where to listen for DNS traffic. You can disable @@ -57,69 +42,22 @@ packetbeat.protocols.dns: # include_authorities controls whether or not the dns.authorities field # (authority resource records) is added to messages. - # Default: false include_authorities: true + # include_additionals controls whether or not the dns.additionals field # (additional resource records) is added to messages. - # Default: false include_additionals: true - # send_request and send_response control whether or not the stringified DNS - # request and response message are added to the result. - # Nearly all data about the request/response is available in the dns.* - # fields, but this can be useful if you need visibility specifically - # into the request or the response. - # Default: false - # send_request: true - # send_response: true - packetbeat.protocols.http: # Configure the ports where to listen for HTTP traffic. You can disable # the HTTP protocol by commenting out the list of ports. ports: [80, 8080, 8000, 5000, 8002] - # Uncomment the following to hide certain parameters in URL or forms attached - # to HTTP requests. The names of the parameters are case insensitive. - # The value of the parameters will be replaced with the 'xxxxx' string. - # This is generally useful for avoiding storing user passwords or other - # sensitive information. - # Only query parameters and top level form parameters are replaced. 
- # hide_keywords: ['pass', 'password', 'passwd'] - packetbeat.protocols.memcache: # Configure the ports where to listen for memcache traffic. You can disable # the Memcache protocol by commenting out the list of ports. ports: [11211] - # Uncomment the parseunknown option to force the memcache text protocol parser - # to accept unknown commands. - # Note: All unknown commands MUST not contain any data parts! - # Default: false - # parseunknown: true - - # Update the maxvalue option to store the values - base64 encoded - in the - # json output. - # possible values: - # maxvalue: -1 # store all values (text based protocol multi-get) - # maxvalue: 0 # store no values at all - # maxvalue: N # store up to N values - # Default: 0 - # maxvalues: -1 - - # Use maxbytespervalue to limit the number of bytes to be copied per value element. - # Note: Values will be base64 encoded, so actual size in json document - # will be 4 times maxbytespervalue. - # Default: unlimited - # maxbytespervalue: 100 - - # UDP transaction timeout in milliseconds. - # Note: Quiet messages in UDP binary protocol will get response only in error case. - # The memcached analyzer will wait for udptransactiontimeout milliseconds - # before publishing quiet messages. Non quiet messages or quiet requests with - # error response will not have to wait for the timeout. - # Default: 200 - # udptransactiontimeout: 1000 - packetbeat.protocols.mysql: # Configure the ports where to listen for MySQL traffic. You can disable # the MySQL protocol by commenting out the list of ports. @@ -150,75 +88,21 @@ packetbeat.protocols.nfs: # the NFS protocol by commenting out the list of ports. ports: [2049] -#=========================== Monitored processes ============================== - -# Configure the processes to be monitored and how to find them. If a process is -# monitored then Packetbeat attempts to use it's name to fill in the `proc` and -# `client_proc` fields. 
-# The processes can be found by searching their command line by a given string. -# -# Process matching is optional and can be enabled by uncommenting the following -# lines. -# -#packetbeat.procs: -# enabled: false -# monitored: -# - process: mysqld -# cmdline_grep: mysqld -# -# - process: pgsql -# cmdline_grep: postgres -# -# - process: nginx -# cmdline_grep: nginx -# -# - process: app -# cmdline_grep: gunicorn - #================================ General ===================================== # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. #name: # The tags of the shipper are included in their own field with each -# transaction published. Tags make it easy to group servers by different -# logical properties. +# transaction published. #tags: ["service-X", "web-tier"] # Optional fields that you can specify to add additional information to the -# output. Fields can be scalar values, arrays, dictionaries, or any nested -# combination of these. +# output. #fields: # env: staging -# If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields -# sub-dictionary. Default is false. -#fields_under_root: false - -# Uncomment the following if you want to ignore transactions created -# by the server on which the shipper is installed. This option is useful -# to remove duplicates if shippers are installed on multiple servers. -#ignore_outgoing: true - -# How often (in seconds) shippers are publishing their IPs to the topology map. -# The default is 10 seconds. -#refresh_topology_freq: 10 - -# Expiration time (in seconds) of the IPs published by a shipper to the topology map. -# All the IPs will be deleted afterwards. Note, that the value must be higher than -# refresh_topology_freq. The default is 15 seconds. 
-#topology_expire: 15 - -# Internal queue size for single events in processing pipeline -#queue_size: 1000 - -# Sets the maximum number of CPUs that can be executing simultaneously. The -# default is the number of logical CPUs available in the system. -#max_procs: - #================================ Outputs ===================================== # Configure what outputs to use when sending the data collected by the beat. @@ -227,63 +111,8 @@ packetbeat.protocols.nfs: #-------------------------- Elasticsearch output ------------------------------ output.elasticsearch: # Array of hosts to connect to. - # Scheme and port can be left out and will be set to the default (http and 9200) - # In case you specify and additional path, the scheme is required: http://localhost:9200/path - # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 hosts: ["localhost:9200"] - # Optional protocol and basic auth credentials. - #protocol: "https" - #username: "admin" - #password: "s3cr3t" - - # Dictionary of HTTP parameters to pass within the url with index operations. - #parameters: - #param1: value1 - #param2: value2 - - # Number of workers per Elasticsearch host. - #worker: 1 - - # Optional index name. The default is "packetbeat" and generates - # [packetbeat-]YYYY.MM.DD keys. - #index: "packetbeat" - - # Optional HTTP Path - #path: "/elasticsearch" - - # Proxy server url - #proxy_url: http://proxy:3128 - - # The number of times a particular Elasticsearch index operation is attempted. If - # the indexing operation doesn't succeed after this many retries, the events are - # dropped. The default is 3. - #max_retries: 3 - - # The maximum number of events to bulk in a single Elasticsearch bulk API index request. - # The default is 50. - #bulk_max_size: 50 - - # Configure http request timeout before failing an request to Elasticsearch. - #timeout: 90 - - # The number of seconds to wait for new events between two bulk API index requests. 
- # If `bulk_max_size` is reached before this interval expires, addition bulk index - # requests are made. - #flush_interval: 1 - - # Boolean that sets if the topology is kept in Elasticsearch. The default is - # false. This option makes sense only for Packetbeat. - #save_topology: false - - # The time to live in seconds for the topology information that is stored in - # Elasticsearch. The default is 15 seconds. - #topology_expire: 15 - - # A template is used to set the mapping in Elasticsearch - # By default template loading is enabled and the template is loaded. - # These settings can be adjusted to load your own template or overwrite existing ones - # Template name. By default the template name is packetbeat. template.name: "packetbeat" @@ -293,59 +122,11 @@ output.elasticsearch: # Overwrite existing template template.overwrite: false - # TLS configuration. By default is off. - # List of root certificates for HTTPS server verifications - #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for TLS client authentication - #tls.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #tls.certificate_key: "/etc/pki/client/cert.key" - - # Controls whether the client verifies server certificates and host name. - # If insecure is set to true, all server host names and certificates will be - # accepted. In this mode TLS based connections are susceptible to - # man-in-the-middle attacks. Use only for testing. 
- #tls.insecure: true - - # Configure cipher suites to be used for TLS connections - #tls.cipher_suites: [] - - # Configure curve types for ECDHE based cipher suites - #tls.curve_types: [] - - # Configure minimum TLS version allowed for connection to logstash - #tls.min_version: 1.0 - - # Configure maximum TLS version allowed for connection to logstash - #tls.max_version: 1.2 - - #----------------------------- Logstash output -------------------------------- #output.logstash: # The Logstash hosts #hosts: ["localhost:5044"] - # Number of workers per Logstash host. - #worker: 1 - - # Set gzip compression level. - #compression_level: 3 - - # Optional load balance the events between the Logstash hosts - #loadbalance: true - - # Optional index name. The default index name is set to name of the beat - # in all lowercase. - #index: packetbeat - - # SOCKS5 proxy server URL - #proxy_url: socks5://user:password@socks5-server:2233 - - # Resolve names locally when using a proxy server. Defaults to false. - #proxy_use_local_resolver: false - # Optional TLS. By default is off. # List of root certificates for HTTPS server verifications #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] @@ -356,85 +137,8 @@ output.elasticsearch: # Client Certificate Key #tls.certificate_key: "/etc/pki/client/cert.key" - # Controls whether the client verifies server certificates and host name. - # If insecure is set to true, all server host names and certificates will be - # accepted. In this mode TLS based connections are susceptible to - # man-in-the-middle attacks. Use only for testing. - #tls.insecure: true - - # Configure cipher suites to be used for TLS connections - #tls.cipher_suites: [] - - # Configure curve types for ECDHE based cipher suites - #tls.curve_types: [] - - -#------------------------------- File output ---------------------------------- -#output.file: - # Path to the directory where to save the generated files. The option is mandatory. 
- #path: "/tmp/packetbeat" - - # Name of the generated files. The default is `packetbeat` and it generates files: `packetbeat`, `packetbeat.1`, `packetbeat.2`, etc. - #filename: packetbeat - - # Maximum size in kilobytes of each file. When this size is reached, the files are - # rotated. The default value is 10240 kB. - #rotate_every_kb: 10000 - - # Maximum number of files under path. When this number of files is reached, the - # oldest file is deleted and the rest are shifted from last to first. The default - # is 7 files. - #number_of_files: 7 - - -#----------------------------- Console output --------------------------------- -#output.console: - # Pretty print json event - #pretty: false - #================================ Logging ===================================== -# There are three options for the log output: syslog, file, stderr. -# Under Windows systems, the log files are per default sent to the file output, -# under all other system per default to syslog. # Sets log level. The default log level is error. # Available log levels are: critical, error, warning, info, debug #logging.level: error - -# Enable debug output for selected components. To enable all selectors use ["*"] -# Other available selectors are beat, publish, service -# Multiple selectors can be chained. -#logging.selectors: [ ] - -# Send all logging output to syslog. The default is false. -#logging.to_syslog: true - -# Logging to rotating files files. Set logging.to_files to false to disable logging to -# files. -logging.to_files: true -logging.files: - # Configure the path where the logs are written. The default is the logs directory - # under the home path (the binary location). - #path: /var/log/mybeat - - # The name of the files where the logs are written to. - #name: mybeat - - # Configure log file size limit. If limit is reached, log file will be - # automatically rotated - #rotateeverybytes: 10485760 # = 10MB - - # Number of rotated log files to keep. 
Oldest files will be deleted first. - #keepfiles: 7 - -#================================ Filters ===================================== - -# This section defines a list of filtering rules that are applied one by one starting with the -# exported event: -# event -> filter1 -> event1 -> filter2 ->event2 ... -# Supported actions: drop_fields, drop_event, include_fields -#filters: -# - drop_fields: -# equals: -# status: OK -# fields: [ ] diff --git a/topbeat/etc/beat.short.yml b/topbeat/etc/beat.full.yml similarity index 71% rename from topbeat/etc/beat.short.yml rename to topbeat/etc/beat.full.yml index 58291336111f..a0f03bf6ca12 100644 --- a/topbeat/etc/beat.short.yml +++ b/topbeat/etc/beat.full.yml @@ -1,8 +1,8 @@ -###################### Topbeat Configuration Example ########################## +########################### Topbeat Configuration ############################# -# This file is an example configuration file highlighting only the most common -# options. The topbeat.yml file from the same directory contains all the -# supported options with more comments. You can use it as a reference. +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see topbeat.yml in the same directory. # # You can find the full configuration reference here: # https://www.elastic.co/guide/en/beats/topbeat/index.html diff --git a/topbeat/etc/beat.yml b/topbeat/etc/beat.yml index 7ab9460d7ea3..23b356cc6291 100644 --- a/topbeat/etc/beat.yml +++ b/topbeat/etc/beat.yml @@ -1,8 +1,8 @@ -########################### Topbeat Configuration ############################# +###################### Topbeat Configuration Example ########################## -# This file is a full configuration example documenting all non-deprecated -# options in comments. 
For a shorter configuration example, that contains only -# the most common options, please see topbeat.short.yml in the same directory. +# This file is an example configuration file highlighting only the most common +# options. The topbeat.full.yml file from the same directory contains all the +# supported options with more comments. You can use it as a reference. # # You can find the full configuration reference here: # https://www.elastic.co/guide/en/beats/topbeat/index.html diff --git a/topbeat/topbeat.full.yml b/topbeat/topbeat.full.yml new file mode 100644 index 000000000000..4c7e09c50a06 --- /dev/null +++ b/topbeat/topbeat.full.yml @@ -0,0 +1,295 @@ +########################### Topbeat Configuration ############################# + +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see topbeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/topbeat/index.html + +#======================== Topbeat specific options ============================ + +# In seconds, defines how often to read server statistics +topbeat.period: 10 + +# Regular expression to match the processes that are monitored +# By default, all the processes are monitored +topbeat.procs: [".*"] + +# Statistics to collect (all enabled by default) +topbeat.stats: + # per system statistics, by default is true + system: true + + # per process statistics, by default is true + process: true + + # file system information, by default is true + filesystem: true + + # cpu usage per core, by default is false + cpu_per_core: false + +#================================ General ===================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. 
+# If this options is not defined, the hostname is used. +#name: + +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. +#fields_under_root: false + +# Uncomment the following if you want to ignore transactions created +# by the server on which the shipper is installed. This option is useful +# to remove duplicates if shippers are installed on multiple servers. +#ignore_outgoing: true + +# How often (in seconds) shippers are publishing their IPs to the topology map. +# The default is 10 seconds. +#refresh_topology_freq: 10 + +# Expiration time (in seconds) of the IPs published by a shipper to the topology map. +# All the IPs will be deleted afterwards. Note, that the value must be higher than +# refresh_topology_freq. The default is 15 seconds. +#topology_expire: 15 + +# Internal queue size for single events in processing pipeline +#queue_size: 1000 + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +#================================ Outputs ===================================== + +# Configure what outputs to use when sending the data collected by the beat. +# Multiple outputs may be used. + +#-------------------------- Elasticsearch output ------------------------------ +output.elasticsearch: + # Array of hosts to connect to. 
+ # Scheme and port can be left out and will be set to the default (http and 9200)
+ # In case you specify an additional path, the scheme is required: http://localhost:9200/path
+ # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
+ hosts: ["localhost:9200"]
+
+ # Optional protocol and basic auth credentials.
+ #protocol: "https"
+ #username: "admin"
+ #password: "s3cr3t"
+
+ # Dictionary of HTTP parameters to pass within the url with index operations.
+ #parameters:
+ #param1: value1
+ #param2: value2
+
+ # Number of workers per Elasticsearch host.
+ #worker: 1
+
+ # Optional index name. The default is "topbeat" and generates
+ # [topbeat-]YYYY.MM.DD keys.
+ #index: "topbeat"
+
+ # Optional HTTP Path
+ #path: "/elasticsearch"
+
+ # Proxy server url
+ #proxy_url: http://proxy:3128
+
+ # The number of times a particular Elasticsearch index operation is attempted. If
+ # the indexing operation doesn't succeed after this many retries, the events are
+ # dropped. The default is 3.
+ #max_retries: 3
+
+ # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
+ # The default is 50.
+ #bulk_max_size: 50
+
+ # Configure http request timeout before failing a request to Elasticsearch.
+ #timeout: 90
+
+ # The number of seconds to wait for new events between two bulk API index requests.
+ # If `bulk_max_size` is reached before this interval expires, additional bulk index
+ # requests are made.
+ #flush_interval: 1
+
+ # Boolean that sets if the topology is kept in Elasticsearch. The default is
+ # false. This option makes sense only for Packetbeat.
+ #save_topology: false
+
+ # The time to live in seconds for the topology information that is stored in
+ # Elasticsearch. The default is 15 seconds.
+ #topology_expire: 15
+
+ # A template is used to set the mapping in Elasticsearch
+ # By default template loading is enabled and the template is loaded. 
+ # These settings can be adjusted to load your own template or overwrite existing ones + + # Template name. By default the template name is topbeat. + template.name: "topbeat" + + # Path to template file + template.path: "topbeat.template.json" + + # Overwrite existing template + template.overwrite: false + + # TLS configuration. By default is off. + # List of root certificates for HTTPS server verifications + #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for TLS client authentication + #tls.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #tls.certificate_key: "/etc/pki/client/cert.key" + + # Controls whether the client verifies server certificates and host name. + # If insecure is set to true, all server host names and certificates will be + # accepted. In this mode TLS based connections are susceptible to + # man-in-the-middle attacks. Use only for testing. + #tls.insecure: true + + # Configure cipher suites to be used for TLS connections + #tls.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #tls.curve_types: [] + + # Configure minimum TLS version allowed for connection to logstash + #tls.min_version: 1.0 + + # Configure maximum TLS version allowed for connection to logstash + #tls.max_version: 1.2 + + +#----------------------------- Logstash output -------------------------------- +#output.logstash: + # The Logstash hosts + #hosts: ["localhost:5044"] + + # Number of workers per Logstash host. + #worker: 1 + + # Set gzip compression level. + #compression_level: 3 + + # Optional load balance the events between the Logstash hosts + #loadbalance: true + + # Optional index name. The default index name is set to name of the beat + # in all lowercase. + #index: topbeat + + # SOCKS5 proxy server URL + #proxy_url: socks5://user:password@socks5-server:2233 + + # Resolve names locally when using a proxy server. Defaults to false. + #proxy_use_local_resolver: false + + # Optional TLS. 
By default is off. + # List of root certificates for HTTPS server verifications + #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for TLS client authentication + #tls.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #tls.certificate_key: "/etc/pki/client/cert.key" + + # Controls whether the client verifies server certificates and host name. + # If insecure is set to true, all server host names and certificates will be + # accepted. In this mode TLS based connections are susceptible to + # man-in-the-middle attacks. Use only for testing. + #tls.insecure: true + + # Configure cipher suites to be used for TLS connections + #tls.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #tls.curve_types: [] + + +#------------------------------- File output ---------------------------------- +#output.file: + # Path to the directory where to save the generated files. The option is mandatory. + #path: "/tmp/topbeat" + + # Name of the generated files. The default is `topbeat` and it generates files: `topbeat`, `topbeat.1`, `topbeat.2`, etc. + #filename: topbeat + + # Maximum size in kilobytes of each file. When this size is reached, the files are + # rotated. The default value is 10240 kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, the + # oldest file is deleted and the rest are shifted from last to first. The default + # is 7 files. + #number_of_files: 7 + + +#----------------------------- Console output --------------------------------- +#output.console: + # Pretty print json event + #pretty: false + +#================================ Logging ===================================== +# There are three options for the log output: syslog, file, stderr. +# Under Windows systems, the log files are per default sent to the file output, +# under all other system per default to syslog. + +# Sets log level. The default log level is error. 
+# Available log levels are: critical, error, warning, info, debug
+#logging.level: error
+
+# Enable debug output for selected components. To enable all selectors use ["*"]
+# Other available selectors are beat, publish, service
+# Multiple selectors can be chained.
+#logging.selectors: [ ]
+
+# Send all logging output to syslog. The default is false.
+#logging.to_syslog: true
+
+# Logging to rotating files. Set logging.to_files to false to disable logging to
+# files.
+logging.to_files: true
+logging.files:
+  # Configure the path where the logs are written. The default is the logs directory
+  # under the home path (the binary location).
+  #path: /var/log/mybeat
+
+  # The name of the files where the logs are written to.
+  #name: mybeat
+
+  # Configure log file size limit. If limit is reached, log file will be
+  # automatically rotated
+  #rotateeverybytes: 10485760 # = 10MB
+
+  # Number of rotated log files to keep. Oldest files will be deleted first.
+  #keepfiles: 7
+
+#================================ Filters =====================================
+
+# This section defines a list of filtering rules that are applied one by one starting with the
+# exported event:
+# event -> filter1 -> event1 -> filter2 ->event2 ...
+# Supported actions: drop_fields, drop_event, include_fields
+#filters:
+# - drop_fields:
+#     equals:
+#       status: OK
+#     fields: [ ]
diff --git a/topbeat/topbeat.short.yml b/topbeat/topbeat.short.yml
deleted file mode 100644
index 9c4ae2ba0482..000000000000
--- a/topbeat/topbeat.short.yml
+++ /dev/null
@@ -1,86 +0,0 @@
-###################### Topbeat Configuration Example ##########################
-
-# This file is an example configuration file highlighting only the most common
-# options. The topbeat.yml file from the same directory contains all the
-# supported options with more comments. You can use it as a reference.
-# -# You can find the full configuration reference here: -# https://www.elastic.co/guide/en/beats/topbeat/index.html - -#======================== Topbeat specific options ============================ - -# In seconds, defines how often to read server statistics -topbeat.period: 10 - -# Regular expression to match the processes that are monitored -# By default, all the processes are monitored -topbeat.procs: [".*"] - -# Statistics to collect (all enabled by default) -topbeat.stats: - # per system statistics, by default is true - system: true - - # per process statistics, by default is true - process: true - - # file system information, by default is true - filesystem: true - - # cpu usage per core, by default is false - cpu_per_core: false - -#================================ General ===================================== - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -#name: - -# The tags of the shipper are included in their own field with each -# transaction published. -#tags: ["service-X", "web-tier"] - -# Optional fields that you can specify to add additional information to the -# output. -#fields: -# env: staging - -#================================ Outputs ===================================== - -# Configure what outputs to use when sending the data collected by the beat. -# Multiple outputs may be used. - -#-------------------------- Elasticsearch output ------------------------------ -output.elasticsearch: - # Array of hosts to connect to. - hosts: ["localhost:9200"] - - # Template name. By default the template name is topbeat. 
- template.name: "topbeat" - - # Path to template file - template.path: "topbeat.template.json" - - # Overwrite existing template - template.overwrite: false - -#----------------------------- Logstash output -------------------------------- -#output.logstash: - # The Logstash hosts - #hosts: ["localhost:5044"] - - # Optional TLS. By default is off. - # List of root certificates for HTTPS server verifications - #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for TLS client authentication - #tls.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #tls.certificate_key: "/etc/pki/client/cert.key" - -#================================ Logging ===================================== - -# Sets log level. The default log level is error. -# Available log levels are: critical, error, warning, info, debug -#logging.level: error diff --git a/topbeat/topbeat.yml b/topbeat/topbeat.yml index ddb6bee0f4e6..e98409aed637 100644 --- a/topbeat/topbeat.yml +++ b/topbeat/topbeat.yml @@ -1,8 +1,8 @@ -########################### Topbeat Configuration ############################# +###################### Topbeat Configuration Example ########################## -# This file is a full configuration example documenting all non-deprecated -# options in comments. For a shorter configuration example, that contains only -# the most common options, please see topbeat.short.yml in the same directory. +# This file is an example configuration file highlighting only the most common +# options. The topbeat.full.yml file from the same directory contains all the +# supported options with more comments. You can use it as a reference. # # You can find the full configuration reference here: # https://www.elastic.co/guide/en/beats/topbeat/index.html @@ -34,46 +34,17 @@ topbeat.stats: # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. 
-# If this options is not defined, the hostname is used. #name: # The tags of the shipper are included in their own field with each -# transaction published. Tags make it easy to group servers by different -# logical properties. +# transaction published. #tags: ["service-X", "web-tier"] # Optional fields that you can specify to add additional information to the -# output. Fields can be scalar values, arrays, dictionaries, or any nested -# combination of these. +# output. #fields: # env: staging -# If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields -# sub-dictionary. Default is false. -#fields_under_root: false - -# Uncomment the following if you want to ignore transactions created -# by the server on which the shipper is installed. This option is useful -# to remove duplicates if shippers are installed on multiple servers. -#ignore_outgoing: true - -# How often (in seconds) shippers are publishing their IPs to the topology map. -# The default is 10 seconds. -#refresh_topology_freq: 10 - -# Expiration time (in seconds) of the IPs published by a shipper to the topology map. -# All the IPs will be deleted afterwards. Note, that the value must be higher than -# refresh_topology_freq. The default is 15 seconds. -#topology_expire: 15 - -# Internal queue size for single events in processing pipeline -#queue_size: 1000 - -# Sets the maximum number of CPUs that can be executing simultaneously. The -# default is the number of logical CPUs available in the system. -#max_procs: - #================================ Outputs ===================================== # Configure what outputs to use when sending the data collected by the beat. @@ -82,63 +53,8 @@ topbeat.stats: #-------------------------- Elasticsearch output ------------------------------ output.elasticsearch: # Array of hosts to connect to. 
- # Scheme and port can be left out and will be set to the default (http and 9200) - # In case you specify and additional path, the scheme is required: http://localhost:9200/path - # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 hosts: ["localhost:9200"] - # Optional protocol and basic auth credentials. - #protocol: "https" - #username: "admin" - #password: "s3cr3t" - - # Dictionary of HTTP parameters to pass within the url with index operations. - #parameters: - #param1: value1 - #param2: value2 - - # Number of workers per Elasticsearch host. - #worker: 1 - - # Optional index name. The default is "topbeat" and generates - # [topbeat-]YYYY.MM.DD keys. - #index: "topbeat" - - # Optional HTTP Path - #path: "/elasticsearch" - - # Proxy server url - #proxy_url: http://proxy:3128 - - # The number of times a particular Elasticsearch index operation is attempted. If - # the indexing operation doesn't succeed after this many retries, the events are - # dropped. The default is 3. - #max_retries: 3 - - # The maximum number of events to bulk in a single Elasticsearch bulk API index request. - # The default is 50. - #bulk_max_size: 50 - - # Configure http request timeout before failing an request to Elasticsearch. - #timeout: 90 - - # The number of seconds to wait for new events between two bulk API index requests. - # If `bulk_max_size` is reached before this interval expires, addition bulk index - # requests are made. - #flush_interval: 1 - - # Boolean that sets if the topology is kept in Elasticsearch. The default is - # false. This option makes sense only for Packetbeat. - #save_topology: false - - # The time to live in seconds for the topology information that is stored in - # Elasticsearch. The default is 15 seconds. - #topology_expire: 15 - - # A template is used to set the mapping in Elasticsearch - # By default template loading is enabled and the template is loaded. 
- # These settings can be adjusted to load your own template or overwrite existing ones - # Template name. By default the template name is topbeat. template.name: "topbeat" @@ -148,59 +64,11 @@ output.elasticsearch: # Overwrite existing template template.overwrite: false - # TLS configuration. By default is off. - # List of root certificates for HTTPS server verifications - #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for TLS client authentication - #tls.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #tls.certificate_key: "/etc/pki/client/cert.key" - - # Controls whether the client verifies server certificates and host name. - # If insecure is set to true, all server host names and certificates will be - # accepted. In this mode TLS based connections are susceptible to - # man-in-the-middle attacks. Use only for testing. - #tls.insecure: true - - # Configure cipher suites to be used for TLS connections - #tls.cipher_suites: [] - - # Configure curve types for ECDHE based cipher suites - #tls.curve_types: [] - - # Configure minimum TLS version allowed for connection to logstash - #tls.min_version: 1.0 - - # Configure maximum TLS version allowed for connection to logstash - #tls.max_version: 1.2 - - #----------------------------- Logstash output -------------------------------- #output.logstash: # The Logstash hosts #hosts: ["localhost:5044"] - # Number of workers per Logstash host. - #worker: 1 - - # Set gzip compression level. - #compression_level: 3 - - # Optional load balance the events between the Logstash hosts - #loadbalance: true - - # Optional index name. The default index name is set to name of the beat - # in all lowercase. - #index: topbeat - - # SOCKS5 proxy server URL - #proxy_url: socks5://user:password@socks5-server:2233 - - # Resolve names locally when using a proxy server. Defaults to false. - #proxy_use_local_resolver: false - # Optional TLS. By default is off. 
# List of root certificates for HTTPS server verifications #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] @@ -211,85 +79,8 @@ output.elasticsearch: # Client Certificate Key #tls.certificate_key: "/etc/pki/client/cert.key" - # Controls whether the client verifies server certificates and host name. - # If insecure is set to true, all server host names and certificates will be - # accepted. In this mode TLS based connections are susceptible to - # man-in-the-middle attacks. Use only for testing. - #tls.insecure: true - - # Configure cipher suites to be used for TLS connections - #tls.cipher_suites: [] - - # Configure curve types for ECDHE based cipher suites - #tls.curve_types: [] - - -#------------------------------- File output ---------------------------------- -#output.file: - # Path to the directory where to save the generated files. The option is mandatory. - #path: "/tmp/topbeat" - - # Name of the generated files. The default is `topbeat` and it generates files: `topbeat`, `topbeat.1`, `topbeat.2`, etc. - #filename: topbeat - - # Maximum size in kilobytes of each file. When this size is reached, the files are - # rotated. The default value is 10240 kB. - #rotate_every_kb: 10000 - - # Maximum number of files under path. When this number of files is reached, the - # oldest file is deleted and the rest are shifted from last to first. The default - # is 7 files. - #number_of_files: 7 - - -#----------------------------- Console output --------------------------------- -#output.console: - # Pretty print json event - #pretty: false - #================================ Logging ===================================== -# There are three options for the log output: syslog, file, stderr. -# Under Windows systems, the log files are per default sent to the file output, -# under all other system per default to syslog. # Sets log level. The default log level is error. 
# Available log levels are: critical, error, warning, info, debug #logging.level: error - -# Enable debug output for selected components. To enable all selectors use ["*"] -# Other available selectors are beat, publish, service -# Multiple selectors can be chained. -#logging.selectors: [ ] - -# Send all logging output to syslog. The default is false. -#logging.to_syslog: true - -# Logging to rotating files files. Set logging.to_files to false to disable logging to -# files. -logging.to_files: true -logging.files: - # Configure the path where the logs are written. The default is the logs directory - # under the home path (the binary location). - #path: /var/log/mybeat - - # The name of the files where the logs are written to. - #name: mybeat - - # Configure log file size limit. If limit is reached, log file will be - # automatically rotated - #rotateeverybytes: 10485760 # = 10MB - - # Number of rotated log files to keep. Oldest files will be deleted first. - #keepfiles: 7 - -#================================ Filters ===================================== - -# This section defines a list of filtering rules that are applied one by one starting with the -# exported event: -# event -> filter1 -> event1 -> filter2 ->event2 ... -# Supported actions: drop_fields, drop_event, include_fields -#filters: -# - drop_fields: -# equals: -# status: OK -# fields: [ ] diff --git a/winlogbeat/etc/beat.full.yml b/winlogbeat/etc/beat.full.yml new file mode 100644 index 000000000000..f08d68fb985f --- /dev/null +++ b/winlogbeat/etc/beat.full.yml @@ -0,0 +1,35 @@ +########################## Winlogbeat Configuration ########################### + +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see winlogbeat.yml in the same directory. 
+# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/winlogbeat/index.html + +#======================= Winlogbeat specific options ========================== + +# The registry file is where Winlogbeat persists its state so that the beat +# can resume after shutdown or an outage. The default is .winlogbeat.yml +# in the directory in which it was started. +#winlogbeat.registry_file: .winlogbeat.yml + +# Diagnostic metrics that can retrieved through a web interface if a +# bindaddress value (host:port) is specified. The web address will be +# http:///debug/vars +#winlogbeat.metrics: +# bindaddress: 'localhost:8123' + +# event_logs specifies a list of event logs to monitor as well as any +# accompanying options. The YAML data type of event_logs is a list of +# dictionaries. +# +# The supported keys are name (required), tags, fields, fields_under_root, +# ignore_older, level, event_id, provider, and include_xml. Please visit the +# documentation for the complete details of each option. +# https://go.es.io/WinlogbeatConfig +winlogbeat.event_logs: + - name: Application + ignore_older: 72h + - name: Security + - name: System diff --git a/winlogbeat/etc/beat.short.yml b/winlogbeat/etc/beat.short.yml deleted file mode 100644 index 8b877a6c6db1..000000000000 --- a/winlogbeat/etc/beat.short.yml +++ /dev/null @@ -1,24 +0,0 @@ -###################### Winlogbeat Configuration Example ########################## - -# This file is an example configuration file highlighting only the most common -# options. The winlogbeat.yml file from the same directory contains all the -# supported options with more comments. You can use it as a reference. -# -# You can find the full configuration reference here: -# https://www.elastic.co/guide/en/beats/winlogbeat/index.html - -#======================= Winlogbeat specific options ========================== - -# event_logs specifies a list of event logs to monitor as well as any -# accompanying options. 
The YAML data type of event_logs is a list of -# dictionaries. -# -# The supported keys are name (required), tags, fields, fields_under_root, -# ignore_older, level, event_id, provider, and include_xml. Please visit the -# documentation for the complete details of each option. -# https://go.es.io/WinlogbeatConfig -winlogbeat.event_logs: - - name: Application - ignore_older: 72h - - name: Security - - name: System diff --git a/winlogbeat/etc/beat.yml b/winlogbeat/etc/beat.yml index 99de74133854..0c08cefb44dd 100644 --- a/winlogbeat/etc/beat.yml +++ b/winlogbeat/etc/beat.yml @@ -1,25 +1,14 @@ -########################## Winlogbeat Configuration ########################### +###################### Winlogbeat Configuration Example ########################## -# This file is a full configuration example documenting all non-deprecated -# options in comments. For a shorter configuration example, that contains only -# the most common options, please see winlogbeat.short.yml in the same directory. +# This file is an example configuration file highlighting only the most common +# options. The winlogbeat.full.yml file from the same directory contains all the +# supported options with more comments. You can use it as a reference. # # You can find the full configuration reference here: # https://www.elastic.co/guide/en/beats/winlogbeat/index.html #======================= Winlogbeat specific options ========================== -# The registry file is where Winlogbeat persists its state so that the beat -# can resume after shutdown or an outage. The default is .winlogbeat.yml -# in the directory in which it was started. -#winlogbeat.registry_file: .winlogbeat.yml - -# Diagnostic metrics that can retrieved through a web interface if a -# bindaddress value (host:port) is specified. The web address will be -# http:///debug/vars -#winlogbeat.metrics: -# bindaddress: 'localhost:8123' - # event_logs specifies a list of event logs to monitor as well as any # accompanying options. 
The YAML data type of event_logs is a list of # dictionaries. diff --git a/winlogbeat/winlogbeat.full.yml b/winlogbeat/winlogbeat.full.yml new file mode 100644 index 000000000000..fdcc8fb75de1 --- /dev/null +++ b/winlogbeat/winlogbeat.full.yml @@ -0,0 +1,299 @@ +########################## Winlogbeat Configuration ########################### + +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see winlogbeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/winlogbeat/index.html + +#======================= Winlogbeat specific options ========================== + +# The registry file is where Winlogbeat persists its state so that the beat +# can resume after shutdown or an outage. The default is .winlogbeat.yml +# in the directory in which it was started. +#winlogbeat.registry_file: .winlogbeat.yml + +# Diagnostic metrics that can retrieved through a web interface if a +# bindaddress value (host:port) is specified. The web address will be +# http:///debug/vars +#winlogbeat.metrics: +# bindaddress: 'localhost:8123' + +# event_logs specifies a list of event logs to monitor as well as any +# accompanying options. The YAML data type of event_logs is a list of +# dictionaries. +# +# The supported keys are name (required), tags, fields, fields_under_root, +# ignore_older, level, event_id, provider, and include_xml. Please visit the +# documentation for the complete details of each option. +# https://go.es.io/WinlogbeatConfig +winlogbeat.event_logs: + - name: Application + ignore_older: 72h + - name: Security + - name: System + +#================================ General ===================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. 
+# If this option is not defined, the hostname is used.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published. Tags make it easy to group servers by different
+# logical properties.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output. Fields can be scalar values, arrays, dictionaries, or any nested
+# combination of these.
+#fields:
+#  env: staging
+
+# If this option is set to true, the custom fields are stored as top-level
+# fields in the output document instead of being grouped under a fields
+# sub-dictionary. Default is false.
+#fields_under_root: false
+
+# Uncomment the following if you want to ignore transactions created
+# by the server on which the shipper is installed. This option is useful
+# to remove duplicates if shippers are installed on multiple servers.
+#ignore_outgoing: true
+
+# How often (in seconds) shippers are publishing their IPs to the topology map.
+# The default is 10 seconds.
+#refresh_topology_freq: 10
+
+# Expiration time (in seconds) of the IPs published by a shipper to the topology map.
+# All the IPs will be deleted afterwards. Note, that the value must be higher than
+# refresh_topology_freq. The default is 15 seconds.
+#topology_expire: 15
+
+# Internal queue size for single events in processing pipeline
+#queue_size: 1000
+
+# Sets the maximum number of CPUs that can be executing simultaneously. The
+# default is the number of logical CPUs available in the system.
+#max_procs:
+
+#================================ Outputs =====================================
+
+# Configure what outputs to use when sending the data collected by the beat.
+# Multiple outputs may be used.
+
+#-------------------------- Elasticsearch output ------------------------------
+output.elasticsearch:
+  # Array of hosts to connect to.
+  # Scheme and port can be left out and will be set to the default (http and 9200)
+  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
+  hosts: ["localhost:9200"]
+
+  # Optional protocol and basic auth credentials.
+  #protocol: "https"
+  #username: "admin"
+  #password: "s3cr3t"
+
+  # Dictionary of HTTP parameters to pass within the url with index operations.
+  #parameters:
+    #param1: value1
+    #param2: value2
+
+  # Number of workers per Elasticsearch host.
+  #worker: 1
+
+  # Optional index name. The default is "winlogbeat" and generates
+  # [winlogbeat-]YYYY.MM.DD keys.
+  #index: "winlogbeat"
+
+  # Optional HTTP Path
+  #path: "/elasticsearch"
+
+  # Proxy server url
+  #proxy_url: http://proxy:3128
+
+  # The number of times a particular Elasticsearch index operation is attempted. If
+  # the indexing operation doesn't succeed after this many retries, the events are
+  # dropped. The default is 3.
+  #max_retries: 3
+
+  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
+  # The default is 50.
+  #bulk_max_size: 50
+
+  # Configure http request timeout before failing a request to Elasticsearch.
+  #timeout: 90
+
+  # The number of seconds to wait for new events between two bulk API index requests.
+  # If `bulk_max_size` is reached before this interval expires, additional bulk index
+  # requests are made.
+  #flush_interval: 1
+
+  # Boolean that sets if the topology is kept in Elasticsearch. The default is
+  # false. This option makes sense only for Packetbeat.
+  #save_topology: false
+
+  # The time to live in seconds for the topology information that is stored in
+  # Elasticsearch. The default is 15 seconds.
+  #topology_expire: 15
+
+  # A template is used to set the mapping in Elasticsearch
+  # By default template loading is enabled and the template is loaded.
+ # These settings can be adjusted to load your own template or overwrite existing ones + + # Template name. By default the template name is winlogbeat. + template.name: "winlogbeat" + + # Path to template file + template.path: "winlogbeat.template.json" + + # Overwrite existing template + template.overwrite: false + + # TLS configuration. By default is off. + # List of root certificates for HTTPS server verifications + #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for TLS client authentication + #tls.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #tls.certificate_key: "/etc/pki/client/cert.key" + + # Controls whether the client verifies server certificates and host name. + # If insecure is set to true, all server host names and certificates will be + # accepted. In this mode TLS based connections are susceptible to + # man-in-the-middle attacks. Use only for testing. + #tls.insecure: true + + # Configure cipher suites to be used for TLS connections + #tls.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #tls.curve_types: [] + + # Configure minimum TLS version allowed for connection to logstash + #tls.min_version: 1.0 + + # Configure maximum TLS version allowed for connection to logstash + #tls.max_version: 1.2 + + +#----------------------------- Logstash output -------------------------------- +#output.logstash: + # The Logstash hosts + #hosts: ["localhost:5044"] + + # Number of workers per Logstash host. + #worker: 1 + + # Set gzip compression level. + #compression_level: 3 + + # Optional load balance the events between the Logstash hosts + #loadbalance: true + + # Optional index name. The default index name is set to name of the beat + # in all lowercase. + #index: winlogbeat + + # SOCKS5 proxy server URL + #proxy_url: socks5://user:password@socks5-server:2233 + + # Resolve names locally when using a proxy server. Defaults to false. 
+ #proxy_use_local_resolver: false + + # Optional TLS. By default is off. + # List of root certificates for HTTPS server verifications + #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for TLS client authentication + #tls.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #tls.certificate_key: "/etc/pki/client/cert.key" + + # Controls whether the client verifies server certificates and host name. + # If insecure is set to true, all server host names and certificates will be + # accepted. In this mode TLS based connections are susceptible to + # man-in-the-middle attacks. Use only for testing. + #tls.insecure: true + + # Configure cipher suites to be used for TLS connections + #tls.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #tls.curve_types: [] + + +#------------------------------- File output ---------------------------------- +#output.file: + # Path to the directory where to save the generated files. The option is mandatory. + #path: "/tmp/winlogbeat" + + # Name of the generated files. The default is `winlogbeat` and it generates files: `winlogbeat`, `winlogbeat.1`, `winlogbeat.2`, etc. + #filename: winlogbeat + + # Maximum size in kilobytes of each file. When this size is reached, the files are + # rotated. The default value is 10240 kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, the + # oldest file is deleted and the rest are shifted from last to first. The default + # is 7 files. + #number_of_files: 7 + + +#----------------------------- Console output --------------------------------- +#output.console: + # Pretty print json event + #pretty: false + +#================================ Logging ===================================== +# There are three options for the log output: syslog, file, stderr. 
+# Under Windows systems, the log files are per default sent to the file output, +# under all other system per default to syslog. + +# Sets log level. The default log level is error. +# Available log levels are: critical, error, warning, info, debug +#logging.level: error + +# Enable debug output for selected components. To enable all selectors use ["*"] +# Other available selectors are beat, publish, service +# Multiple selectors can be chained. +#logging.selectors: [ ] + +# Send all logging output to syslog. The default is false. +#logging.to_syslog: true + +# Logging to rotating files files. Set logging.to_files to false to disable logging to +# files. +logging.to_files: true +logging.files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/mybeat + + # The name of the files where the logs are written to. + #name: mybeat + + # Configure log file size limit. If limit is reached, log file will be + # automatically rotated + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. Oldest files will be deleted first. + #keepfiles: 7 + +#================================ Filters ===================================== + +# This section defines a list of filtering rules that are applied one by one starting with the +# exported event: +# event -> filter1 -> event1 -> filter2 ->event2 ... +# Supported actions: drop_fields, drop_event, include_fields +#filters: +# - drop_fields: +# equals: +# status: OK +# fields: [ ] diff --git a/winlogbeat/winlogbeat.short.yml b/winlogbeat/winlogbeat.short.yml deleted file mode 100644 index 360f3a890023..000000000000 --- a/winlogbeat/winlogbeat.short.yml +++ /dev/null @@ -1,79 +0,0 @@ -###################### Winlogbeat Configuration Example ########################## - -# This file is an example configuration file highlighting only the most common -# options. 
The winlogbeat.yml file from the same directory contains all the -# supported options with more comments. You can use it as a reference. -# -# You can find the full configuration reference here: -# https://www.elastic.co/guide/en/beats/winlogbeat/index.html - -#======================= Winlogbeat specific options ========================== - -# event_logs specifies a list of event logs to monitor as well as any -# accompanying options. The YAML data type of event_logs is a list of -# dictionaries. -# -# The supported keys are name (required), tags, fields, fields_under_root, -# ignore_older, level, event_id, provider, and include_xml. Please visit the -# documentation for the complete details of each option. -# https://go.es.io/WinlogbeatConfig -winlogbeat.event_logs: - - name: Application - ignore_older: 72h - - name: Security - - name: System - -#================================ General ===================================== - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -#name: - -# The tags of the shipper are included in their own field with each -# transaction published. -#tags: ["service-X", "web-tier"] - -# Optional fields that you can specify to add additional information to the -# output. -#fields: -# env: staging - -#================================ Outputs ===================================== - -# Configure what outputs to use when sending the data collected by the beat. -# Multiple outputs may be used. - -#-------------------------- Elasticsearch output ------------------------------ -output.elasticsearch: - # Array of hosts to connect to. - hosts: ["localhost:9200"] - - # Template name. By default the template name is winlogbeat. 
- template.name: "winlogbeat" - - # Path to template file - template.path: "winlogbeat.template.json" - - # Overwrite existing template - template.overwrite: false - -#----------------------------- Logstash output -------------------------------- -#output.logstash: - # The Logstash hosts - #hosts: ["localhost:5044"] - - # Optional TLS. By default is off. - # List of root certificates for HTTPS server verifications - #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for TLS client authentication - #tls.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #tls.certificate_key: "/etc/pki/client/cert.key" - -#================================ Logging ===================================== - -# Sets log level. The default log level is error. -# Available log levels are: critical, error, warning, info, debug -#logging.level: error diff --git a/winlogbeat/winlogbeat.yml b/winlogbeat/winlogbeat.yml index 3c57c38db15e..1d77e40bb4b4 100644 --- a/winlogbeat/winlogbeat.yml +++ b/winlogbeat/winlogbeat.yml @@ -1,25 +1,14 @@ -########################## Winlogbeat Configuration ########################### +###################### Winlogbeat Configuration Example ########################## -# This file is a full configuration example documenting all non-deprecated -# options in comments. For a shorter configuration example, that contains only -# the most common options, please see winlogbeat.short.yml in the same directory. +# This file is an example configuration file highlighting only the most common +# options. The winlogbeat.full.yml file from the same directory contains all the +# supported options with more comments. You can use it as a reference. 
# # You can find the full configuration reference here: # https://www.elastic.co/guide/en/beats/winlogbeat/index.html #======================= Winlogbeat specific options ========================== -# The registry file is where Winlogbeat persists its state so that the beat -# can resume after shutdown or an outage. The default is .winlogbeat.yml -# in the directory in which it was started. -#winlogbeat.registry_file: .winlogbeat.yml - -# Diagnostic metrics that can retrieved through a web interface if a -# bindaddress value (host:port) is specified. The web address will be -# http:///debug/vars -#winlogbeat.metrics: -# bindaddress: 'localhost:8123' - # event_logs specifies a list of event logs to monitor as well as any # accompanying options. The YAML data type of event_logs is a list of # dictionaries. @@ -38,46 +27,17 @@ winlogbeat.event_logs: # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. #name: # The tags of the shipper are included in their own field with each -# transaction published. Tags make it easy to group servers by different -# logical properties. +# transaction published. #tags: ["service-X", "web-tier"] # Optional fields that you can specify to add additional information to the -# output. Fields can be scalar values, arrays, dictionaries, or any nested -# combination of these. +# output. #fields: # env: staging -# If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields -# sub-dictionary. Default is false. -#fields_under_root: false - -# Uncomment the following if you want to ignore transactions created -# by the server on which the shipper is installed. This option is useful -# to remove duplicates if shippers are installed on multiple servers. 
-#ignore_outgoing: true - -# How often (in seconds) shippers are publishing their IPs to the topology map. -# The default is 10 seconds. -#refresh_topology_freq: 10 - -# Expiration time (in seconds) of the IPs published by a shipper to the topology map. -# All the IPs will be deleted afterwards. Note, that the value must be higher than -# refresh_topology_freq. The default is 15 seconds. -#topology_expire: 15 - -# Internal queue size for single events in processing pipeline -#queue_size: 1000 - -# Sets the maximum number of CPUs that can be executing simultaneously. The -# default is the number of logical CPUs available in the system. -#max_procs: - #================================ Outputs ===================================== # Configure what outputs to use when sending the data collected by the beat. @@ -86,63 +46,8 @@ winlogbeat.event_logs: #-------------------------- Elasticsearch output ------------------------------ output.elasticsearch: # Array of hosts to connect to. - # Scheme and port can be left out and will be set to the default (http and 9200) - # In case you specify and additional path, the scheme is required: http://localhost:9200/path - # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 hosts: ["localhost:9200"] - # Optional protocol and basic auth credentials. - #protocol: "https" - #username: "admin" - #password: "s3cr3t" - - # Dictionary of HTTP parameters to pass within the url with index operations. - #parameters: - #param1: value1 - #param2: value2 - - # Number of workers per Elasticsearch host. - #worker: 1 - - # Optional index name. The default is "winlogbeat" and generates - # [winlogbeat-]YYYY.MM.DD keys. - #index: "winlogbeat" - - # Optional HTTP Path - #path: "/elasticsearch" - - # Proxy server url - #proxy_url: http://proxy:3128 - - # The number of times a particular Elasticsearch index operation is attempted. If - # the indexing operation doesn't succeed after this many retries, the events are - # dropped. 
The default is 3. - #max_retries: 3 - - # The maximum number of events to bulk in a single Elasticsearch bulk API index request. - # The default is 50. - #bulk_max_size: 50 - - # Configure http request timeout before failing an request to Elasticsearch. - #timeout: 90 - - # The number of seconds to wait for new events between two bulk API index requests. - # If `bulk_max_size` is reached before this interval expires, addition bulk index - # requests are made. - #flush_interval: 1 - - # Boolean that sets if the topology is kept in Elasticsearch. The default is - # false. This option makes sense only for Packetbeat. - #save_topology: false - - # The time to live in seconds for the topology information that is stored in - # Elasticsearch. The default is 15 seconds. - #topology_expire: 15 - - # A template is used to set the mapping in Elasticsearch - # By default template loading is enabled and the template is loaded. - # These settings can be adjusted to load your own template or overwrite existing ones - # Template name. By default the template name is winlogbeat. template.name: "winlogbeat" @@ -152,59 +57,11 @@ output.elasticsearch: # Overwrite existing template template.overwrite: false - # TLS configuration. By default is off. - # List of root certificates for HTTPS server verifications - #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for TLS client authentication - #tls.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #tls.certificate_key: "/etc/pki/client/cert.key" - - # Controls whether the client verifies server certificates and host name. - # If insecure is set to true, all server host names and certificates will be - # accepted. In this mode TLS based connections are susceptible to - # man-in-the-middle attacks. Use only for testing. 
- #tls.insecure: true - - # Configure cipher suites to be used for TLS connections - #tls.cipher_suites: [] - - # Configure curve types for ECDHE based cipher suites - #tls.curve_types: [] - - # Configure minimum TLS version allowed for connection to logstash - #tls.min_version: 1.0 - - # Configure maximum TLS version allowed for connection to logstash - #tls.max_version: 1.2 - - #----------------------------- Logstash output -------------------------------- #output.logstash: # The Logstash hosts #hosts: ["localhost:5044"] - # Number of workers per Logstash host. - #worker: 1 - - # Set gzip compression level. - #compression_level: 3 - - # Optional load balance the events between the Logstash hosts - #loadbalance: true - - # Optional index name. The default index name is set to name of the beat - # in all lowercase. - #index: winlogbeat - - # SOCKS5 proxy server URL - #proxy_url: socks5://user:password@socks5-server:2233 - - # Resolve names locally when using a proxy server. Defaults to false. - #proxy_use_local_resolver: false - # Optional TLS. By default is off. # List of root certificates for HTTPS server verifications #tls.certificate_authorities: ["/etc/pki/root/ca.pem"] @@ -215,85 +72,8 @@ output.elasticsearch: # Client Certificate Key #tls.certificate_key: "/etc/pki/client/cert.key" - # Controls whether the client verifies server certificates and host name. - # If insecure is set to true, all server host names and certificates will be - # accepted. In this mode TLS based connections are susceptible to - # man-in-the-middle attacks. Use only for testing. - #tls.insecure: true - - # Configure cipher suites to be used for TLS connections - #tls.cipher_suites: [] - - # Configure curve types for ECDHE based cipher suites - #tls.curve_types: [] - - -#------------------------------- File output ---------------------------------- -#output.file: - # Path to the directory where to save the generated files. The option is mandatory. 
- #path: "/tmp/winlogbeat" - - # Name of the generated files. The default is `winlogbeat` and it generates files: `winlogbeat`, `winlogbeat.1`, `winlogbeat.2`, etc. - #filename: winlogbeat - - # Maximum size in kilobytes of each file. When this size is reached, the files are - # rotated. The default value is 10240 kB. - #rotate_every_kb: 10000 - - # Maximum number of files under path. When this number of files is reached, the - # oldest file is deleted and the rest are shifted from last to first. The default - # is 7 files. - #number_of_files: 7 - - -#----------------------------- Console output --------------------------------- -#output.console: - # Pretty print json event - #pretty: false - #================================ Logging ===================================== -# There are three options for the log output: syslog, file, stderr. -# Under Windows systems, the log files are per default sent to the file output, -# under all other system per default to syslog. # Sets log level. The default log level is error. # Available log levels are: critical, error, warning, info, debug #logging.level: error - -# Enable debug output for selected components. To enable all selectors use ["*"] -# Other available selectors are beat, publish, service -# Multiple selectors can be chained. -#logging.selectors: [ ] - -# Send all logging output to syslog. The default is false. -#logging.to_syslog: true - -# Logging to rotating files files. Set logging.to_files to false to disable logging to -# files. -logging.to_files: true -logging.files: - # Configure the path where the logs are written. The default is the logs directory - # under the home path (the binary location). - #path: /var/log/mybeat - - # The name of the files where the logs are written to. - #name: mybeat - - # Configure log file size limit. If limit is reached, log file will be - # automatically rotated - #rotateeverybytes: 10485760 # = 10MB - - # Number of rotated log files to keep. 
Oldest files will be deleted first. - #keepfiles: 7 - -#================================ Filters ===================================== - -# This section defines a list of filtering rules that are applied one by one starting with the -# exported event: -# event -> filter1 -> event1 -> filter2 ->event2 ... -# Supported actions: drop_fields, drop_event, include_fields -#filters: -# - drop_fields: -# equals: -# status: OK -# fields: [ ]