From 48c5c1772dd9e5a4ee7bdc8e4641eaac35fcd8a2 Mon Sep 17 00:00:00 2001
From: Andrew Kroh <andrew.kroh@elastic.co>
Date: Wed, 21 Nov 2018 14:15:34 -0500
Subject: [PATCH 1/9] Build, test, package x-pack/metricbeat with mage

This enables the use of mage to build, test, and package x-pack/metricbeat,
and adds it to the test matrix on Travis CI as well.

But it does not modify the top-level metricbeat build to stop producing
x-pack artifacts. This cut-over still needs to be done.
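
With this change a developer can run mage directly from the x-pack/metricbeat
directory, e.g. mage update, mage build, or mage package. A mage target is
just an exported Go function in a file guarded by the mage build tag; as a
minimal sketch, mirroring the magefile added below:

    // +build mage

    package main

    import "github.com/elastic/beats/dev-tools/mage"

    // Build compiles the Beat binary. Invoked on the command line as
    // "mage build".
    func Build() error {
        return mage.Build(mage.DefaultBuildArgs())
    }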

(cherry picked from commit d6b1ba19ddae4ed55fef65058a99c9215c58b587)

# Conflicts:
#	metricbeat/Dockerfile
---
 .travis.yml                    |   4 +
 Makefile                       |   4 +-
 dev-tools/mage/integtest.go    |   2 +-
 metricbeat/Dockerfile          |  23 ++-
 x-pack/metricbeat/Makefile     |   3 +
 x-pack/metricbeat/magefile.go  | 270 +++++++++++++++++++++++++++++++++
 x-pack/metricbeat/packages.yml |  71 +++++++++
 7 files changed, 360 insertions(+), 17 deletions(-)
 create mode 100644 x-pack/metricbeat/Makefile
 create mode 100644 x-pack/metricbeat/magefile.go
 create mode 100644 x-pack/metricbeat/packages.yml

diff --git a/.travis.yml b/.travis.yml
index 687d82b6cf2a..60282e2c955e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -94,6 +94,10 @@ jobs:
       env: TARGETS="-C metricbeat crosscompile"
       go: $GO_VERSION
       stage: test
+    - os: linux
+      env: TARGETS="-C x-pack/metricbeat testsuite"
+      go: $GO_VERSION
+      stage: test
 
     # Packetbeat
     - os: linux
diff --git a/Makefile b/Makefile
index 2cbf3616d2d0..a14df718103a 100644
--- a/Makefile
+++ b/Makefile
@@ -72,7 +72,7 @@ clean-vendor:
 
 .PHONY: check
 check: python-env
-	@$(foreach var,$(PROJECTS) dev-tools x-pack/filebeat,$(MAKE) -C $(var) check || exit 1;)
+	@$(foreach var,$(PROJECTS) dev-tools x-pack/filebeat x-pack/metricbeat,$(MAKE) -C $(var) check || exit 1;)
 	@# Checks also python files which are not part of the beats
 	@$(FIND) -name *.py -exec $(PYTHON_ENV)/bin/autopep8 -d --max-line-length 120  {} \; | (! grep . -q) || (echo "Code differs from autopep8's style" && false)
 	@# Validate that all updates were committed
@@ -107,7 +107,7 @@ misspell:
 
 .PHONY: fmt
 fmt: add-headers python-env
-	@$(foreach var,$(PROJECTS) dev-tools x-pack/filebeat,$(MAKE) -C $(var) fmt || exit 1;)
+	@$(foreach var,$(PROJECTS) dev-tools x-pack/filebeat x-pack/metricbeat,$(MAKE) -C $(var) fmt || exit 1;)
 	@# Cleans also python files which are not part of the beats
 	@$(FIND) -name "*.py" -exec $(PYTHON_ENV)/bin/autopep8 --in-place --max-line-length 120 {} \;
 
diff --git a/dev-tools/mage/integtest.go b/dev-tools/mage/integtest.go
index 5b3f6742cbd0..55f5a25f66ec 100644
--- a/dev-tools/mage/integtest.go
+++ b/dev-tools/mage/integtest.go
@@ -274,7 +274,7 @@ func dockerComposeBuildImages() error {
 		return err
 	}
 
-	args := []string{"build", "--pull", "--force-rm"}
+	args := []string{"-p", dockerComposeProjectName(), "build", "--pull", "--force-rm"}
 	if _, noCache := os.LookupEnv("DOCKER_NOCACHE"); noCache {
 		args = append(args, "--no-cache")
 	}
diff --git a/metricbeat/Dockerfile b/metricbeat/Dockerfile
index d3a67163ee3b..e40961df0423 100644
--- a/metricbeat/Dockerfile
+++ b/metricbeat/Dockerfile
@@ -1,21 +1,16 @@
-FROM golang:1.11.2
-MAINTAINER Nicolas Ruflin <ruflin@elastic.co>
+FROM golang:1.10.3
 
-RUN set -x && \
-    apt-get update && \
-    apt-get install -y --no-install-recommends \
-         netcat python-pip virtualenv && \
-    apt-get clean
+RUN \
+    apt-get update \
+      && apt-get install -y --no-install-recommends \
+         netcat \
+         python-pip \
+         virtualenv \
+      && rm -rf /var/lib/apt/lists/*
 
 RUN pip install --upgrade pip
 RUN pip install --upgrade setuptools
 RUN pip install --upgrade docker-compose==1.21.0
 
-# Setup work environment
-ENV METRICBEAT_PATH /go/src/github.com/elastic/beats/metricbeat
-
-RUN mkdir -p $METRICBEAT_PATH/build/coverage
-WORKDIR $METRICBEAT_PATH
-
-# Add healthcheck for docker/healthcheck metricset to check during testing
+# Add healthcheck for the docker/healthcheck metricset to check during testing.
 HEALTHCHECK CMD exit 0
diff --git a/x-pack/metricbeat/Makefile b/x-pack/metricbeat/Makefile
new file mode 100644
index 000000000000..56633e2b3e59
--- /dev/null
+++ b/x-pack/metricbeat/Makefile
@@ -0,0 +1,3 @@
+ES_BEATS ?= ../..
+
+include $(ES_BEATS)/dev-tools/make/xpack.mk
diff --git a/x-pack/metricbeat/magefile.go b/x-pack/metricbeat/magefile.go
new file mode 100644
index 000000000000..bb61140279b9
--- /dev/null
+++ b/x-pack/metricbeat/magefile.go
@@ -0,0 +1,270 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+// +build mage
+
+package main
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/magefile/mage/mg"
+
+	"github.com/elastic/beats/dev-tools/mage"
+)
+
+func init() {
+	mage.BeatDescription = "Metricbeat is a lightweight shipper for metrics."
+	mage.BeatLicense = "Elastic"
+}
+
+// Build builds the Beat binary.
+func Build() error {
+	return mage.Build(mage.DefaultBuildArgs())
+}
+
+// GolangCrossBuild builds the Beat binary inside the golang-builder.
+// Do not use directly, use crossBuild instead.
+func GolangCrossBuild() error {
+	return mage.GolangCrossBuild(mage.DefaultGolangCrossBuildArgs())
+}
+
+// CrossBuild cross-builds the beat for all target platforms.
+func CrossBuild() error {
+	return mage.CrossBuild()
+}
+
+// BuildGoDaemon builds the go-daemon binary (use crossBuildGoDaemon).
+func BuildGoDaemon() error {
+	return mage.BuildGoDaemon()
+}
+
+// CrossBuildGoDaemon cross-builds the go-daemon binary using Docker.
+func CrossBuildGoDaemon() error {
+	return mage.CrossBuildGoDaemon()
+}
+
+// Clean cleans all generated files and build artifacts.
+func Clean() error {
+	return mage.Clean()
+}
+
+// Package packages the Beat for distribution.
+// Use SNAPSHOT=true to build snapshots.
+// Use PLATFORMS to control the target platforms.
+// Use BEAT_VERSION_QUALIFIER to control the version qualifier.
+func Package() {
+	start := time.Now()
+	defer func() { fmt.Println("package ran for", time.Since(start)) }()
+
+	mage.LoadLocalNamedSpec("xpack")
+
+	mg.Deps(Update, prepareModulePackaging)
+	mg.Deps(CrossBuild, CrossBuildGoDaemon)
+	mg.SerialDeps(mage.Package, TestPackages)
+}
+
+// TestPackages tests the generated packages (i.e. file modes, owners, groups).
+func TestPackages() error {
+	return mage.TestPackages(mage.WithModulesD())
+}
+
+// Fields generates a fields.yml and fields.go for each module.
+func Fields() {
+	mg.Deps(fieldsYML, mage.GenerateModuleFieldsGo)
+}
+
+// fieldsYML generates a fields.yml based on metricbeat + x-pack/metricbeat modules.
+func fieldsYML() error {
+	return mage.GenerateFieldsYAML(mage.OSSBeatDir("module"), "module")
+}
+
+// Dashboards collects all the dashboards and generates index patterns.
+func Dashboards() error {
+	return mage.KibanaDashboards(mage.OSSBeatDir("module"), "module")
+}
+
+// Config generates both the short and reference configs.
+func Config() {
+	mg.Deps(shortConfig, referenceConfig, createDirModulesD)
+}
+
+// Update regenerates fields, dashboards, configs, the module include list, and module packaging.
+func Update() {
+	mg.SerialDeps(Fields, Dashboards, Config, prepareModulePackaging,
+		mage.GenerateModuleIncludeListGo)
+}
+
+// Fmt formats source code and adds file headers.
+func Fmt() {
+	mg.Deps(mage.Format)
+}
+
+// Check runs fmt and update then returns an error if any modifications are found.
+func Check() {
+	mg.SerialDeps(mage.Format, Update, mage.Check)
+}
+
+// IntegTest executes integration tests (it uses Docker to run the tests).
+func IntegTest() {
+	mage.AddIntegTestUsage()
+	defer mage.StopIntegTestEnv()
+	mg.SerialDeps(GoIntegTest, PythonIntegTest)
+}
+
+// UnitTest executes the unit tests.
+func UnitTest() {
+	mg.SerialDeps(GoUnitTest, PythonUnitTest)
+}
+
+// GoUnitTest executes the Go unit tests.
+// Use TEST_COVERAGE=true to enable code coverage profiling.
+// Use RACE_DETECTOR=true to enable the race detector.
+func GoUnitTest(ctx context.Context) error {
+	return mage.GoTest(ctx, mage.DefaultGoTestUnitArgs())
+}
+
+// GoIntegTest executes the Go integration tests.
+// Use TEST_COVERAGE=true to enable code coverage profiling.
+// Use RACE_DETECTOR=true to enable the race detector.
+func GoIntegTest(ctx context.Context) error {
+	return mage.RunIntegTest("goIntegTest", func() error {
+		return mage.GoTest(ctx, mage.DefaultGoTestIntegrationArgs())
+	})
+}
+
+// PythonUnitTest executes the python system tests.
+func PythonUnitTest() error {
+	mg.Deps(mage.BuildSystemTestBinary)
+	return mage.PythonNoseTest(mage.DefaultPythonTestUnitArgs())
+}
+
+// PythonUnitTest executes the python system tests in the integration environment (Docker).
+func PythonIntegTest(ctx context.Context) error {
+	if !mage.IsInIntegTestEnv() {
+		mg.Deps(Fields)
+	}
+	return mage.RunIntegTest("pythonIntegTest", func() error {
+		mg.Deps(mage.BuildSystemTestBinary)
+		return mage.PythonNoseTest(mage.DefaultPythonTestIntegrationArgs())
+	})
+}
+
+// -----------------------------------------------------------------------------
+// Customizations specific to Metricbeat.
+// - Include modules.d directory in packages.
+
+const (
+	dirModulesDGenerated = "build/package/modules.d"
+)
+
+// prepareModulePackaging generates the modules.d directory for the x-pack
+// distribution, excluding _meta and test files so that they are not included
+// in packages.
+func prepareModulePackaging() error {
+	mg.Deps(createDirModulesD)
+
+	err := mage.Clean([]string{
+		dirModulesDGenerated,
+	})
+	if err != nil {
+		return err
+	}
+
+	for _, copyAction := range []struct {
+		src, dst string
+	}{
+		{mage.OSSBeatDir("modules.d"), dirModulesDGenerated},
+		{"modules.d", dirModulesDGenerated},
+	} {
+		err := (&mage.CopyTask{
+			Source:  copyAction.src,
+			Dest:    copyAction.dst,
+			Mode:    0644,
+			DirMode: 0755,
+		}).Execute()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func shortConfig() error {
+	var configParts = []string{
+		mage.OSSBeatDir("_meta/common.yml"),
+		mage.OSSBeatDir("_meta/setup.yml"),
+		"{{ elastic_beats_dir }}/libbeat/_meta/config.yml",
+	}
+
+	for i, f := range configParts {
+		configParts[i] = mage.MustExpand(f)
+	}
+
+	configFile := mage.BeatName + ".yml"
+	mage.MustFileConcat(configFile, 0640, configParts...)
+	mage.MustFindReplace(configFile, regexp.MustCompile("beatname"), mage.BeatName)
+	mage.MustFindReplace(configFile, regexp.MustCompile("beat-index-prefix"), mage.BeatIndexPrefix)
+	return nil
+}
+
+func referenceConfig() error {
+	const modulesConfigYml = "build/config.modules.yml"
+	err := mage.GenerateModuleReferenceConfig(modulesConfigYml, mage.OSSBeatDir("module"), "module")
+	if err != nil {
+		return err
+	}
+	//defer os.Remove(modulesConfigYml)
+
+	var configParts = []string{
+		mage.OSSBeatDir("_meta/common.reference.yml"),
+		modulesConfigYml,
+		"{{ elastic_beats_dir }}/libbeat/_meta/config.reference.yml",
+	}
+
+	for i, f := range configParts {
+		configParts[i] = mage.MustExpand(f)
+	}
+
+	configFile := mage.BeatName + ".reference.yml"
+	mage.MustFileConcat(configFile, 0640, configParts...)
+	mage.MustFindReplace(configFile, regexp.MustCompile("beatname"), mage.BeatName)
+	mage.MustFindReplace(configFile, regexp.MustCompile("beat-index-prefix"), mage.BeatIndexPrefix)
+	return nil
+}
+
+func createDirModulesD() error {
+	if err := os.RemoveAll("modules.d"); err != nil {
+		return err
+	}
+
+	shortConfigs, err := filepath.Glob("module/*/_meta/config.yml")
+	if err != nil {
+		return err
+	}
+
+	for _, f := range shortConfigs {
+		parts := strings.Split(filepath.ToSlash(f), "/")
+		if len(parts) < 2 {
+			continue
+		}
+		moduleName := parts[1]
+
+		cp := mage.CopyTask{
+			Source: f,
+			Dest:   filepath.Join("modules.d", moduleName+".yml.disabled"),
+			Mode:   0644,
+		}
+		if err = cp.Execute(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/x-pack/metricbeat/packages.yml b/x-pack/metricbeat/packages.yml
new file mode 100644
index 000000000000..ad3f16aba8b9
--- /dev/null
+++ b/x-pack/metricbeat/packages.yml
@@ -0,0 +1,71 @@
+specs:
+  xpack:
+  - os: windows
+    types: [zip]
+    spec:
+      <<: *windows_binary_spec
+      <<: *elastic_license_for_binaries
+      files:
+        modules.d:
+          mode: 0644
+          source: build/package/modules.d
+          config: true
+        kibana:
+          source: build/kibana
+          mode: 0644
+
+  - os: darwin
+    types: [tgz]
+    spec:
+      <<: *binary_spec
+      <<: *elastic_license_for_binaries
+      files:
+        modules.d:
+          mode: 0644
+          source: build/package/modules.d
+          config: true
+        kibana:
+          source: build/kibana
+          mode: 0644
+
+  - os: darwin
+    types: [dmg]
+    spec:
+      <<: *macos_beat_pkg_spec
+      <<: *elastic_license_for_macos_pkg
+      files:
+        /etc/{{.BeatName}}/modules.d:
+          mode: 0644
+          source: build/package/modules.d
+          config: true
+        '/Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/kibana':
+          source: build/kibana
+          mode: 0644
+
+  - os: linux
+    types: [tgz]
+    spec:
+      <<: *binary_spec
+      <<: *elastic_license_for_binaries
+      files:
+        modules.d:
+          mode: 0644
+          source: build/package/modules.d
+          config: true
+        kibana:
+          source: build/kibana
+          mode: 0644
+
+  - os: linux
+    types: [deb, rpm]
+    spec:
+      <<: *deb_rpm_spec
+      <<: *elastic_license_for_deb_rpm
+      files:
+        '/etc/{{.BeatName}}/modules.d':
+          mode: 0644
+          source: build/package/modules.d
+          config: true
+        '/usr/share/{{.BeatName}}/kibana':
+          source: build/kibana
+          mode: 0644

From d997a82540e1dd853be743a54b564e4bddd33e07 Mon Sep 17 00:00:00 2001
From: Andrew Kroh <andrew.kroh@elastic.co>
Date: Mon, 26 Nov 2018 09:59:05 -0500
Subject: [PATCH 2/9] Add PROJECTS_XPACK_MAGE to top-level Makefile

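Projects listed in PROJECTS_XPACK_MAGE only need to provide the thin make
targets defined in dev-tools/make/xpack.mk (check, clean, fmt, help,
testsuite, update), each of which simply shells out to the corresponding mage
target; the top-level make loops can then treat them like any other beat.
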
(cherry picked from commit fd70623641045e0875bb60d7d7cbb0ae9d6ff48e)
---
 Makefile                | 16 +++++++++++-----
 dev-tools/make/xpack.mk | 25 +++++++++++++++----------
 2 files changed, 26 insertions(+), 15 deletions(-)

diff --git a/Makefile b/Makefile
index a14df718103a..20af5832d580 100644
--- a/Makefile
+++ b/Makefile
@@ -13,12 +13,18 @@ REVIEWDOG_OPTIONS?=-diff "git diff master"
 REVIEWDOG_REPO=github.com/haya14busa/reviewdog/cmd/reviewdog
 XPACK_SUFFIX=x-pack/
 
+# PROJECTS_XPACK_MAGE is a list of Beats whose primary build logic is based in
+# Mage. For compatibility with CI testing these projects support a subset of the
+# makefile targets. After all Beats converge to primarily using Mage we can
+# remove this and treat all sub-projects the same.
+PROJECTS_XPACK_MAGE=x-pack/filebeat x-pack/metricbeat
+
 # Runs complete testsuites (unit, system, integration) for all beats with coverage and race detection.
 # Also it builds the docs and the generators
 
 .PHONY: testsuite
 testsuite:
-	@$(foreach var,$(PROJECTS),$(MAKE) -C $(var) testsuite || exit 1;)
+	@$(foreach var,$(PROJECTS) $(PROJECTS_XPACK_MAGE),$(MAKE) -C $(var) testsuite || exit 1;)
 
 .PHONY: setup-commit-hook
 setup-commit-hook:
@@ -54,13 +60,13 @@ coverage-report:
 
 .PHONY: update
 update: notice
-	@$(foreach var,$(PROJECTS),$(MAKE) -C $(var) update || exit 1;)
+	@$(foreach var,$(PROJECTS) $(PROJECTS_XPACK_MAGE),$(MAKE) -C $(var) update || exit 1;)
 	@$(MAKE) -C deploy/kubernetes all
 
 .PHONY: clean
 clean:
 	@rm -rf build
-	@$(foreach var,$(PROJECTS),$(MAKE) -C $(var) clean || exit 1;)
+	@$(foreach var,$(PROJECTS) $(PROJECTS_XPACK_MAGE),$(MAKE) -C $(var) clean || exit 1;)
 	@$(MAKE) -C generator clean
 	@-mage -clean 2> /dev/null
 
@@ -72,7 +78,7 @@ clean-vendor:
 
 .PHONY: check
 check: python-env
-	@$(foreach var,$(PROJECTS) dev-tools x-pack/filebeat x-pack/metricbeat,$(MAKE) -C $(var) check || exit 1;)
+	@$(foreach var,$(PROJECTS) dev-tools $(PROJECTS_XPACK_MAGE),$(MAKE) -C $(var) check || exit 1;)
 	@# Checks also python files which are not part of the beats
 	@$(FIND) -name *.py -exec $(PYTHON_ENV)/bin/autopep8 -d --max-line-length 120  {} \; | (! grep . -q) || (echo "Code differs from autopep8's style" && false)
 	@# Validate that all updates were committed
@@ -107,7 +113,7 @@ misspell:
 
 .PHONY: fmt
 fmt: add-headers python-env
-	@$(foreach var,$(PROJECTS) dev-tools x-pack/filebeat x-pack/metricbeat,$(MAKE) -C $(var) fmt || exit 1;)
+	@$(foreach var,$(PROJECTS) dev-tools $(PROJECTS_XPACK_MAGE),$(MAKE) -C $(var) fmt || exit 1;)
 	@# Cleans also python files which are not part of the beats
 	@$(FIND) -name "*.py" -exec $(PYTHON_ENV)/bin/autopep8 --in-place --max-line-length 120 {} \;
 
diff --git a/dev-tools/make/xpack.mk b/dev-tools/make/xpack.mk
index b476047dc7de..3210cb8fca01 100644
--- a/dev-tools/make/xpack.mk
+++ b/dev-tools/make/xpack.mk
@@ -4,8 +4,8 @@
 #
 # Variables
 #
-PWD           := $(CURDIR)
 .DEFAULT_GOAL := help
+PWD           := $(CURDIR)
 
 #
 # Includes
@@ -13,8 +13,12 @@ PWD           := $(CURDIR)
 include $(ES_BEATS)/dev-tools/make/mage.mk
 
 #
-# Targets
+# Targets (alphabetically sorted).
 #
+.PHONY: check
+check: mage
+	mage check
+
 .PHONY: clean
 clean: mage
 	mage clean
@@ -23,16 +27,17 @@ clean: mage
 fmt: mage
 	mage fmt
 
-.PHONY: check
-check: mage
-	mage check
-
-.PHONY: testsuite
-testsuite: mage
-	mage update build unitTest integTest
-
 # Default target.
 .PHONY: help
 help:
 	@echo Use mage rather than make. Here are the available mage targets:
 	@mage -l
+
+.PHONY: testsuite
+testsuite: mage
+	mage update build unitTest integTest
+
+.PHONY: update
+update: mage
+	mage update
+

From a6de97aa1854d3196b7b6d1ae6fbfeb7130eedf8 Mon Sep 17 00:00:00 2001
From: Andrew Kroh <andrew.kroh@elastic.co>
Date: Mon, 26 Nov 2018 10:00:11 -0500
Subject: [PATCH 3/9] Add docker to packages.yml for x-pack/metricbeat

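The docker spec reuses the shared packaging fragments via YAML merge keys
(<<:). The *docker_spec, *elastic_docker_spec, and *elastic_license_for_binaries
anchors come from the shared packaging spec under dev-tools, so this file only
lists the metricbeat-specific additions: the config file taken from the OSS
beat, the generated modules.d directory, and the Kibana dashboards.
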
(cherry picked from commit 0a39283d07e992452d74e58ee50cdb2bb259efe8)
---
 x-pack/metricbeat/packages.yml | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/x-pack/metricbeat/packages.yml b/x-pack/metricbeat/packages.yml
index ad3f16aba8b9..a0cf52675333 100644
--- a/x-pack/metricbeat/packages.yml
+++ b/x-pack/metricbeat/packages.yml
@@ -69,3 +69,22 @@ specs:
         '/usr/share/{{.BeatName}}/kibana':
           source: build/kibana
           mode: 0644
+
+  - os: linux
+    types: [docker]
+    spec:
+      <<: *docker_spec
+      <<: *elastic_docker_spec
+      <<: *elastic_license_for_binaries
+      files:
+        '{{.BeatName}}.yml':
+          source: '../../metricbeat/metricbeat.docker.yml'
+          mode: 0600
+          config: true
+        modules.d:
+          mode: 0644
+          source: build/package/modules.d
+          config: true
+        kibana:
+          source: build/kibana
+          mode: 0644

From cffa5254a91da595967f8667c9648a7f4cb354b8 Mon Sep 17 00:00:00 2001
From: sayden <mariocaster@gmail.com>
Date: Mon, 26 Nov 2018 20:39:45 +0100
Subject: [PATCH 4/9] Added Foo module

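The module follows the standard Metricbeat layout: init() registers the
foo/bar metricset with mb.Registry, New unpacks the (empty) module config,
and Fetch is called once per configured period to report an event. A unit
test could look like the following sketch; it assumes the ReporterV2 test
helpers from metricbeat/mb/testing, which are not part of this patch:

    package bar_test

    import (
        "testing"

        mbtest "github.com/elastic/beats/metricbeat/mb/testing"

        _ "github.com/elastic/beats/x-pack/metricbeat/module/foo/bar"
    )

    func TestFetch(t *testing.T) {
        config := map[string]interface{}{
            "module":     "foo",
            "metricsets": []string{"bar"},
            "hosts":      []string{"localhost"},
        }

        // Build the metricset from the config and run one fetch cycle.
        ms := mbtest.NewReportingMetricSetV2(t, config)
        events, errs := mbtest.ReportingFetchV2(ms)
        if len(errs) > 0 {
            t.Fatal(errs)
        }
        t.Logf("got %d event(s)", len(events))
    }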
---
 x-pack/metricbeat/module/foo/_meta/config.yml |  6 +++
 .../metricbeat/module/foo/_meta/docs.asciidoc |  2 +
 x-pack/metricbeat/module/foo/_meta/fields.yml | 11 ++++
 .../metricbeat/module/foo/bar/_meta/data.json | 19 +++++++
 .../module/foo/bar/_meta/docs.asciidoc        |  1 +
 .../module/foo/bar/_meta/fields.yml           |  9 ++++
 x-pack/metricbeat/module/foo/bar/bar.go       | 52 +++++++++++++++++++
 x-pack/metricbeat/module/foo/doc.go           |  2 +
 8 files changed, 102 insertions(+)
 create mode 100644 x-pack/metricbeat/module/foo/_meta/config.yml
 create mode 100644 x-pack/metricbeat/module/foo/_meta/docs.asciidoc
 create mode 100644 x-pack/metricbeat/module/foo/_meta/fields.yml
 create mode 100644 x-pack/metricbeat/module/foo/bar/_meta/data.json
 create mode 100644 x-pack/metricbeat/module/foo/bar/_meta/docs.asciidoc
 create mode 100644 x-pack/metricbeat/module/foo/bar/_meta/fields.yml
 create mode 100644 x-pack/metricbeat/module/foo/bar/bar.go
 create mode 100644 x-pack/metricbeat/module/foo/doc.go

diff --git a/x-pack/metricbeat/module/foo/_meta/config.yml b/x-pack/metricbeat/module/foo/_meta/config.yml
new file mode 100644
index 000000000000..2e6466ec30ed
--- /dev/null
+++ b/x-pack/metricbeat/module/foo/_meta/config.yml
@@ -0,0 +1,6 @@
+- module: foo
+  metricsets: ["bar"]
+  enabled: false
+  period: 10s
+  hosts: ["localhost"]
+
diff --git a/x-pack/metricbeat/module/foo/_meta/docs.asciidoc b/x-pack/metricbeat/module/foo/_meta/docs.asciidoc
new file mode 100644
index 000000000000..7111494befda
--- /dev/null
+++ b/x-pack/metricbeat/module/foo/_meta/docs.asciidoc
@@ -0,0 +1,2 @@
+This is the foo module.
+
diff --git a/x-pack/metricbeat/module/foo/_meta/fields.yml b/x-pack/metricbeat/module/foo/_meta/fields.yml
new file mode 100644
index 000000000000..b4aa91edc51b
--- /dev/null
+++ b/x-pack/metricbeat/module/foo/_meta/fields.yml
@@ -0,0 +1,11 @@
+- key: foo
+  title: "foo"
+  description: >
+    experimental[]
+
+    foo module
+  fields:
+    - name: foo
+      type: group
+      description: >
+      fields:
diff --git a/x-pack/metricbeat/module/foo/bar/_meta/data.json b/x-pack/metricbeat/module/foo/bar/_meta/data.json
new file mode 100644
index 000000000000..f86e348898cf
--- /dev/null
+++ b/x-pack/metricbeat/module/foo/bar/_meta/data.json
@@ -0,0 +1,19 @@
+{
+    "@timestamp":"2016-05-23T08:05:34.853Z",
+    "beat":{
+        "hostname":"beathost",
+        "name":"beathost"
+    },
+    "metricset":{
+        "host":"localhost",
+        "module":"foo",
+        "name":"bar",
+        "rtt":44269
+    },
+    "foo":{
+        "bar":{
+            "example": "bar"
+        }
+    },
+    "type":"metricsets"
+}
diff --git a/x-pack/metricbeat/module/foo/bar/_meta/docs.asciidoc b/x-pack/metricbeat/module/foo/bar/_meta/docs.asciidoc
new file mode 100644
index 000000000000..d1fc97c6d4c7
--- /dev/null
+++ b/x-pack/metricbeat/module/foo/bar/_meta/docs.asciidoc
@@ -0,0 +1 @@
+This is the bar metricset of the module foo.
diff --git a/x-pack/metricbeat/module/foo/bar/_meta/fields.yml b/x-pack/metricbeat/module/foo/bar/_meta/fields.yml
new file mode 100644
index 000000000000..481f212d17d6
--- /dev/null
+++ b/x-pack/metricbeat/module/foo/bar/_meta/fields.yml
@@ -0,0 +1,9 @@
+- name: bar
+  type: group
+  description: >
+    bar
+  fields:
+    - name: example
+      type: keyword
+      description: >
+        Example field
diff --git a/x-pack/metricbeat/module/foo/bar/bar.go b/x-pack/metricbeat/module/foo/bar/bar.go
new file mode 100644
index 000000000000..25ae8a8d4249
--- /dev/null
+++ b/x-pack/metricbeat/module/foo/bar/bar.go
@@ -0,0 +1,52 @@
+package bar
+
+import (
+	"github.com/elastic/beats/libbeat/common"
+	"github.com/elastic/beats/libbeat/common/cfgwarn"
+	"github.com/elastic/beats/metricbeat/mb"
+)
+
+// init registers the MetricSet with the central registry as soon as the program
+// starts. The New function will be called later to instantiate an instance of
+// the MetricSet for each host defined in the module's configuration. After the
+// MetricSet has been created then Fetch will begin to be called periodically.
+func init() {
+	mb.Registry.MustAddMetricSet("foo", "bar", New)
+}
+
+// MetricSet holds any configuration or state information. It must implement
+// the mb.MetricSet interface. And this is best achieved by embedding
+// mb.BaseMetricSet because it implements all of the required mb.MetricSet
+// interface methods except for Fetch.
+type MetricSet struct {
+	mb.BaseMetricSet
+	counter int
+}
+
+// New creates a new instance of the MetricSet. New is responsible for unpacking
+// any MetricSet specific configuration options if there are any.
+func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
+	cfgwarn.Experimental("The foo bar metricset is experimental.")
+
+	config := struct{}{}
+	if err := base.Module().UnpackConfig(&config); err != nil {
+		return nil, err
+	}
+
+	return &MetricSet{
+		BaseMetricSet: base,
+		counter:       1,
+	}, nil
+}
+
+// Fetch methods implements the data gathering and data conversion to the right
+// format. It publishes the event which is then forwarded to the output. In case
+// of an error set the Error field of mb.Event or simply call report.Error().
+func (m *MetricSet) Fetch(report mb.ReporterV2) {
+	report.Event(mb.Event{
+		MetricSetFields: common.MapStr{
+			"counter": m.counter,
+		},
+	})
+	m.counter++
+}
diff --git a/x-pack/metricbeat/module/foo/doc.go b/x-pack/metricbeat/module/foo/doc.go
new file mode 100644
index 000000000000..91cc5628c37e
--- /dev/null
+++ b/x-pack/metricbeat/module/foo/doc.go
@@ -0,0 +1,2 @@
+// Package foo is a Metricbeat module that contains MetricSets.
+package foo

From fd128f8cdd77a3ed4bfdbc7fc4d59e227a83b745 Mon Sep 17 00:00:00 2001
From: sayden <mariocaster@gmail.com>
Date: Mon, 26 Nov 2018 21:58:26 +0100
Subject: [PATCH 5/9] updates after mage fmt update command

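These are the artifacts of the two commands: mage fmt prepends the Elastic
license header to the x-pack sources, and mage update regenerates
module/foo/fields.go, which embeds the module's fields.yml as a
zlib-compressed, base64-encoded asset and registers it at init time via
asset.SetFields.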
---
 x-pack/metricbeat/module/foo/bar/bar.go |  4 ++++
 x-pack/metricbeat/module/foo/doc.go     |  4 ++++
 x-pack/metricbeat/module/foo/fields.go  | 22 ++++++++++++++++++++++
 3 files changed, 30 insertions(+)
 create mode 100644 x-pack/metricbeat/module/foo/fields.go

diff --git a/x-pack/metricbeat/module/foo/bar/bar.go b/x-pack/metricbeat/module/foo/bar/bar.go
index 25ae8a8d4249..2102f9576447 100644
--- a/x-pack/metricbeat/module/foo/bar/bar.go
+++ b/x-pack/metricbeat/module/foo/bar/bar.go
@@ -1,3 +1,7 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
 package bar
 
 import (
diff --git a/x-pack/metricbeat/module/foo/doc.go b/x-pack/metricbeat/module/foo/doc.go
index 91cc5628c37e..bf4f382834a6 100644
--- a/x-pack/metricbeat/module/foo/doc.go
+++ b/x-pack/metricbeat/module/foo/doc.go
@@ -1,2 +1,6 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
 // Package foo is a Metricbeat module that contains MetricSets.
 package foo
diff --git a/x-pack/metricbeat/module/foo/fields.go b/x-pack/metricbeat/module/foo/fields.go
new file mode 100644
index 000000000000..744f92643b13
--- /dev/null
+++ b/x-pack/metricbeat/module/foo/fields.go
@@ -0,0 +1,22 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+// Code generated by beats/dev-tools/cmd/asset/asset.go - DO NOT EDIT.
+
+package foo
+
+import (
+	"github.com/elastic/beats/libbeat/asset"
+)
+
+func init() {
+	if err := asset.SetFields("metricbeat", "foo", Asset); err != nil {
+		panic(err)
+	}
+}
+
+// Asset returns asset data
+func Asset() string {
+	return "eJx8j0EOwiAQRfdzip/uewEW7jyFcYEyGFLoEEpje3vTooY21bf8E94LLTqeFawIAdllzwqNFWkIMDzck4vZSa9wIgDgKXJygfus/eVK62ZFEMSMngmwjr0Z1Hpo0evAH/lCniMrPJKM8b0cNLaSWnTT6bsdyX4KC9vn+0gd4kmHuP6npgQ7np+SzO72J7twLsISpVcAAAD//+HhYIk="
+}

From d7eda29f3072fc8f2b3e7f53a22f047d9b769be0 Mon Sep 17 00:00:00 2001
From: sayden <mariocaster@gmail.com>
Date: Mon, 26 Nov 2018 22:12:12 +0100
Subject: [PATCH 6/9] fix a couple of minor things within magefile

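In particular, the doc comment on PythonIntegTest now matches the function
name, and the temporary build/config.modules.yml is removed again once the
reference config has been generated (the defer os.Remove call was previously
commented out).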
---
 x-pack/metricbeat/magefile.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/x-pack/metricbeat/magefile.go b/x-pack/metricbeat/magefile.go
index bb61140279b9..6d54031a64a8 100644
--- a/x-pack/metricbeat/magefile.go
+++ b/x-pack/metricbeat/magefile.go
@@ -146,7 +146,7 @@ func PythonUnitTest() error {
 	return mage.PythonNoseTest(mage.DefaultPythonTestUnitArgs())
 }
 
-// PythonUnitTest executes the python system tests in the integration environment (Docker).
+// PythonIntegTest executes the python system tests in the integration environment (Docker).
 func PythonIntegTest(ctx context.Context) error {
 	if !mage.IsInIntegTestEnv() {
 		mg.Deps(Fields)
@@ -221,7 +221,7 @@ func referenceConfig() error {
 	if err != nil {
 		return err
 	}
-	//defer os.Remove(modulesConfigYml)
+	defer os.Remove(modulesConfigYml)
 
 	var configParts = []string{
 		mage.OSSBeatDir("_meta/common.reference.yml"),

From 213fe3188453d12bbe94587a43c3306fdf172a46 Mon Sep 17 00:00:00 2001
From: sayden <mariocaster@gmail.com>
Date: Mon, 26 Nov 2018 23:19:43 +0100
Subject: [PATCH 7/9] Add a near empty docker-compose.yml file

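This mirrors the OSS metricbeat test environment: the beat container is built
from metricbeat/Dockerfile, mounts the repository and the Docker socket, and
runs make inside the x-pack/metricbeat working directory. Together with the
docker-compose -p <project name> change from the first patch, this should
keep the x-pack compose project separate from the OSS one.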
---
 x-pack/metricbeat/docker-compose.yml | 13 +++++++++++++
 1 file changed, 13 insertions(+)
 create mode 100644 x-pack/metricbeat/docker-compose.yml

diff --git a/x-pack/metricbeat/docker-compose.yml b/x-pack/metricbeat/docker-compose.yml
new file mode 100644
index 000000000000..43df0f975236
--- /dev/null
+++ b/x-pack/metricbeat/docker-compose.yml
@@ -0,0 +1,13 @@
+version: '2.1'
+services:
+  beat:
+    build: ../../metricbeat
+    environment:
+      - TEST_ENVIRONMENT=false
+    working_dir: /go/src/github.com/elastic/beats/x-pack/metricbeat
+    volumes:
+      - ${PWD}/../..:/go/src/github.com/elastic/beats/
+      - /var/run/docker.sock:/var/run/docker.sock
+    command: make
+    env_file:
+      - ./module/mssql/_meta/env

From d302141113ae961203b850eaebbaa07023006e85 Mon Sep 17 00:00:00 2001
From: Andrew Kroh <andrew.kroh@elastic.co>
Date: Mon, 26 Nov 2018 20:02:29 -0500
Subject: [PATCH 8/9] Remove mssql env_file

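The compose file referenced ./module/mssql/_meta/env, which does not exist in
this series; docker-compose refuses to start a service whose env_file is
missing, so the reference is dropped.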
---
 x-pack/metricbeat/docker-compose.yml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/x-pack/metricbeat/docker-compose.yml b/x-pack/metricbeat/docker-compose.yml
index 43df0f975236..324849d7c1d0 100644
--- a/x-pack/metricbeat/docker-compose.yml
+++ b/x-pack/metricbeat/docker-compose.yml
@@ -9,5 +9,3 @@ services:
       - ${PWD}/../..:/go/src/github.com/elastic/beats/
       - /var/run/docker.sock:/var/run/docker.sock
     command: make
-    env_file:
-      - ./module/mssql/_meta/env

From a2b519f7f1a1d06b01c51a3993e1a7552d9ef272 Mon Sep 17 00:00:00 2001
From: sayden <mariocaster@gmail.com>
Date: Tue, 27 Nov 2018 08:14:28 +0100
Subject: [PATCH 9/9] Added x-pack/metricbeat/metricbeat.reference.yml,
 x-pack/metricbeat/metricbeat.yml, and include/list.go

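include/list.go exists so that the x-pack modules are compiled into the beat:
the blank imports trigger each module's init() registration with mb.Registry.
The file, like the reference and short configs added here, is produced by
mage update (see GenerateModuleIncludeListGo in the magefile).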
---
 x-pack/metricbeat/include/list.go          |   13 +
 x-pack/metricbeat/metricbeat.reference.yml | 1828 ++++++++++++++++++++
 x-pack/metricbeat/metricbeat.yml           |  148 ++
 3 files changed, 1989 insertions(+)
 create mode 100644 x-pack/metricbeat/include/list.go
 create mode 100644 x-pack/metricbeat/metricbeat.reference.yml
 create mode 100644 x-pack/metricbeat/metricbeat.yml

diff --git a/x-pack/metricbeat/include/list.go b/x-pack/metricbeat/include/list.go
new file mode 100644
index 000000000000..601cf3a12201
--- /dev/null
+++ b/x-pack/metricbeat/include/list.go
@@ -0,0 +1,13 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+// Code generated by beats/dev-tools/module_include_list/module_include_list.go - DO NOT EDIT.
+
+package include
+
+import (
+	// Import modules.
+	_ "github.com/elastic/beats/x-pack/metricbeat/module/foo"
+	_ "github.com/elastic/beats/x-pack/metricbeat/module/foo/bar"
+)
diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml
new file mode 100644
index 000000000000..dc7162238060
--- /dev/null
+++ b/x-pack/metricbeat/metricbeat.reference.yml
@@ -0,0 +1,1828 @@
+########################## Metricbeat Configuration ###########################
+
+# This file is a full configuration example documenting all non-deprecated
+# options in comments. For a shorter configuration example that contains only
+# the most common options, please see metricbeat.yml in the same directory.
+#
+# You can find the full configuration reference here:
+# https://www.elastic.co/guide/en/beats/metricbeat/index.html
+
+#============================  Config Reloading ===============================
+
+# Config reloading allows you to dynamically load modules. Each file which is
+# monitored must contain one or multiple modules as a list.
+metricbeat.config.modules:
+
+  # Glob pattern for configuration reloading
+  path: ${path.config}/conf.d/*.yml
+
+  # Period on which files under path should be checked for changes
+  reload.period: 10s
+
+  # Set to true to enable config reloading
+  reload.enabled: false
+
+# Maximum amount of time to randomly delay the start of a metricset. Use 0 to
+# disable startup delay.
+metricbeat.max_start_delay: 10s
+
+#============================== Autodiscover ===================================
+
+# Autodiscover allows you to detect changes in the system and spawn new modules
+# as they happen.
+
+#metricbeat.autodiscover:
+  # List of enabled autodiscover providers
+#  providers:
+#    - type: docker
+#      templates:
+#        - condition:
+#            equals.docker.container.image: etcd
+#          config:
+#            - module: etcd
+#              metricsets: ["leader", "self", "store"]
+#              period: 10s
+#              hosts: ["${host}:2379"]
+
+#==========================  Modules configuration =============================
+metricbeat.modules:
+
+#-------------------------------- System Module --------------------------------
+- module: system
+  metricsets:
+    - cpu             # CPU usage
+    - load            # CPU load averages
+    - memory          # Memory usage
+    - network         # Network IO
+    - process         # Per process metrics
+    - process_summary # Process summary
+    - uptime          # System Uptime
+    #- core           # Per CPU core usage
+    #- diskio         # Disk IO
+    #- filesystem     # File system usage for each mountpoint
+    #- fsstat         # File system summary metrics
+    #- raid           # Raid
+    #- socket         # Sockets and connection info (linux only)
+  enabled: true
+  period: 10s
+  processes: ['.*']
+
+  # Configure the metric types that are included by these metricsets.
+  cpu.metrics:  ["percentages"]  # The other available options are normalized_percentages and ticks.
+  core.metrics: ["percentages"]  # The other available option is ticks.
+
+  # A list of filesystem types to ignore. The filesystem metricset will not
+  # collect data from filesystems matching any of the specified types, and
+  # fsstats will not include data from these filesystems in its summary stats.
+  # If not set, types associated with virtual filesystems are automatically
+  # added when this information is available in the system (e.g. the list of
+  # `nodev` types in `/proc/filesystem`).
+  #filesystem.ignore_types: []
+
+  # These options allow you to filter out all processes that are not
+  # in the top N by CPU or memory, in order to reduce the number of documents created.
+  # If both the `by_cpu` and `by_memory` options are used, the union of the two sets
+  # is included.
+  #process.include_top_n:
+
+    # Set to false to disable this feature and include all processes
+    #enabled: true
+
+    # How many processes to include from the top by CPU. The processes are sorted
+    # by the `system.process.cpu.total.pct` field.
+    #by_cpu: 0
+
+    # How many processes to include from the top by memory. The processes are sorted
+    # by the `system.process.memory.rss.bytes` field.
+    #by_memory: 0
+
+  # If false, cmdline of a process is not cached.
+  #process.cmdline.cache.enabled: true
+
+  # Enable collection of cgroup metrics from processes on Linux.
+  #process.cgroups.enabled: true
+
+  # A list of regular expressions used to whitelist environment variables
+  # reported with the process metricset's events. Defaults to empty.
+  #process.env.whitelist: []
+
+  # Include the cumulative CPU tick values with the process metrics. Defaults
+  # to false.
+  #process.include_cpu_ticks: false
+
+  # Raid mount point to monitor
+  #raid.mount_point: '/'
+
+  # Configure reverse DNS lookup on remote IP addresses in the socket metricset.
+  #socket.reverse_lookup.enabled: false
+  #socket.reverse_lookup.success_ttl: 60s
+  #socket.reverse_lookup.failure_ttl: 60s
+
+  # Diskio configurations
+  #diskio.include_devices: []
+
+#------------------------------ Aerospike Module ------------------------------
+- module: aerospike
+  metricsets: ["namespace"]
+  enabled: true
+  period: 10s
+  hosts: ["localhost:3000"]
+
+#-------------------------------- Apache Module --------------------------------
+- module: apache
+  metricsets: ["status"]
+  period: 10s
+  enabled: true
+
+  # Apache hosts
+  hosts: ["http://127.0.0.1"]
+
+  # Path to server status. Default server-status
+  #server_status_path: "server-status"
+
+  # Username of hosts.  Empty by default
+  #username: username
+
+  # Password of hosts. Empty by default
+  #password: password
+
+#--------------------------------- Ceph Module ---------------------------------
+- module: ceph
+  metricsets: ["cluster_disk", "cluster_health", "monitor_health", "pool_disk", "osd_tree"]
+  period: 10s
+  hosts: ["localhost:5000"]
+  enabled: true
+
+#------------------------------ Couchbase Module ------------------------------
+- module: couchbase
+  metricsets: ["bucket", "cluster", "node"]
+  period: 10s
+  hosts: ["localhost:8091"]
+  enabled: true
+
+#-------------------------------- Docker Module --------------------------------
+- module: docker
+  metricsets:
+    - "container"
+    - "cpu"
+    - "diskio"
+    - "healthcheck"
+    - "info"
+    #- "image"
+    - "memory"
+    - "network"
+  hosts: ["unix:///var/run/docker.sock"]
+  period: 10s
+  enabled: true
+
+  # If set to true, replace dots in labels with `_`.
+  #labels.dedot: false
+
+  # If set to true, collects metrics per core.
+  #cpu.cores: true
+
+  # To connect to Docker over TLS you must specify a client and CA certificate.
+  #ssl:
+    #certificate_authority: "/etc/pki/root/ca.pem"
+    #certificate:           "/etc/pki/client/cert.pem"
+    #key:                   "/etc/pki/client/cert.key"
+
+#------------------------------ Dropwizard Module ------------------------------
+- module: dropwizard
+  metricsets: ["collector"]
+  period: 10s
+  hosts: ["localhost:8080"]
+  metrics_path: /metrics/metrics
+  namespace: example
+  enabled: true
+
+#---------------------------- Elasticsearch Module ----------------------------
+- module: elasticsearch
+  metricsets:
+    - node
+    - node_stats
+    #- index
+    #- index_recovery
+    #- index_summary
+    #- shard
+    #- ml_job
+  period: 10s
+  hosts: ["http://localhost:9200"]
+  #username: "elastic"
+  #password: "changeme"
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Set to false to fetch all entries
+  #index_recovery.active_only: true
+
+#------------------------------ Envoyproxy Module ------------------------------
+- module: envoyproxy
+  metricsets: ["server"]
+  period: 10s
+  hosts: ["localhost:9901"]
+
+#--------------------------------- Etcd Module ---------------------------------
+- module: etcd
+  metricsets: ["leader", "self", "store"]
+  period: 10s
+  hosts: ["localhost:2379"]
+
+#--------------------------------- Foo Module ---------------------------------
+- module: foo
+  metricsets: ["bar"]
+  enabled: false
+  period: 10s
+  hosts: ["localhost"]
+
+
+#-------------------------------- Golang Module --------------------------------
+- module: golang
+  #metricsets:
+  #  - expvar
+  #  - heap
+  period: 10s
+  hosts: ["localhost:6060"]
+  heap.path: "/debug/vars"
+  expvar:
+    namespace: "example"
+    path: "/debug/vars"
+
+#------------------------------- Graphite Module -------------------------------
+- module: graphite
+  metricsets: ["server"]
+  enabled: true
+
+  # Host address to listen on. Default localhost.
+  #host: localhost
+
+  # Listening port. Default 2003.
+  #port: 2003
+
+  # Protocol to listen on. This can be udp or tcp. Default udp.
+  #protocol: "udp"
+
+  # Receive buffer size in bytes
+  #receive_buffer_size: 1024
+
+  #templates:
+  #  - filter: "test.*.bash.*" # This would match metrics like test.localhost.bash.stats
+  #    namespace: "test"
+  #    template: ".host.shell.metric*" # test.localhost.bash.stats would become metric=stats and tags host=localhost,shell=bash
+  #    delimiter: "_"
+
+
+#------------------------------- HAProxy Module -------------------------------
+- module: haproxy
+  metricsets: ["info", "stat"]
+  period: 10s
+  hosts: ["tcp://127.0.0.1:14567"]
+  enabled: true
+
+#--------------------------------- HTTP Module ---------------------------------
+- module: http
+  #metricsets:
+  #  - json
+  period: 10s
+  hosts: ["localhost:80"]
+  namespace: "json_namespace"
+  path: "/"
+  #body: ""
+  #method: "GET"
+  #username: "user"
+  #password: "secret"
+  #request.enabled: false
+  #response.enabled: false
+  #json.is_array: false
+  #dedot.enabled: false
+
+- module: http
+  #metricsets:
+  #  - server
+  host: "localhost"
+  port: "8080"
+  enabled: false
+  #paths:
+  #  - path: "/foo"
+  #    namespace: "foo"
+  #    fields: # added to the response in root. Overwrites existing fields
+  #      key: "value"
+
+#------------------------------- Jolokia Module -------------------------------
+- module: jolokia
+  #metricsets: ["jmx"]
+  period: 10s
+  hosts: ["localhost"]
+  namespace: "metrics"
+  #path: "/jolokia/?ignoreErrors=true&canonicalNaming=false"
+  #username: "user"
+  #password: "secret"
+  jmx.mappings:
+    #- mbean: 'java.lang:type=Runtime'
+    #  attributes:
+    #    - attr: Uptime
+    #      field: uptime
+    #- mbean: 'java.lang:type=Memory'
+    #  attributes:
+    #    - attr: HeapMemoryUsage
+    #      field: memory.heap_usage
+    #    - attr: NonHeapMemoryUsage
+    #      field: memory.non_heap_usage
+    # GC Metrics - this depends on what is available on your JVM
+    #- mbean: 'java.lang:type=GarbageCollector,name=ConcurrentMarkSweep'
+    #  attributes:
+    #    - attr: CollectionTime
+    #      field: gc.cms_collection_time
+    #    - attr: CollectionCount
+    #      field: gc.cms_collection_count
+
+  jmx.application:
+  jmx.instance:
+
+#-------------------------------- Kafka Module --------------------------------
+- module: kafka
+  metricsets: ["consumergroup", "partition"]
+  period: 10s
+  hosts: ["localhost:9092"]
+  enabled: true
+
+  #client_id: metricbeat
+  #retries: 3
+  #backoff: 250ms
+
+  # List of Topics to query metadata for. If empty, all topics will be queried.
+  #topics: []
+
+  # Optional SSL. By default is off.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # SASL authentication
+  #username: ""
+  #password: ""
+
+#-------------------------------- Kibana Module --------------------------------
+- module: kibana
+  metricsets: ["status"]
+  period: 10s
+  hosts: ["localhost:5601"]
+  basepath: ""
+  enabled: true
+
+#------------------------------ Kubernetes Module ------------------------------
+# Node metrics, from kubelet:
+- module: kubernetes
+  metricsets:
+    - container
+    - node
+    - pod
+    - system
+    - volume
+  period: 10s
+  hosts: ["localhost:10255"]
+  enabled: true
+  #bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+  #ssl.certificate_authorities:
+  #  - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Enriching parameters:
+  add_metadata: true
+  in_cluster: true
+  # When used outside the cluster:
+  #host: node_name
+  #kube_config: ~/.kube/config
+
+# State metrics from kube-state-metrics service:
+- module: kubernetes
+  enabled: true
+  metricsets:
+    - state_node
+    - state_deployment
+    - state_replicaset
+    - state_statefulset
+    - state_pod
+    - state_container
+  period: 10s
+  hosts: ["kube-state-metrics:8080"]
+
+  # Enriching parameters:
+  add_metadata: true
+  in_cluster: true
+  # When used outside the cluster:
+  #host: node_name
+  #kube_config: ~/.kube/config
+
+# Kubernetes events
+- module: kubernetes
+  enabled: true
+  metricsets:
+    - event
+
+# Kubernetes API server
+- module: kubernetes
+  enabled: true
+  metricsets:
+    - apiserver
+  hosts: ["https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}"]
+
+#--------------------------------- Kvm Module ---------------------------------
+- module: kvm
+  metricsets: ["dommemstat"]
+  enabled: true
+  period: 10s
+  hosts: ["unix:///var/run/libvirt/libvirt-sock"]
+  # For remote hosts, setup network access in libvirtd.conf
+  # and use the tcp scheme:
+  # hosts: [ "tcp://<host>:16509" ]
+
+  # Timeout to connect to Libvirt server
+  #timeout: 1s
+
+#------------------------------- Logstash Module -------------------------------
+- module: logstash
+  metricsets: ["node", "node_stats"]
+  enabled: true
+  period: 10s
+  hosts: ["localhost:9600"]
+
+#------------------------------ Memcached Module ------------------------------
+- module: memcached
+  metricsets: ["stats"]
+  period: 10s
+  hosts: ["localhost:11211"]
+  enabled: true
+
+#------------------------------- MongoDB Module -------------------------------
+- module: mongodb
+  metricsets: ["dbstats", "status", "collstats", "metrics", "replstatus"]
+  period: 10s
+  enabled: true
+
+  # The hosts must be passed as MongoDB URLs in the format:
+  # [mongodb://][user:pass@]host[:port].
+  # The username and password can also be set using the respective configuration
+  # options. The credentials in the URL take precedence over the username and
+  # password configuration options.
+  hosts: ["localhost:27017"]
+
+  # Optional SSL. By default is off.
+  #ssl.enabled: true
+
+  # Mode of verification of server certificate ('none' or 'full')
+  #ssl.verification_mode: 'full'
+
+  # List of root certificates for TLS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Username to use when connecting to MongoDB. Empty by default.
+  #username: user
+
+  # Password to use when connecting to MongoDB. Empty by default.
+  #password: pass
+
+#-------------------------------- Munin Module --------------------------------
+- module: munin
+  metricsets: ["node"]
+  enabled: true
+  period: 10s
+  hosts: ["localhost:4949"]
+  node.namespace: node
+
+#-------------------------------- MySQL Module --------------------------------
+- module: mysql
+  metricsets:
+    - "status"
+  #  - "galera_status"
+  period: 10s
+
+  # Host DSN should be defined as "user:pass@tcp(127.0.0.1:3306)/"
+  # The username and password can either be set in the DSN or using the username
+  # and password config options. Those specified in the DSN take precedence.
+  hosts: ["root:secret@tcp(127.0.0.1:3306)/"]
+
+  # Username of hosts. Empty by default.
+  #username: root
+
+  # Password of hosts. Empty by default.
+  #password: secret
+
+  # By setting raw to true, all raw fields from the status metricset will be added to the event.
+  #raw: false
+
+#-------------------------------- Nginx Module --------------------------------
+- module: nginx
+  metricsets: ["stubstatus"]
+  enabled: true
+  period: 10s
+
+  # Nginx hosts
+  hosts: ["http://127.0.0.1"]
+
+  # Path to server status. Default server-status
+  server_status_path: "server-status"
+
+#------------------------------- PHP_FPM Module -------------------------------
+- module: php_fpm
+  metricsets:
+  - pool
+  #- process
+  enabled: true
+  period: 10s
+  status_path: "/status"
+  hosts: ["localhost:8080"]
+
+#------------------------------ PostgreSQL Module ------------------------------
+- module: postgresql
+  enabled: true
+  metricsets:
+    # Stats about every PostgreSQL database
+    - database
+
+    # Stats about the background writer process's activity
+    - bgwriter
+
+    # Stats about every PostgreSQL process
+    - activity
+
+  period: 10s
+
+  # The host must be passed as PostgreSQL URL. Example:
+  # postgres://localhost:5432?sslmode=disable
+  # The available parameters are documented here:
+  # https://godoc.org/github.com/lib/pq#hdr-Connection_String_Parameters
+  hosts: ["postgres://localhost:5432"]
+
+  # Username to use when connecting to PostgreSQL. Empty by default.
+  #username: user
+
+  # Password to use when connecting to PostgreSQL. Empty by default.
+  #password: pass
+
+#------------------------------ Prometheus Module ------------------------------
+- module: prometheus
+  metricsets: ["stats"]
+  enabled: true
+  period: 10s
+  hosts: ["localhost:9090"]
+  #metrics_path: /metrics
+  #namespace: example
+
+- module: prometheus
+  metricsets: ["collector"]
+  enabled: true
+  period: 10s
+  hosts: ["localhost:9090"]
+  #metrics_path: /metrics
+  #namespace: example
+
+  # This can be used for service account based authorization:
+  #  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+  #ssl.certificate_authorities:
+  #  - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
+
+#------------------------------- RabbitMQ Module -------------------------------
+- module: rabbitmq
+  metricsets: ["node", "queue", "connection"]
+  enabled: true
+  period: 10s
+  hosts: ["localhost:15672"]
+
+  # Management path prefix. If `management.path_prefix` is set in the RabbitMQ
+  # configuration, it has to be set to the same value.
+  #management_path_prefix: ""
+
+  #username: guest
+  #password: guest
+
+#-------------------------------- Redis Module --------------------------------
+- module: redis
+  metricsets: ["info", "keyspace"]
+  enabled: true
+  period: 10s
+
+  # Redis hosts
+  hosts: ["127.0.0.1:6379"]
+
+  # Timeout after which time a metricset should return an error
+  # Timeout is by default defined as period, as a fetch of a metricset
+  # should never take longer than period, as otherwise calls can pile up.
+  #timeout: 1s
+
+  # Optional fields to be added to each event
+  #fields:
+  #  datacenter: west
+
+  # Network type to be used for redis connection. Default: tcp
+  #network: tcp
+
+  # Max number of concurrent connections. Default: 10
+  #maxconn: 10
+
+  # Filters can be used to reduce the number of fields sent.
+  #processors:
+  #  - include_fields:
+  #      fields: ["beat", "metricset", "redis.info.stats"]
+
+  # Redis AUTH password. Empty by default.
+  #password: foobared
+
+#------------------------------- Traefik Module -------------------------------
+- module: traefik
+  metricsets: ["health"]
+  period: 10s
+  hosts: ["localhost:8080"]
+
+#-------------------------------- Uwsgi Module --------------------------------
+- module: uwsgi
+  metricsets: ["status"]
+  enabled: true
+  period: 10s
+  hosts: ["tcp://127.0.0.1:9191"]
+
+#------------------------------- VSphere Module -------------------------------
+- module: vsphere
+  enabled: true
+  metricsets: ["datastore", "host", "virtualmachine"]
+  period: 10s
+  hosts: ["https://localhost/sdk"]
+
+  username: "user"
+  password: "password"
+  # If insecure is true, don't verify the server's certificate chain
+  insecure: false
+  # Get custom fields when using virtualmachine metric set. Default false.
+  # get_custom_fields: false
+
+#------------------------------- Windows Module -------------------------------
+- module: windows
+  metricsets: ["perfmon"]
+  enabled: true
+  period: 10s
+  perfmon.ignore_non_existent_counters: true
+  perfmon.counters:
+  #  - instance_label: processor.name
+  #    instance_name: total
+  #    measurement_label: processor.time.total.pct
+  #    query: '\Processor Information(_Total)\% Processor Time'
+
+- module: windows
+  metricsets: ["service"]
+  enabled: true
+  period: 60s
+
+#------------------------------ ZooKeeper Module ------------------------------
+- module: zookeeper
+  enabled: true
+  metricsets: ["mntr"]
+  period: 10s
+  hosts: ["localhost:2181"]
+
+
+
+#================================ General ======================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+# If this option is not defined, the hostname is used.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published. Tags make it easy to group servers by different
+# logical properties.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output. Fields can be scalar values, arrays, dictionaries, or any nested
+# combination of these.
+#fields:
+#  env: staging
+
+# If this option is set to true, the custom fields are stored as top-level
+# fields in the output document instead of being grouped under a fields
+# sub-dictionary. Default is false.
+#fields_under_root: false
+
+# Internal queue configuration for buffering events to be published.
+#queue:
+  # Queue type by name (default 'mem')
+  # The memory queue will present all available events (up to the outputs
+  # bulk_max_size) to the output, the moment the output is ready to serve
+  # another batch of events.
+  #mem:
+    # Max number of events the queue can buffer.
+    #events: 4096
+
+    # Hints the minimum number of events stored in the queue,
+    # before providing a batch of events to the outputs.
+    # The default value is set to 2048.
+    # A value of 0 ensures events are immediately available
+    # to be sent to the outputs.
+    #flush.min_events: 2048
+
+    # Maximum duration after which events are available to the outputs,
+    # if the number of events stored in the queue is < min_flush_events.
+    #flush.timeout: 1s
+
+  # The spool queue will store events in a local spool file, before
+  # forwarding the events to the outputs.
+  #
+  # Beta: spooling to disk is currently a beta feature. Use with care.
+  #
+  # The spool file is a circular buffer, which blocks once the file/buffer is full.
+  # Events are put into a write buffer and flushed once the write buffer
+  # is full or the flush_timeout is triggered.
+  # Once ACKed by the output, events are removed immediately from the queue,
+  # making space for new events to be persisted.
+  #spool:
+    # The file namespace configures the file path and the file creation settings.
+    # Once the file exists, the `size`, `page_size` and `prealloc` settings
+    # will have no more effect.
+    #file:
+      # Location of spool file. The default value is ${path.data}/spool.dat.
+      #path: "${path.data}/spool.dat"
+
+      # Configure file permissions if file is created. The default value is 0600.
+      #permissions: 0600
+
+      # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB.
+      #size: 100MiB
+
+      # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB.
+      #page_size: 4KiB
+
+      # If prealloc is set, the required space for the file is reserved using
+      # truncate. The default value is true.
+      #prealloc: true
+
+    # Spool writer settings
+    # Events are serialized into a write buffer. The write buffer is flushed if:
+    # - The buffer limit has been reached.
+    # - The configured limit of buffered events is reached.
+    # - The flush timeout is triggered.
+    #write:
+      # Sets the write buffer size.
+      #buffer_size: 1MiB
+
+      # Maximum duration after which events are flushed, if the write buffer
+      # is not full yet. The default value is 1s.
+      #flush.timeout: 1s
+
+      # Number of maximum buffered events. The write buffer is flushed once the
+      # limit is reached.
+      #flush.events: 16384
+
+      # Configure the on-disk event encoding. The encoding can be changed
+      # between restarts.
+      # Valid encodings are: json, ubjson, and cbor.
+      #codec: cbor
+    #read:
+      # Reader flush timeout, waiting for more events to become available, so
+      # to fill a complete batch, as required by the outputs.
+      # If flush_timeout is 0, all available events are forwarded to the
+      # outputs immediately.
+      # The default value is 0s.
+      #flush.timeout: 0s
+
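+# Putting the spool settings together, a minimal sketch of a spool queue
+# (values shown are the documented defaults, for illustration only):
+#
+# queue.spool:
+#   file:
+#     path: "${path.data}/spool.dat"
+#     size: 100MiB
+#   write:
+#     buffer_size: 1MiB
+#     flush.timeout: 1s
+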
+# Sets the maximum number of CPUs that can be executing simultaneously. The
+# default is the number of logical CPUs available in the system.
+#max_procs:
+
+#================================ Processors ===================================
+
+# Processors are used to reduce the number of fields in the exported event or to
+# enhance the event with external metadata. This section defines a list of
+# processors that are applied one by one and the first one receives the initial
+# event:
+#
+#   event -> filter1 -> event1 -> filter2 -> event2 ...
+#
+# Supported processors include drop_fields, drop_event, include_fields,
+# decode_json_fields, and add_cloud_metadata.
+#
+# For example, you can use the following processors to keep the fields that
+# contain CPU load percentages, but remove the fields that contain CPU ticks
+# values:
+#
+#processors:
+#- include_fields:
+#    fields: ["cpu"]
+#- drop_fields:
+#    fields: ["cpu.user", "cpu.system"]
+#
+# The following example drops the events that have the HTTP response code 200:
+#
+#processors:
+#- drop_event:
+#    when:
+#       equals:
+#           http.code: 200
+#
+# The following example renames the field a to b:
+#
+#processors:
+#- rename:
+#    fields:
+#       - from: "a"
+#         to: "b"
+#
+# The following example tokenizes the string into fields:
+#
+#processors:
+#- dissect:
+#    tokenizer: "%{key1} - %{key2}"
+#    field: "message"
+#    target_prefix: "dissect"
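+#
+# With the tokenizer above, a hypothetical message "ERROR - disk full" would
+# yield the fields dissect.key1: "ERROR" and dissect.key2: "disk full".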
+#
+# The following example enriches each event with metadata from the cloud
+# provider about the host machine. It works on EC2, GCE, DigitalOcean,
+# Tencent Cloud, and Alibaba Cloud.
+#
+#processors:
+#- add_cloud_metadata: ~
+#
+# The following example enriches each event with the machine's local time zone
+# offset from UTC.
+#
+#processors:
+#- add_locale:
+#    format: offset
+#
+# The following example enriches each event with Docker metadata; it matches
+# the given fields to an existing container ID and adds info from that container:
+#
+#processors:
+#- add_docker_metadata:
+#    host: "unix:///var/run/docker.sock"
+#    match_fields: ["system.process.cgroup.id"]
+#    match_pids: ["process.pid", "process.ppid"]
+#    match_source: true
+#    match_source_index: 4
+#    match_short_id: false
+#    cleanup_timeout: 60
+#    # To connect to Docker over TLS you must specify a client and CA certificate.
+#    #ssl:
+#    #  certificate_authority: "/etc/pki/root/ca.pem"
+#    #  certificate:           "/etc/pki/client/cert.pem"
+#    #  key:                   "/etc/pki/client/cert.key"
+#
+# The following example enriches each event with Docker metadata; it matches
+# the container ID from the log path available in the `source` field (by
+# default it expects the path to be /var/lib/docker/containers/*/*.log).
+#
+#processors:
+#- add_docker_metadata: ~
+#
+# The following example enriches each event with host metadata.
+#
+#processors:
+#- add_host_metadata:
+#    netinfo.enabled: false
+#
+# The following example enriches each event with process metadata using
+# process IDs included in the event.
+#
+#processors:
+#- add_process_metadata:
+#    match_pids: ["system.process.ppid"]
+#    target: system.process.parent
+#
+# The following example decodes fields containing JSON strings 
+# and replaces the strings with valid JSON objects.
+#
+#processors:
+#- decode_json_fields:
+#    fields: ["field1", "field2", ...]
+#    process_array: false
+#    max_depth: 1
+#    target: ""
+#    overwrite_keys: false
+
+#============================= Elastic Cloud ==================================
+
+# These settings simplify using metricbeat with the Elastic Cloud (https://cloud.elastic.co/).
+
+# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
+# `setup.kibana.host` options.
+# You can find the `cloud.id` in the Elastic Cloud web UI.
+#cloud.id:
+
+# The cloud.auth setting overwrites the `output.elasticsearch.username` and
+# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#cloud.auth:
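+
+# For example (both values below are made-up placeholders):
+#cloud.id: "staging:ZXhhbXBsZS1wbGFjZWhvbGRlcg=="
+#cloud.auth: "elastic:changeme"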
+
+#================================ Outputs ======================================
+
+# Configure what output to use when sending the data collected by the beat.
+
+#-------------------------- Elasticsearch output -------------------------------
+output.elasticsearch:
+  # Boolean flag to enable or disable the output module.
+  #enabled: true
+
+  # Array of hosts to connect to.
+  # Scheme and port can be left out and will be set to the default (http and 9200)
+  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
+  hosts: ["localhost:9200"]
+
+  # Set gzip compression level.
+  #compression_level: 0
+
+  # Configure escaping of HTML symbols in strings.
+  #escape_html: true
+
+  # Optional protocol and basic auth credentials.
+  #protocol: "https"
+  #username: "elastic"
+  #password: "changeme"
+
+  # Dictionary of HTTP parameters to pass within the URL with index operations.
+  #parameters:
+    #param1: value1
+    #param2: value2
+
+  # Number of workers per Elasticsearch host.
+  #worker: 1
+
+  # Optional index name. The default is "metricbeat" plus the date,
+  # which generates [metricbeat-]YYYY.MM.DD keys.
+  # If you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
+  #index: "metricbeat-%{[beat.version]}-%{+yyyy.MM.dd}"
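+  #
+  # For example, a hypothetical custom index, together with the top-level
+  # template settings it would then require:
+  #   index: "customname-%{[beat.version]}-%{+yyyy.MM.dd}"
+  #   setup.template.name: "customname"
+  #   setup.template.pattern: "customname-*"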
+
+  # Optional ingest node pipeline. By default no pipeline will be used.
+  #pipeline: ""
+
+  # Optional HTTP Path
+  #path: "/elasticsearch"
+
+  # Custom HTTP headers to add to each request
+  #headers:
+  #  X-My-Header: Contents of the header
+
+  # Proxy server URL
+  #proxy_url: http://proxy:3128
+
+  # The number of times a particular Elasticsearch index operation is attempted. If
+  # the indexing operation doesn't succeed after this many retries, the events are
+  # dropped. The default is 3.
+  #max_retries: 3
+
+  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
+  # The default is 50.
+  #bulk_max_size: 50
+
+  # The number of seconds to wait before trying to reconnect to Elasticsearch
+  # after a network error. After waiting backoff.init seconds, the Beat
+  # tries to reconnect. If the attempt fails, the backoff timer is increased
+  # exponentially up to backoff.max. After a successful connection, the backoff
+  # timer is reset. The default is 1s.
+  #backoff.init: 1s
+
+  # The maximum number of seconds to wait before attempting to connect to
+  # Elasticsearch after a network error. The default is 60s.
+  #backoff.max: 60s
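+  #
+  # For illustration, with the defaults above the wait between attempts grows
+  # roughly as 1s, 2s, 4s, 8s, ... until it is capped at 60s, and it resets
+  # to 1s after the next successful connection.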
+
+  # Configure the HTTP request timeout before failing a request to Elasticsearch.
+  #timeout: 90
+
+  # Use SSL settings for HTTPS.
+  #ssl.enabled: true
+
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL-based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # SSL configuration. By default it is off.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #ssl.curve_types: []
+
+  # Configure what types of renegotiation are supported. Valid options are
+  # never, once, and freely. Default is never.
+  #ssl.renegotiation: never
+
+
+#----------------------------- Logstash output ---------------------------------
+#output.logstash:
+  # Boolean flag to enable or disable the output module.
+  #enabled: true
+
+  # The Logstash hosts
+  #hosts: ["localhost:5044"]
+
+  # Number of workers per Logstash host.
+  #worker: 1
+
+  # Set gzip compression level.
+  #compression_level: 3
+
+  # Configure escaping of HTML symbols in strings.
+  #escape_html: true
+
+  # Optional maximum time to live for a connection to Logstash, after which the
+  # connection will be re-established.  A value of `0s` (the default) will
+  # disable this feature.
+  #
+  # Not yet supported for async connections (i.e. with the "pipelining" option set)
+  #ttl: 30s
+
+  # Optionally load balance the events between the Logstash hosts. Default is false.
+  #loadbalance: false
+
+  # Number of batches to be sent asynchronously to Logstash while processing
+  # new batches.
+  #pipelining: 2
+
+  # If enabled, only a subset of events in a batch of events is transferred per
+  # transaction.  The number of events to be sent increases up to `bulk_max_size`
+  # if no error is encountered.
+  #slow_start: false
+
+  # The number of seconds to wait before trying to reconnect to Logstash
+  # after a network error. After waiting backoff.init seconds, the Beat
+  # tries to reconnect. If the attempt fails, the backoff timer is increased
+  # exponentially up to backoff.max. After a successful connection, the backoff
+  # timer is reset. The default is 1s.
+  #backoff.init: 1s
+
+  # The maximum number of seconds to wait before attempting to connect to
+  # Logstash after a network error. The default is 60s.
+  #backoff.max: 60s
+
+  # Optional index name. The default index name is set to metricbeat
+  # in all lowercase.
+  #index: 'metricbeat'
+
+  # SOCKS5 proxy server URL
+  #proxy_url: socks5://user:password@socks5-server:2233
+
+  # Resolve names locally when using a proxy server. Defaults to false.
+  #proxy_use_local_resolver: false
+
+  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
+  #ssl.enabled: true
+
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL-based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # Optional SSL configuration options. SSL is off by default.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #ssl.curve_types: []
+
+  # Configure what types of renegotiation are supported. Valid options are
+  # never, once, and freely. Default is never.
+  #ssl.renegotiation: never
+
+  # The number of times to retry publishing an event after a publishing failure.
+  # After the specified number of retries, the events are typically dropped.
+  # Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting
+  # and retry until all events are published.  Set max_retries to a value less
+  # than 0 to retry until all events are published. The default is 3.
+  #max_retries: 3
+
+  # The maximum number of events to bulk in a single Logstash request. The
+  # default is 2048.
+  #bulk_max_size: 2048
+
+  # The number of seconds to wait for responses from the Logstash server before
+  # timing out. The default is 30s.
+  #timeout: 30s
+
+#------------------------------- Kafka output ----------------------------------
+#output.kafka:
+  # Boolean flag to enable or disable the output module.
+  #enabled: true
+
+  # The list of Kafka broker addresses from which to fetch the cluster metadata.
+  # The cluster metadata contain the actual Kafka brokers that events are
+  # published to.
+  #hosts: ["localhost:9092"]
+
+  # The Kafka topic used for produced events. The setting can be a format string
+  # using any event field. To set the topic from document type use `%{[type]}`.
+  #topic: beats
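+  #
+  # For example, a hypothetical per-environment topic derived from a custom
+  # field (assumes fields.env is set on each event):
+  #topic: 'metricbeat-%{[fields.env]}'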
+
+  # The Kafka event key setting. Use a format string to create a unique event key.
+  # By default no event key will be generated.
+  #key: ''
+
+  # The Kafka event partitioning strategy. The default strategy is `hash`,
+  # which uses the `output.kafka.key` setting, or randomly distributes events
+  # if `output.kafka.key` is not configured.
+  #partition.hash:
+    # If enabled, events will only be published to partitions with reachable
+    # leaders. Default is false.
+    #reachable_only: false
+
+    # Configure alternative event field names used to compute the hash value.
+    # If empty, the `output.kafka.key` setting will be used.
+    # The default value is an empty list.
+    #hash: []
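+
+  # For illustration, a hypothetical hashing setup that keys the partition on
+  # the host name, so each host's events land on a stable partition:
+  #partition.hash:
+  #  reachable_only: false
+  #  hash: ["beat.hostname"]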
+
+  # Authentication details. Password is required if username is set.
+  #username: ''
+  #password: ''
+
+  # Kafka version metricbeat is assumed to run against. Defaults to "1.0.0".
+  #version: '1.0.0'
+
+  # Configure JSON encoding
+  #codec.json:
+    # Pretty-print JSON events
+    #pretty: false
+
+    # Configure escaping of HTML symbols in strings.
+    #escape_html: true
+
+  # Metadata update configuration. The metadata contain leader information
+  # used to decide which broker to publish to.
+  #metadata:
+    # Max metadata request retry attempts when the cluster is in the middle of
+    # a leader election. Defaults to 3 retries.
+    #retry.max: 3
+
+    # Waiting time between retries during leader elections. Default is 250ms.
+    #retry.backoff: 250ms
+
+    # Refresh metadata interval. Defaults to every 10 minutes.
+    #refresh_frequency: 10m
+
+  # The number of concurrent load-balanced Kafka output workers.
+  #worker: 1
+
+  # The number of times to retry publishing an event after a publishing failure.
+  # After the specified number of retries, the events are typically dropped.
+  # Some Beats, such as Filebeat, ignore the max_retries setting and retry until
+  # all events are published.  Set max_retries to a value less than 0 to retry
+  # until all events are published. The default is 3.
+  #max_retries: 3
+
+  # The maximum number of events to bulk in a single Kafka request. The default
+  # is 2048.
+  #bulk_max_size: 2048
+
+  # The number of seconds to wait for responses from the Kafka brokers before
+  # timing out. The default is 30s.
+  #timeout: 30s
+
+  # The maximum duration a broker will wait for the number of required ACKs.
+  # The default is 10s.
+  #broker_timeout: 10s
+
+  # The number of messages buffered for each Kafka broker. The default is 256.
+  #channel_buffer_size: 256
+
+  # The keep-alive period for an active network connection. If 0s, keep-alives
+  # are disabled. The default is 0 seconds.
+  #keep_alive: 0
+
+  # Sets the output compression codec. Must be one of none, snappy, or gzip.
+  # The default is gzip.
+  #compression: gzip
+
+  # Set the compression level. Currently only gzip provides a compression level
+  # between 0 and 9. The default value is chosen by the compression algorithm.
+  #compression_level: 4
+
+  # The maximum permitted size of JSON-encoded messages. Bigger messages will be
+  # dropped. The default value is 1000000 (bytes). This value should be equal to
+  # or less than the broker's message.max.bytes.
+  #max_message_bytes: 1000000
+
+  # The ACK reliability level required from broker. 0=no response, 1=wait for
+  # local commit, -1=wait for all replicas to commit. The default is 1.  Note:
+  # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently
+  # on error.
+  #required_acks: 1
+
+  # The configurable ClientID used for logging, debugging, and auditing
+  # purposes.  The default is "beats".
+  #client_id: beats
+
+  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
+  #ssl.enabled: true
+
+  # Optional SSL configuration options. SSL is off by default.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL-based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #ssl.curve_types: []
+
+  # Configure what types of renegotiation are supported. Valid options are
+  # never, once, and freely. Default is never.
+  #ssl.renegotiation: never
+
+#------------------------------- Redis output ----------------------------------
+#output.redis:
+  # Boolean flag to enable or disable the output module.
+  #enabled: true
+
+  # Configure JSON encoding
+  #codec.json:
+    # Pretty-print JSON events
+    #pretty: false
+
+    # Configure escaping of HTML symbols in strings.
+    #escape_html: true
+
+  # The list of Redis servers to connect to. If load balancing is enabled, the
+  # events are distributed to the servers in the list. If one server becomes
+  # unreachable, the events are distributed to the reachable servers only.
+  #hosts: ["localhost:6379"]
+
+  # The Redis port to use if hosts does not contain a port number. The default
+  # is 6379.
+  #port: 6379
+
+  # The name of the Redis list or channel the events are published to. The
+  # default is metricbeat.
+  #key: metricbeat
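+  #
+  # The key also accepts format strings; for example, a hypothetical per-event
+  # list name with a fallback (assumes a custom fields.list field):
+  #key: "%{[fields.list]:metricbeat}"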
+
+  # The password to authenticate with. The default is no authentication.
+  #password:
+
+  # The Redis database number where the events are published. The default is 0.
+  #db: 0
+
+  # The Redis data type to use for publishing events. If the data type is list,
+  # the Redis RPUSH command is used. If the data type is channel, the Redis
+  # PUBLISH command is used. The default value is list.
+  #datatype: list
+
+  # The number of workers to use for each host configured to publish events to
+  # Redis. Use this setting along with the loadbalance option. For example, if
+  # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each
+  # host).
+  #worker: 1
+
+  # If set to true and multiple hosts or workers are configured, the output
+  # plugin load balances published events onto all Redis hosts. If set to false,
+  # the output plugin sends all events to only one host (determined at random)
+  # and will switch to another host if the currently selected one becomes
+  # unreachable. The default value is true.
+  #loadbalance: true
+
+  # The Redis connection timeout in seconds. The default is 5 seconds.
+  #timeout: 5s
+
+  # The number of times to retry publishing an event after a publishing failure.
+  # After the specified number of retries, the events are typically dropped.
+  # Some Beats, such as Filebeat, ignore the max_retries setting and retry until
+  # all events are published. Set max_retries to a value less than 0 to retry
+  # until all events are published. The default is 3.
+  #max_retries: 3
+
+  # The number of seconds to wait before trying to reconnect to Redis
+  # after a network error. After waiting backoff.init seconds, the Beat
+  # tries to reconnect. If the attempt fails, the backoff timer is increased
+  # exponentially up to backoff.max. After a successful connection, the backoff
+  # timer is reset. The default is 1s.
+  #backoff.init: 1s
+
+  # The maximum number of seconds to wait before attempting to connect to
+  # Redis after a network error. The default is 60s.
+  #backoff.max: 60s
+
+  # The maximum number of events to bulk in a single Redis request or pipeline.
+  # The default is 2048.
+  #bulk_max_size: 2048
+
+  # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The
+  # value must be a URL with a scheme of socks5://.
+  #proxy_url:
+
+  # This option determines whether Redis hostnames are resolved locally when
+  # using a proxy. The default value is false, which means that name resolution
+  # occurs on the proxy server.
+  #proxy_use_local_resolver: false
+
+  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
+  #ssl.enabled: true
+
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL-based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # Optional SSL configuration options. SSL is off by default.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #ssl.curve_types: []
+
+  # Configure what types of renegotiation are supported. Valid options are
+  # never, once, and freely. Default is never.
+  #ssl.renegotiation: never
+
+#------------------------------- File output -----------------------------------
+#output.file:
+  # Boolean flag to enable or disable the output module.
+  #enabled: true
+
+  # Configure JSON encoding
+  #codec.json:
+    # Pretty-print JSON events
+    #pretty: false
+
+    # Configure escaping of HTML symbols in strings.
+    #escape_html: true
+
+  # Path to the directory where to save the generated files. The option is
+  # mandatory.
+  #path: "/tmp/metricbeat"
+
+  # Name of the generated files. The default is `metricbeat` and it generates
+  # files: `metricbeat`, `metricbeat.1`, `metricbeat.2`, etc.
+  #filename: metricbeat
+
+  # Maximum size in kilobytes of each file. When this size is reached, and on
+  # every metricbeat restart, the files are rotated. The default value is 10240
+  # kB.
+  #rotate_every_kb: 10240
+
+  # Maximum number of files under path. When this number of files is reached,
+  # the oldest file is deleted and the rest are shifted from last to first. The
+  # default is 7 files.
+  #number_of_files: 7
+
+  # Permissions to use for file creation. The default is 0600.
+  #permissions: 0600
+
+
+#----------------------------- Console output ---------------------------------
+#output.console:
+  # Boolean flag to enable or disable the output module.
+  #enabled: true
+
+  # Configure JSON encoding
+  #codec.json:
+    # Pretty-print JSON events
+    #pretty: false
+
+    # Configure escaping of HTML symbols in strings.
+    #escape_html: true
+
+#================================= Paths ======================================
+
+# The home path for the metricbeat installation. This is the default base path
+# for all other path settings and for miscellaneous files that come with the
+# distribution (for example, the sample dashboards).
+# If not set by a CLI flag or in the configuration file, the default for the
+# home path is the location of the binary.
+#path.home:
+
+# The configuration path for the metricbeat installation. This is the default
+# base path for configuration files, including the main YAML configuration file
+# and the Elasticsearch template file. If not set by a CLI flag or in the
+# configuration file, the default for the configuration path is the home path.
+#path.config: ${path.home}
+
+# The data path for the metricbeat installation. This is the default base path
+# for all the files in which metricbeat needs to store its data. If not set by a
+# CLI flag or in the configuration file, the default for the data path is a data
+# subdirectory inside the home path.
+#path.data: ${path.home}/data
+
+# The logs path for a metricbeat installation. This is the default location for
+# the Beat's log files. If not set by a CLI flag or in the configuration file,
+# the default for the logs path is a logs subdirectory inside the home path.
+#path.logs: ${path.home}/logs
+
+#================================ Keystore ==========================================
+# Location of the Keystore containing the keys and their sensitive values.
+#keystore.path: "${path.config}/beats.keystore"
+
+#============================== Dashboards =====================================
+# These settings control loading the sample dashboards to the Kibana index. Loading
+# the dashboards is disabled by default and can be enabled either by setting the
+# options here, or by using the `-setup` CLI flag or the `setup` command.
+#setup.dashboards.enabled: false
+
+# The directory from where to read the dashboards. The default is the `kibana`
+# folder in the home path.
+#setup.dashboards.directory: ${path.home}/kibana
+
+# The URL from where to download the dashboards archive. It is used instead of
+# the directory if it has a value.
+#setup.dashboards.url:
+
+# The file archive (zip file) from where to read the dashboards. It is used instead
+# of the directory when it has a value.
+#setup.dashboards.file:
+
+# In case the archive contains the dashboards from multiple Beats, this lets you
+# select which one to load. You can load all the dashboards in the archive by
+# setting this to the empty string.
+#setup.dashboards.beat: metricbeat
+
+# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
+#setup.dashboards.kibana_index: .kibana
+
+# The Elasticsearch index name. This overwrites the index name defined in the
+# dashboards and index pattern. Example: testbeat-*
+#setup.dashboards.index:
+
+# Always use the Kibana API for loading the dashboards instead of autodetecting
+# how to install the dashboards by first querying Elasticsearch.
+#setup.dashboards.always_kibana: false
+
+# If true and Kibana is not reachable at the time when dashboards are loaded,
+# the Beat will retry connecting to Kibana instead of exiting with an error.
+#setup.dashboards.retry.enabled: false
+
+# Duration interval between Kibana connection retries.
+#setup.dashboards.retry.interval: 1s
+
+# Maximum number of retries before exiting with an error, 0 for unlimited retrying.
+#setup.dashboards.retry.maximum: 0
+
+
+#============================== Template =====================================
+
+# A template is used to set the mapping in Elasticsearch.
+# By default template loading is enabled and the template is loaded.
+# These settings can be adjusted to load your own template or overwrite existing ones.
+
+# Set to false to disable template loading.
+#setup.template.enabled: true
+
+# Template name. By default the template name is "metricbeat-%{[beat.version]}".
+# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
+#setup.template.name: "metricbeat-%{[beat.version]}"
+
+# Template pattern. By default the template pattern is "metricbeat-%{[beat.version]}-*" to apply to the default index settings.
+# The first part is the beat name and version, and -* is used to match all daily indices.
+# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
+#setup.template.pattern: "metricbeat-%{[beat.version]}-*"
+
+# Path to fields.yml file to generate the template
+#setup.template.fields: "${path.config}/fields.yml"
+
+# A list of fields to be added to the template and Kibana index pattern. Also
+# specify setup.template.overwrite: true to overwrite the existing template.
+# This setting is experimental.
+#setup.template.append_fields:
+#- name: field_name
+#  type: field_type
+
+# Enable json template loading. If this is enabled, the fields.yml is ignored.
+#setup.template.json.enabled: false
+
+# Path to the json template file
+#setup.template.json.path: "${path.config}/template.json"
+
+# Name under which the template is stored in Elasticsearch
+#setup.template.json.name: ""
+
+# Overwrite existing template
+#setup.template.overwrite: false
+
+# Elasticsearch template settings
+setup.template.settings:
+
+  # A dictionary of settings to place into the settings.index dictionary
+  # of the Elasticsearch template. For more details, please check
+  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
+  #index:
+    #number_of_shards: 1
+    #codec: best_compression
+    #number_of_routing_shards: 30
+
+  # A dictionary of settings for the _source field. For more details, please check
+  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
+  #_source:
+    #enabled: false
+
+#============================== Kibana =====================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+setup.kibana:
+
+  # Kibana Host
+  # Scheme and port can be left out and will be set to the default (http and 5601)
+  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
+  #host: "localhost:5601"
+
+  # Optional protocol and basic auth credentials.
+  #protocol: "https"
+  #username: "elastic"
+  #password: "changeme"
+
+  # Optional HTTP Path
+  #path: ""
+
+  # Use SSL settings for HTTPS. Default is true.
+  #ssl.enabled: true
+
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL-based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # SSL configuration. By default it is off.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #ssl.curve_types: []
+
+
+
+#================================ Logging ======================================
+# There are four options for the log output: file, stderr, syslog, eventlog.
+# The file output is the default.
+
+# Sets log level. The default log level is info.
+# Available log levels are: error, warning, info, debug
+#logging.level: info
+
+# Enable debug output for selected components. To enable all selectors use ["*"].
+# Other available selectors are "beat", "publish", and "service".
+# Multiple selectors can be chained.
+#logging.selectors: [ ]
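+
+# For example, to debug only the publisher pipeline (illustrative):
+#logging.level: debug
+#logging.selectors: ["publish"]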
+
+# Send all logging output to syslog. The default is false.
+#logging.to_syslog: false
+
+# Send all logging output to Windows Event Logs. The default is false.
+#logging.to_eventlog: false
+
+# If enabled, metricbeat periodically logs its internal metrics that have changed
+# in the last period. For each metric that changed, the delta from the value at
+# the beginning of the period is logged. Also, the total values for
+# all non-zero internal metrics are logged on shutdown. The default is true.
+#logging.metrics.enabled: true
+
+# The period after which to log the internal metrics. The default is 30s.
+#logging.metrics.period: 30s
+
+# Logging to rotating files. Set logging.to_files to false to disable logging to
+# files.
+logging.to_files: true
+logging.files:
+  # Configure the path where the logs are written. The default is the logs directory
+  # under the home path (the binary location).
+  #path: /var/log/metricbeat
+
+  # The name of the files where the logs are written to.
+  #name: metricbeat
+
+  # Configure the log file size limit. If the limit is reached, the log file
+  # will be rotated automatically.
+  #rotateeverybytes: 10485760 # = 10MB
+
+  # Number of rotated log files to keep. Oldest files will be deleted first.
+  #keepfiles: 7
+
+  # The permissions mask to apply when rotating log files. The default value is 0600.
+  # Must be a valid Unix-style file permissions mask expressed in octal notation.
+  #permissions: 0600
+
+  # Enable log file rotation on time intervals in addition to size-based rotation.
+  # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h
+  # are boundary-aligned with minutes, hours, days, weeks, months, and years as
+  # reported by the local system clock. All other intervals are calculated from the
+  # unix epoch. Defaults to disabled.
+  #interval: 0
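+
+  # For example, a hypothetical interval: 24h would additionally rotate the
+  # log file at day boundaries of the local system clock.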
+
+# Set to true to log messages in json format.
+#logging.json: false
+
+
+#============================== Xpack Monitoring =====================================
+# metricbeat can export internal metrics to a central Elasticsearch monitoring cluster.
+# This requires xpack monitoring to be enabled in Elasticsearch.
+# The reporting is disabled by default.
+
+# Set to true to enable the monitoring reporter.
+#xpack.monitoring.enabled: false
+
+# Uncomment to send the metrics to Elasticsearch. Most settings from the
+# Elasticsearch output are accepted here as well. Any setting that is not set is
+# automatically inherited from the Elasticsearch output configuration, so if you
+# have the Elasticsearch output configured, you can simply uncomment the
+# following line, and leave the rest commented out.
+#xpack.monitoring.elasticsearch:
+
+  # Array of hosts to connect to.
+  # Scheme and port can be left out and will be set to the default (http and 9200)
+  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
+  #hosts: ["localhost:9200"]
+
+  # Set gzip compression level.
+  #compression_level: 0
+
+  # Optional protocol and basic auth credentials.
+  #protocol: "https"
+  #username: "beats_system"
+  #password: "changeme"
+
+  # Dictionary of HTTP parameters to pass within the URL with index operations.
+  #parameters:
+    #param1: value1
+    #param2: value2
+
+  # Custom HTTP headers to add to each request
+  #headers:
+  #  X-My-Header: Contents of the header
+
+  # Proxy server URL
+  #proxy_url: http://proxy:3128
+
+  # The number of times a particular Elasticsearch index operation is attempted. If
+  # the indexing operation doesn't succeed after this many retries, the events are
+  # dropped. The default is 3.
+  #max_retries: 3
+
+  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
+  # The default is 50.
+  #bulk_max_size: 50
+
+  # The number of seconds to wait before trying to reconnect to Elasticsearch
+  # after a network error. After waiting backoff.init seconds, the Beat
+  # tries to reconnect. If the attempt fails, the backoff timer is increased
+  # exponentially up to backoff.max. After a successful connection, the backoff
+  # timer is reset. The default is 1s.
+  #backoff.init: 1s
+
+  # The maximum number of seconds to wait before attempting to connect to
+  # Elasticsearch after a network error. The default is 60s.
+  #backoff.max: 60s
+
+  # Configure the HTTP request timeout before failing a request to Elasticsearch.
+  #timeout: 90
+
+  # Use SSL settings for HTTPS.
+  #ssl.enabled: true
+
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL-based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # SSL configuration. By default it is off.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #ssl.curve_types: []
+
+  # Configure what types of renegotiation are supported. Valid options are
+  # never, once, and freely. Default is never.
+  #ssl.renegotiation: never
+
+  #metrics.period: 10s
+  #state.period: 1m
+
+#================================ HTTP Endpoint ======================================
+# Each beat can expose internal metrics through an HTTP endpoint. For security
+# reasons the endpoint is disabled by default. This feature is currently experimental.
+# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output
+# append ?pretty to the URL.
+
+# Defines if the HTTP endpoint is enabled.
+#http.enabled: false
+
+# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
+#http.host: localhost
+
+# Port on which the HTTP endpoint will bind. Default is 5066.
+#http.port: 5066
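+
+# For illustration, once http.enabled is set to true, the stats could be
+# fetched with, for example:
+#   curl http://localhost:5066/stats?pretty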
+
+#============================= Process Security ================================
+
+# Enable or disable seccomp system call filtering on Linux. Default is enabled.
+#seccomp.enabled: true
diff --git a/x-pack/metricbeat/metricbeat.yml b/x-pack/metricbeat/metricbeat.yml
new file mode 100644
index 000000000000..259092c66dff
--- /dev/null
+++ b/x-pack/metricbeat/metricbeat.yml
@@ -0,0 +1,148 @@
+###################### Metricbeat Configuration Example #######################
+
+# This file is an example configuration file highlighting only the most common
+# options. The metricbeat.reference.yml file from the same directory contains all the
+# supported options with more comments. You can use it as a reference.
+#
+# You can find the full configuration reference here:
+# https://www.elastic.co/guide/en/beats/metricbeat/index.html
+
+#==========================  Modules configuration ============================
+
+metricbeat.config.modules:
+  # Glob pattern for configuration loading
+  path: ${path.config}/modules.d/*.yml
+
+  # Set to true to enable config reloading
+  reload.enabled: false
+
+  # Period at which files under path should be checked for changes
+  #reload.period: 10s
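+
+  # For illustration, a file matched by the glob above (for example,
+  # modules.d/system.yml) might contain a module definition such as:
+  #
+  # - module: system
+  #   period: 10s
+  #   metricsets: ["cpu", "memory"]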
+
+#==================== Elasticsearch template setting ==========================
+
+setup.template.settings:
+  index.number_of_shards: 1
+  index.codec: best_compression
+  #_source.enabled: false
+
+#================================ General =====================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output.
+#fields:
+#  env: staging
+
+
+#============================== Dashboards =====================================
+# These settings control loading the sample dashboards to the Kibana index. Loading
+# the dashboards is disabled by default and can be enabled either by setting the
+# options here, or by using the `-setup` CLI flag or the `setup` command.
+#setup.dashboards.enabled: false
+
+# The URL from where to download the dashboards archive. By default this URL
+# has a value which is computed based on the Beat name and version. For released
+# versions, this URL points to the dashboard archive on the artifacts.elastic.co
+# website.
+#setup.dashboards.url:
+
+#============================== Kibana =====================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+setup.kibana:
+
+  # Kibana Host
+  # Scheme and port can be left out and will be set to the default (http and 5601)
+  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
+  #host: "localhost:5601"
+
+  # Kibana Space ID
+  # ID of the Kibana Space into which the dashboards should be loaded. By default,
+  # the Default Space will be used.
+  #space.id:
+
+#============================= Elastic Cloud ==================================
+
+# These settings simplify using metricbeat with the Elastic Cloud (https://cloud.elastic.co/).
+
+# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
+# `setup.kibana.host` options.
+# You can find the `cloud.id` in the Elastic Cloud web UI.
+#cloud.id:
+
+# The cloud.auth setting overwrites the `output.elasticsearch.username` and
+# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#cloud.auth:
+
+#================================ Outputs =====================================
+
+# Configure what output to use when sending the data collected by the beat.
+
+#-------------------------- Elasticsearch output ------------------------------
+output.elasticsearch:
+  # Array of hosts to connect to.
+  hosts: ["localhost:9200"]
+
+  # Optional protocol and basic auth credentials.
+  #protocol: "https"
+  #username: "elastic"
+  #password: "changeme"
+
+#----------------------------- Logstash output --------------------------------
+#output.logstash:
+  # The Logstash hosts
+  #hosts: ["localhost:5044"]
+
+  # Optional SSL. By default it is off.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+#================================ Processors ====================================
+
+# Configure processors to enhance or manipulate events generated by the beat.
+
+processors:
+  - add_host_metadata: ~
+  - add_cloud_metadata: ~
+
+#================================ Logging =====================================
+
+# Sets log level. The default log level is info.
+# Available log levels are: error, warning, info, debug
+#logging.level: debug
+
+# At debug level, you can selectively enable logging only for some components.
+# To enable all selectors use ["*"]. Examples of other selectors are "beat",
+# "publish", "service".
+#logging.selectors: ["*"]
+
+#============================== Xpack Monitoring ===============================
+# metricbeat can export internal metrics to a central Elasticsearch monitoring
+# cluster.  This requires xpack monitoring to be enabled in Elasticsearch.  The
+# reporting is disabled by default.
+
+# Set to true to enable the monitoring reporter.
+#xpack.monitoring.enabled: false
+
+# Uncomment to send the metrics to Elasticsearch. Most settings from the
+# Elasticsearch output are accepted here as well. Any setting that is not set is
+# automatically inherited from the Elasticsearch output configuration, so if you
+# have the Elasticsearch output configured, you can simply uncomment the
+# following line.
+#xpack.monitoring.elasticsearch: