From 61490f05c7056bc2268c8891aad188f0cad38631 Mon Sep 17 00:00:00 2001 From: Evan Pease Date: Thu, 18 Aug 2016 15:13:00 -0700 Subject: [PATCH 01/16] Packaging w/o s3 upload - initial commit --- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile b/Makefile index 2951e175a4347..dacaf4eedb76a 100644 --- a/Makefile +++ b/Makefile @@ -29,6 +29,9 @@ build-for-docker: package: ./scripts/build.py --package --version="$(VERSION)" --platform=linux --arch=all --upload +package-wavefront: + ./scripts/build.py --package --version="$(VERSION)" --platform=linux --arch=all + # Get dependencies and use gdm to checkout changesets prepare: go get github.com/sparrc/gdm From c181ae8fb6b8f17ec9d992f97be661d813fb3582 Mon Sep 17 00:00:00 2001 From: Evan Pease Date: Thu, 18 Aug 2016 18:59:05 -0700 Subject: [PATCH 02/16] First cut of wavefront storage driver - quotes tag values --- plugins/outputs/all/all.go | 1 + plugins/outputs/wavefront/wavefront.go | 172 ++++++++++++++++++++ plugins/outputs/wavefront/wavefront_test.go | 80 +++++++++ 3 files changed, 253 insertions(+) create mode 100644 plugins/outputs/wavefront/wavefront.go create mode 100644 plugins/outputs/wavefront/wavefront_test.go diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index 27f8958fe69d4..d235802ee80e6 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -18,4 +18,5 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/opentsdb" _ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client" _ "github.com/influxdata/telegraf/plugins/outputs/riemann" + _ "github.com/influxdata/telegraf/plugins/outputs/wavefront" ) diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go new file mode 100644 index 0000000000000..2f59fdfec4135 --- /dev/null +++ b/plugins/outputs/wavefront/wavefront.go @@ -0,0 +1,172 @@ +package wavefront + +import ( + "fmt" + "net" + "sort" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs" +) + +type Wavefront struct { + Prefix string + + Host string + Port int + + Debug bool +} + +var sanitizedChars = strings.NewReplacer("@", "-", "*", "-", " ", "_", + `%`, "-", "#", "-", "$", "-", ":", "_") + +var sampleConfig = ` + ## prefix for metrics keys + prefix = "my.specific.prefix." 
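  ## note: the prefix is prepended verbatim, so keep the trailing ".";
  ## measurement "cpu" with field "usage" becomes "my.specific.prefix.cpu_usage"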
+ + ## Telnet Mode ## + ## DNS name of the wavefront proxy server in telnet mode + host = "wavefront.example.com" + + ## Port of the Wavefront proxy server in telnet mode + port = 4242 + + ## Debug true - Prints Wavefront communication + debug = false +` + +type MetricLine struct { + Metric string + Timestamp int64 + Value string + Tags string +} + +func (w *Wavefront) Connect() error { + // Test Connection to OpenTSDB Server + uri := fmt.Sprintf("%s:%d", w.Host, w.Port) + tcpAddr, err := net.ResolveTCPAddr("tcp", uri) + if err != nil { + return fmt.Errorf("Wavefront: TCP address cannot be resolved") + } + connection, err := net.DialTCP("tcp", nil, tcpAddr) + if err != nil { + return fmt.Errorf("Wavefront: TCP connect fail") + } + defer connection.Close() + return nil +} + +func (w *Wavefront) Write(metrics []telegraf.Metric) error { + if len(metrics) == 0 { + return nil + } + now := time.Now() + + // Send Data with telnet / socket communication + uri := fmt.Sprintf("%s:%d", w.Host, w.Port) + tcpAddr, _ := net.ResolveTCPAddr("tcp", uri) + connection, err := net.DialTCP("tcp", nil, tcpAddr) + if err != nil { + return fmt.Errorf("Wavefront: TCP connect fail") + } + defer connection.Close() + + for _, m := range metrics { + for _, metric := range buildMetrics(m, now, w.Prefix) { + messageLine := fmt.Sprintf("put %s %v %s %s\n", + metric.Metric, metric.Timestamp, metric.Value, metric.Tags) + if w.Debug { + fmt.Print(messageLine) + } + _, err := connection.Write([]byte(messageLine)) + if err != nil { + return fmt.Errorf("Wavefront: TCP writing error %s", err.Error()) + } + } + } + + return nil +} + +func buildTags(mTags map[string]string) []string { + tags := make([]string, len(mTags)) + index := 0 + for k, v := range mTags { + tags[index] = sanitizedChars.Replace(fmt.Sprintf("%s=\"%s\"", k, v)) + index++ + } + sort.Strings(tags) + return tags +} + +func buildMetrics(m telegraf.Metric, now time.Time, prefix string) []*MetricLine { + ret := []*MetricLine{} + for fieldName, value := range m.Fields() { + metric := &MetricLine{ + Metric: sanitizedChars.Replace(fmt.Sprintf("%s%s_%s", + prefix, m.Name(), fieldName)), + Timestamp: now.Unix(), + } + + metricValue, buildError := buildValue(value) + if buildError != nil { + fmt.Printf("Wavefront: %s\n", buildError.Error()) + continue + } + metric.Value = metricValue + tagsSlice := buildTags(m.Tags()) + metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " ")) + ret = append(ret, metric) + } + return ret +} + +func buildValue(v interface{}) (string, error) { + var retv string + switch p := v.(type) { + case int64: + retv = IntToString(int64(p)) + case uint64: + retv = UIntToString(uint64(p)) + case float64: + retv = FloatToString(float64(p)) + default: + return retv, fmt.Errorf("unexpected type %T with value %v for Wavefront", v, v) + } + return retv, nil +} + +func IntToString(input_num int64) string { + return strconv.FormatInt(input_num, 10) +} + +func UIntToString(input_num uint64) string { + return strconv.FormatUint(input_num, 10) +} + +func FloatToString(input_num float64) string { + return strconv.FormatFloat(input_num, 'f', 6, 64) +} + +func (w *Wavefront) SampleConfig() string { + return sampleConfig +} + +func (w *Wavefront) Description() string { + return "Configuration for OpenTSDB server to send metrics to" +} + +func (w *Wavefront) Close() error { + return nil +} + +func init() { + outputs.Add("wavefront", func() telegraf.Output { + return &Wavefront{} + }) +} diff --git a/plugins/outputs/wavefront/wavefront_test.go 
b/plugins/outputs/wavefront/wavefront_test.go new file mode 100644 index 0000000000000..db69005213ea5 --- /dev/null +++ b/plugins/outputs/wavefront/wavefront_test.go @@ -0,0 +1,80 @@ +package wavefront + +import ( + "reflect" + "testing" + // "github.com/influxdata/telegraf/testutil" + // "github.com/stretchr/testify/require" +) + +func TestBuildTagsTelnet(t *testing.T) { + var tagtests = []struct { + ptIn map[string]string + outTags []string + }{ + { + map[string]string{"one": "two", "three": "four"}, + []string{"one=two", "three=four"}, + }, + { + map[string]string{"aaa": "bbb"}, + []string{"aaa=bbb"}, + }, + { + map[string]string{"one": "two", "aaa": "bbb"}, + []string{"aaa=bbb", "one=two"}, + }, + { + map[string]string{"Sp%ci@l Chars": "g$t repl#ced"}, + []string{"Sp-ci-l_Chars=g-t_repl-ced"}, + }, + { + map[string]string{}, + []string{}, + }, + } + for _, tt := range tagtests { + tags := buildTags(tt.ptIn) + if !reflect.DeepEqual(tags, tt.outTags) { + t.Errorf("\nexpected %+v\ngot %+v\n", tt.outTags, tags) + } + } +} + +// func TestWrite(t *testing.T) { +// if testing.Short() { +// t.Skip("Skipping integration test in short mode") +// } + +// o := &OpenTSDB{ +// Host: testutil.GetLocalHost(), +// Port: 4242, +// Prefix: "prefix.test.", +// } + +// // Verify that we can connect to the OpenTSDB instance +// err := o.Connect() +// require.NoError(t, err) + +// // Verify that we can successfully write data to OpenTSDB +// err = o.Write(testutil.MockMetrics()) +// require.NoError(t, err) + +// // Verify postive and negative test cases of writing data +// metrics := testutil.MockMetrics() +// metrics = append(metrics, testutil.TestMetric(float64(1.0), +// "justametric.float")) +// metrics = append(metrics, testutil.TestMetric(int64(123456789), +// "justametric.int")) +// metrics = append(metrics, testutil.TestMetric(uint64(123456789012345), +// "justametric.uint")) +// metrics = append(metrics, testutil.TestMetric("Lorem Ipsum", +// "justametric.string")) +// metrics = append(metrics, testutil.TestMetric(float64(42.0), +// "justametric.anotherfloat")) +// metrics = append(metrics, testutil.TestMetric(float64(42.0), +// "metric w/ specialchars")) + +// err = o.Write(metrics) +// require.NoError(t, err) +// } From 0f4371f3297145003056b67bfbf6eef7e5f7e3de Mon Sep 17 00:00:00 2001 From: Evan Pease Date: Thu, 15 Sep 2016 11:08:02 -0700 Subject: [PATCH 03/16] committing latest changes before rebasing --- plugins/outputs/wavefront/wavefront.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index 2f59fdfec4135..851609c751bd9 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -21,8 +21,7 @@ type Wavefront struct { Debug bool } -var sanitizedChars = strings.NewReplacer("@", "-", "*", "-", " ", "_", - `%`, "-", "#", "-", "$", "-", ":", "_") +var sanitizedChars = strings.NewReplacer("*", "-", `%`, "-", "#", "-") var sampleConfig = ` ## prefix for metrics keys @@ -112,7 +111,6 @@ func buildMetrics(m telegraf.Metric, now time.Time, prefix string) []*MetricLine prefix, m.Name(), fieldName)), Timestamp: now.Unix(), } - metricValue, buildError := buildValue(value) if buildError != nil { fmt.Printf("Wavefront: %s\n", buildError.Error()) From 1ce9f4e38c9a4a46bf0647a9a1555ec2820aa2b1 Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Wed, 12 Oct 2016 12:03:18 -0400 Subject: [PATCH 04/16] Added Metric_separator configuration property --- 
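The hunks below thread the whole plugin struct through buildMetrics so that the character joining the measurement name to the field name becomes configurable. A minimal sketch of the resulting naming rule (illustrative only; joinName is a hypothetical helper, not part of the patch):

```go
package main

import "fmt"

// joinName mirrors the naming rule after this patch: the prefix is
// prepended verbatim, and the configured separator joins the measurement
// name to the field name.
func joinName(prefix, measurement, separator, field string) string {
	if separator == "" {
		separator = "_" // Connect() falls back to "_" when unset
	}
	return fmt.Sprintf("%s%s%s%s", prefix, measurement, separator, field)
}

func main() {
	fmt.Println(joinName("my.prefix.", "cpu", ".", "usage"))
	// Output: my.prefix.cpu.usage
}
```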
plugins/outputs/wavefront/wavefront.go | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index 851609c751bd9..5c36afe59d8ec 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -19,6 +19,7 @@ type Wavefront struct { Port int Debug bool + Metric_separator string } var sanitizedChars = strings.NewReplacer("*", "-", `%`, "-", "#", "-") @@ -47,6 +48,9 @@ type MetricLine struct { func (w *Wavefront) Connect() error { // Test Connection to OpenTSDB Server + if w.Metric_separator == "" { + w.Metric_separator = "_" + } uri := fmt.Sprintf("%s:%d", w.Host, w.Port) tcpAddr, err := net.ResolveTCPAddr("tcp", uri) if err != nil { @@ -76,7 +80,7 @@ func (w *Wavefront) Write(metrics []telegraf.Metric) error { defer connection.Close() for _, m := range metrics { - for _, metric := range buildMetrics(m, now, w.Prefix) { + for _, metric := range buildMetrics(m, now, w) { messageLine := fmt.Sprintf("put %s %v %s %s\n", metric.Metric, metric.Timestamp, metric.Value, metric.Tags) if w.Debug { @@ -103,15 +107,15 @@ func buildTags(mTags map[string]string) []string { return tags } -func buildMetrics(m telegraf.Metric, now time.Time, prefix string) []*MetricLine { +func buildMetrics(m telegraf.Metric, now time.Time, w *Wavefront) []*MetricLine { ret := []*MetricLine{} for fieldName, value := range m.Fields() { metric := &MetricLine{ - Metric: sanitizedChars.Replace(fmt.Sprintf("%s%s_%s", - prefix, m.Name(), fieldName)), + Metric: sanitizedChars.Replace(fmt.Sprintf("%s%s%s%s", + w.Prefix, m.Name(), w.Metric_separator, fieldName)), Timestamp: now.Unix(), } - metricValue, buildError := buildValue(value) + metricValue, buildError := buildValue(value, metric.Metric) if buildError != nil { fmt.Printf("Wavefront: %s\n", buildError.Error()) continue @@ -124,7 +128,7 @@ func buildMetrics(m telegraf.Metric, now time.Time, prefix string) []*MetricLine return ret } -func buildValue(v interface{}) (string, error) { +func buildValue(v interface{}, name string) (string, error) { var retv string switch p := v.(type) { case int64: @@ -134,7 +138,7 @@ func buildValue(v interface{}) (string, error) { case float64: retv = FloatToString(float64(p)) default: - return retv, fmt.Errorf("unexpected type %T with value %v for Wavefront", v, v) + return retv, fmt.Errorf("unexpected type: %T, with value: %v, for: %s", v, v, name) } return retv, nil } From d672f8c973dda9479096221fe27606ec93e93648 Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Fri, 21 Oct 2016 14:35:39 -0700 Subject: [PATCH 05/16] Move to Wavefront format - Added convert_groups config parameter - Cleaned up error messages - Removed all references to OpenTSDB format --- plugins/outputs/wavefront/wavefront.go | 39 ++++++++++++++------- plugins/outputs/wavefront/wavefront_test.go | 14 ++++---- 2 files changed, 34 insertions(+), 19 deletions(-) diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index 5c36afe59d8ec..775f92c8fa0f1 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -14,15 +14,16 @@ import ( type Wavefront struct { Prefix string - Host string Port int + Metric_separator string + Convert_groups bool Debug bool - Metric_separator string } var sanitizedChars = strings.NewReplacer("*", "-", `%`, "-", "#", "-") +var groupReplacer = strings.NewReplacer("_", "_") var sampleConfig = ` ## prefix for metrics keys @@ -33,7 
+34,14 @@ var sampleConfig = ` host = "wavefront.example.com" ## Port of the Wavefront proxy server in telnet mode - port = 4242 + port = 2878 + + ## character to use between metric and field name. defaults to _ (underscore) + metric_separator = "." + + ## Convert metric name groups to use metric_seperator character + ## When true will convert all _ (underscore) chartacters in final metric name + convert_groups = true ## Debug true - Prints Wavefront communication debug = false @@ -47,18 +55,22 @@ type MetricLine struct { } func (w *Wavefront) Connect() error { - // Test Connection to OpenTSDB Server if w.Metric_separator == "" { w.Metric_separator = "_" } + if w.Convert_groups { + groupReplacer = strings.NewReplacer("_", w.Metric_separator) + } + + // Test Connection to Wavefront Server uri := fmt.Sprintf("%s:%d", w.Host, w.Port) tcpAddr, err := net.ResolveTCPAddr("tcp", uri) if err != nil { - return fmt.Errorf("Wavefront: TCP address cannot be resolved") + return fmt.Errorf("Wavefront: TCP address cannot be resolved %s", err.Error()) } connection, err := net.DialTCP("tcp", nil, tcpAddr) if err != nil { - return fmt.Errorf("Wavefront: TCP connect fail") + return fmt.Errorf("Wavefront: TCP connect fail %s", err.Error()) } defer connection.Close() return nil @@ -75,14 +87,14 @@ func (w *Wavefront) Write(metrics []telegraf.Metric) error { tcpAddr, _ := net.ResolveTCPAddr("tcp", uri) connection, err := net.DialTCP("tcp", nil, tcpAddr) if err != nil { - return fmt.Errorf("Wavefront: TCP connect fail") + return fmt.Errorf("Wavefront: TCP connect fail %s", err.Error()) } defer connection.Close() for _, m := range metrics { for _, metric := range buildMetrics(m, now, w) { - messageLine := fmt.Sprintf("put %s %v %s %s\n", - metric.Metric, metric.Timestamp, metric.Value, metric.Tags) + messageLine := fmt.Sprintf("%s %s %v %s\n", + metric.Metric, metric.Value, metric.Timestamp, metric.Tags) if w.Debug { fmt.Print(messageLine) } @@ -110,9 +122,12 @@ func buildTags(mTags map[string]string) []string { func buildMetrics(m telegraf.Metric, now time.Time, w *Wavefront) []*MetricLine { ret := []*MetricLine{} for fieldName, value := range m.Fields() { + name := sanitizedChars.Replace(fmt.Sprintf("%s%s%s%s", w.Prefix, m.Name(), w.Metric_separator, fieldName)) + if w.Convert_groups { + name = groupReplacer.Replace(name) + } metric := &MetricLine{ - Metric: sanitizedChars.Replace(fmt.Sprintf("%s%s%s%s", - w.Prefix, m.Name(), w.Metric_separator, fieldName)), + Metric: name, Timestamp: now.Unix(), } metricValue, buildError := buildValue(value, metric.Metric) @@ -160,7 +175,7 @@ func (w *Wavefront) SampleConfig() string { } func (w *Wavefront) Description() string { - return "Configuration for OpenTSDB server to send metrics to" + return "Configuration for Wavefront server to send metrics to" } func (w *Wavefront) Close() error { diff --git a/plugins/outputs/wavefront/wavefront_test.go b/plugins/outputs/wavefront/wavefront_test.go index db69005213ea5..cb04110a6a0c2 100644 --- a/plugins/outputs/wavefront/wavefront_test.go +++ b/plugins/outputs/wavefront/wavefront_test.go @@ -46,18 +46,18 @@ func TestBuildTagsTelnet(t *testing.T) { // t.Skip("Skipping integration test in short mode") // } -// o := &OpenTSDB{ +// w := &Wavefront{ // Host: testutil.GetLocalHost(), -// Port: 4242, +// Port: 2878, // Prefix: "prefix.test.", // } -// // Verify that we can connect to the OpenTSDB instance -// err := o.Connect() +// // Verify that we can connect to the Wavefront instance +// err := w.Connect() // require.NoError(t, err) 
-// // Verify that we can successfully write data to OpenTSDB -// err = o.Write(testutil.MockMetrics()) +// // Verify that we can successfully write data to Wavefront +// err = w.Write(testutil.MockMetrics()) // require.NoError(t, err) // // Verify postive and negative test cases of writing data @@ -75,6 +75,6 @@ func TestBuildTagsTelnet(t *testing.T) { // metrics = append(metrics, testutil.TestMetric(float64(42.0), // "metric w/ specialchars")) -// err = o.Write(metrics) +// err = w.Write(metrics) // require.NoError(t, err) // } From 23657b8b3ffd75b379d072864c0f4c40d73d72f7 Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Tue, 25 Oct 2016 10:53:12 -0400 Subject: [PATCH 06/16] config names + special char handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Updated configuration names and default values - Updated special character handling and added new regex option - Added new “hidden” DebugAll option to print original metric and field names --- plugins/outputs/wavefront/wavefront.go | 113 +++++++++++++------- plugins/outputs/wavefront/wavefront_test.go | 61 +++++++++-- 2 files changed, 126 insertions(+), 48 deletions(-) diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index 775f92c8fa0f1..b66987b81ebc8 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -6,63 +6,78 @@ import ( "sort" "strconv" "strings" - "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" + "regexp" ) type Wavefront struct { - Prefix string - Host string - Port int - Metric_separator string - Convert_groups bool - - Debug bool + Host string + Port int + Prefix string + MetricSeparator string + ConvertPaths bool + UseRegex bool + + Debug bool + DebugAll bool } -var sanitizedChars = strings.NewReplacer("*", "-", `%`, "-", "#", "-") -var groupReplacer = strings.NewReplacer("_", "_") +// catch many of the invalid chars that could appear in a metric or tag name +var sanitizedChars = strings.NewReplacer( + "!", "-", "@", "-", "#", "-", "$", "-", "%", "-", "^", "-", "&", "-", + "*", "-", "(", "-", ")", "-", "+", "-", "`", "-", "'", "-", "\"", "-", + "[", "-", "]", "-", "{", "-", "}", "-", ":", "-", ";", "-", "<", "-", + ">", "-", ",", "-", "?", "-", "/", "-", "\\", "-", "|", "-", " ", "-", + ) +// instead of Replacer which may miss some special characters we can use a regex pattern, but this is significantly slower than Replacer +var sanitizedRegex, _ = regexp.Compile("[^a-zA-Z\\d_.-]") + +var pathReplacer = strings.NewReplacer("_", "_") var sampleConfig = ` ## prefix for metrics keys prefix = "my.specific.prefix." - ## Telnet Mode ## - ## DNS name of the wavefront proxy server in telnet mode + ## DNS name of the wavefront proxy server host = "wavefront.example.com" - ## Port of the Wavefront proxy server in telnet mode + ## Port that the Wavefront proxy server listens on port = 2878 - ## character to use between metric and field name. defaults to _ (underscore) - metric_separator = "." + ## character to use between metric and field name. defaults to . (dot) + metricSeparator = "." 
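  ## with "." as the separator, measurement "mem" and field "free" emit as "mem.free"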
- ## Convert metric name groups to use metric_seperator character - ## When true will convert all _ (underscore) chartacters in final metric name - convert_groups = true + ## Convert metric name paths to use metricSeperator character + ## When true (edfault) will convert all _ (underscore) chartacters in final metric name + convertPaths = true - ## Debug true - Prints Wavefront communication + ## Use Regex to sanitize metric and tag names from invalid characters + ## Regex is more thorough, but significantly slower + useRegex = false + + ## Print all Wavefront communication debug = false ` type MetricLine struct { Metric string - Timestamp int64 Value string + Timestamp int64 Tags string } func (w *Wavefront) Connect() error { - if w.Metric_separator == "" { - w.Metric_separator = "_" + + if w.ConvertPaths && w.MetricSeparator == "_" { + w.ConvertPaths = false } - if w.Convert_groups { - groupReplacer = strings.NewReplacer("_", w.Metric_separator) + if w.ConvertPaths { + pathReplacer = strings.NewReplacer("_", w.MetricSeparator) } - // Test Connection to Wavefront Server + // Test Connection to Wavefront proxy Server uri := fmt.Sprintf("%s:%d", w.Host, w.Port) tcpAddr, err := net.ResolveTCPAddr("tcp", uri) if err != nil { @@ -80,9 +95,8 @@ func (w *Wavefront) Write(metrics []telegraf.Metric) error { if len(metrics) == 0 { return nil } - now := time.Now() - // Send Data with telnet / socket communication + // Send Data to Wavefront proxy Server uri := fmt.Sprintf("%s:%d", w.Host, w.Port) tcpAddr, _ := net.ResolveTCPAddr("tcp", uri) connection, err := net.DialTCP("tcp", nil, tcpAddr) @@ -92,9 +106,8 @@ func (w *Wavefront) Write(metrics []telegraf.Metric) error { defer connection.Close() for _, m := range metrics { - for _, metric := range buildMetrics(m, now, w) { - messageLine := fmt.Sprintf("%s %s %v %s\n", - metric.Metric, metric.Value, metric.Timestamp, metric.Tags) + for _, metric := range buildMetrics(m, w) { + messageLine := fmt.Sprintf("%s %s %v %s\n", metric.Metric, metric.Value, metric.Timestamp, metric.Tags) if w.Debug { fmt.Print(messageLine) } @@ -108,27 +121,46 @@ func (w *Wavefront) Write(metrics []telegraf.Metric) error { return nil } -func buildTags(mTags map[string]string) []string { +func buildTags(mTags map[string]string, w *Wavefront) []string { tags := make([]string, len(mTags)) index := 0 for k, v := range mTags { - tags[index] = sanitizedChars.Replace(fmt.Sprintf("%s=\"%s\"", k, v)) + if w.UseRegex { + tags[index] = fmt.Sprintf("%s=\"%s\"", sanitizedRegex.ReplaceAllString(k, "-"), sanitizedRegex.ReplaceAllString(v, "-")) + } else { + tags[index] = fmt.Sprintf("%s=\"%s\"", sanitizedChars.Replace(k), sanitizedChars.Replace(v)) + } index++ } sort.Strings(tags) return tags } -func buildMetrics(m telegraf.Metric, now time.Time, w *Wavefront) []*MetricLine { +func buildMetrics(m telegraf.Metric, w *Wavefront) []*MetricLine { + if w.DebugAll { + fmt.Printf("Original name: %s\n", m.Name()) + } + ret := []*MetricLine{} for fieldName, value := range m.Fields() { - name := sanitizedChars.Replace(fmt.Sprintf("%s%s%s%s", w.Prefix, m.Name(), w.Metric_separator, fieldName)) - if w.Convert_groups { - name = groupReplacer.Replace(name) + if w.DebugAll { + fmt.Printf("Original field: %s\n", fieldName) + } + + name := fmt.Sprintf("%s%s%s%s", w.Prefix, m.Name(), w.MetricSeparator, fieldName) + if w.UseRegex { + name = sanitizedRegex.ReplaceAllLiteralString(name, "-") + } else { + name = sanitizedChars.Replace(name) } + + if w.ConvertPaths { + name = pathReplacer.Replace(name) + } + 
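	// note: from this patch on, the line's timestamp comes from the metric
	// itself (m.UnixNano() / 1e9, i.e. epoch seconds) rather than from a
	// single time.Now() captured per Write call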
metric := &MetricLine{ Metric: name, - Timestamp: now.Unix(), + Timestamp: m.UnixNano() / 1000000000, } metricValue, buildError := buildValue(value, metric.Metric) if buildError != nil { @@ -136,7 +168,7 @@ func buildMetrics(m telegraf.Metric, now time.Time, w *Wavefront) []*MetricLine continue } metric.Value = metricValue - tagsSlice := buildTags(m.Tags()) + tagsSlice := buildTags(m.Tags(), w) metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " ")) ret = append(ret, metric) } @@ -184,6 +216,9 @@ func (w *Wavefront) Close() error { func init() { outputs.Add("wavefront", func() telegraf.Output { - return &Wavefront{} + return &Wavefront{ + MetricSeparator: ".", + ConvertPaths: true, + } }) } diff --git a/plugins/outputs/wavefront/wavefront_test.go b/plugins/outputs/wavefront/wavefront_test.go index cb04110a6a0c2..0ae424d2bc662 100644 --- a/plugins/outputs/wavefront/wavefront_test.go +++ b/plugins/outputs/wavefront/wavefront_test.go @@ -3,30 +3,73 @@ package wavefront import ( "reflect" "testing" - // "github.com/influxdata/telegraf/testutil" - // "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" + "github.com/influxdata/telegraf" + "strings" ) -func TestBuildTagsTelnet(t *testing.T) { +func defaultWavefront() *Wavefront { + return &Wavefront{ + Host: "localhost", + Port: 2878, + Prefix: "testWF.", + MetricSeparator: ".", + ConvertPaths: true, + UseRegex: false, + Debug: true, + } +} + +func TestBuildMetrics(t *testing.T) { + w := defaultWavefront() + w.UseRegex = false + w.Prefix = "testthis." + pathReplacer = strings.NewReplacer("_", w.MetricSeparator) + + var metricTests = []struct { + metric telegraf.Metric + metricLines []MetricLine + } { + { + testutil.TestMetric(float64(1.0), "testing_just*a%metric:float"), + []MetricLine{{Metric: w.Prefix + "testing.just-a-metric-float.value"}}, + }, + } + + for _, mt := range metricTests { + ml := buildMetrics(mt.metric, w) + for i, line := range ml { + if mt.metricLines[i].Metric != line.Metric { + t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", mt.metricLines[i].Metric, line.Metric) + } + } + } + +} + +func TestBuildTags(t *testing.T) { + + w := defaultWavefront() + var tagtests = []struct { ptIn map[string]string outTags []string }{ { map[string]string{"one": "two", "three": "four"}, - []string{"one=two", "three=four"}, + []string{"one=\"two\"", "three=\"four\""}, }, { map[string]string{"aaa": "bbb"}, - []string{"aaa=bbb"}, + []string{"aaa=\"bbb\""}, }, { map[string]string{"one": "two", "aaa": "bbb"}, - []string{"aaa=bbb", "one=two"}, + []string{"aaa=\"bbb\"", "one=\"two\""}, }, { map[string]string{"Sp%ci@l Chars": "g$t repl#ced"}, - []string{"Sp-ci-l_Chars=g-t_repl-ced"}, + []string{"Sp-ci-l-Chars=\"g-t-repl-ced\""}, }, { map[string]string{}, @@ -34,9 +77,9 @@ func TestBuildTagsTelnet(t *testing.T) { }, } for _, tt := range tagtests { - tags := buildTags(tt.ptIn) + tags := buildTags(tt.ptIn, w) if !reflect.DeepEqual(tags, tt.outTags) { - t.Errorf("\nexpected %+v\ngot %+v\n", tt.outTags, tags) + t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", tt.outTags, tags) } } } From 34335bbcdacc1b7f416f9bf9669cbdf570e18f85 Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Tue, 25 Oct 2016 18:37:11 -0400 Subject: [PATCH 07/16] change host to source + simple value fields MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Change tag host to be source per Wavefront data format - Added SimpleFields property which when true will use “value” as field name when specified (default false) 
- Cleanup of sample config --- plugins/outputs/wavefront/wavefront.go | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index b66987b81ebc8..4dc80b5de05b8 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -3,19 +3,20 @@ package wavefront import ( "fmt" "net" + "regexp" "sort" "strconv" "strings" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" - "regexp" ) type Wavefront struct { Host string Port int Prefix string + SimpleFields bool MetricSeparator string ConvertPaths bool UseRegex bool @@ -46,16 +47,19 @@ var sampleConfig = ` ## Port that the Wavefront proxy server listens on port = 2878 + ## wether to use "value" for name of simple fields + simple_fields = false + ## character to use between metric and field name. defaults to . (dot) - metricSeparator = "." + metric_separator = "." ## Convert metric name paths to use metricSeperator character ## When true (edfault) will convert all _ (underscore) chartacters in final metric name - convertPaths = true + convert_paths = true ## Use Regex to sanitize metric and tag names from invalid characters ## Regex is more thorough, but significantly slower - useRegex = false + use_regex = false ## Print all Wavefront communication debug = false @@ -125,11 +129,16 @@ func buildTags(mTags map[string]string, w *Wavefront) []string { tags := make([]string, len(mTags)) index := 0 for k, v := range mTags { + if k == "host" { + k = "source" + } + if w.UseRegex { tags[index] = fmt.Sprintf("%s=\"%s\"", sanitizedRegex.ReplaceAllString(k, "-"), sanitizedRegex.ReplaceAllString(v, "-")) } else { tags[index] = fmt.Sprintf("%s=\"%s\"", sanitizedChars.Replace(k), sanitizedChars.Replace(v)) } + index++ } sort.Strings(tags) @@ -147,7 +156,13 @@ func buildMetrics(m telegraf.Metric, w *Wavefront) []*MetricLine { fmt.Printf("Original field: %s\n", fieldName) } - name := fmt.Sprintf("%s%s%s%s", w.Prefix, m.Name(), w.MetricSeparator, fieldName) + var name string + if !w.SimpleFields && fieldName == "value" { + name = fmt.Sprintf("%s%s", w.Prefix, m.Name()) + } else { + name = fmt.Sprintf("%s%s%s%s", w.Prefix, m.Name(), w.MetricSeparator, fieldName) + } + if w.UseRegex { name = sanitizedRegex.ReplaceAllLiteralString(name, "-") } else { From d2a68c7eb4742ba51505c6162e9d8be7d8ff6b25 Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Tue, 25 Oct 2016 18:37:30 -0400 Subject: [PATCH 08/16] Added new test cases for SimpleFields option --- plugins/outputs/wavefront/wavefront_test.go | 75 ++++++++++++++++++--- 1 file changed, 67 insertions(+), 8 deletions(-) diff --git a/plugins/outputs/wavefront/wavefront_test.go b/plugins/outputs/wavefront/wavefront_test.go index 0ae424d2bc662..abde142627563 100644 --- a/plugins/outputs/wavefront/wavefront_test.go +++ b/plugins/outputs/wavefront/wavefront_test.go @@ -6,6 +6,7 @@ import ( "github.com/influxdata/telegraf/testutil" "github.com/influxdata/telegraf" "strings" + "time" ) func defaultWavefront() *Wavefront { @@ -13,6 +14,7 @@ func defaultWavefront() *Wavefront { Host: "localhost", Port: 2878, Prefix: "testWF.", + SimpleFields: false, MetricSeparator: ".", ConvertPaths: true, UseRegex: false, @@ -20,27 +22,80 @@ func defaultWavefront() *Wavefront { } } -func TestBuildMetrics(t *testing.T) { +func TestBuildMetricsNoSimpleFields(t *testing.T) { w := defaultWavefront() w.UseRegex = false w.Prefix = "testthis." 
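	// with SimpleFields left false, a lone "value" field collapses into the
	// bare metric name: "test.simple.metric" rather than "test.simple.metric.value"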
+ w.SimpleFields = false + pathReplacer = strings.NewReplacer("_", w.MetricSeparator) + testMetric1, _ := telegraf.NewMetric( + "test.simple.metric", + map[string]string{"tag1": "value1"}, + map[string]interface{}{"value": 123}, + time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + var metricTests = []struct { metric telegraf.Metric metricLines []MetricLine } { { testutil.TestMetric(float64(1.0), "testing_just*a%metric:float"), - []MetricLine{{Metric: w.Prefix + "testing.just-a-metric-float.value"}}, + []MetricLine{{Metric: w.Prefix + "testing.just-a-metric-float", Value: "1.000000"}}, + }, + { + testMetric1, + []MetricLine{{Metric: w.Prefix + "test.simple.metric", Value: "123"}}, }, } for _, mt := range metricTests { ml := buildMetrics(mt.metric, w) for i, line := range ml { - if mt.metricLines[i].Metric != line.Metric { - t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", mt.metricLines[i].Metric, line.Metric) + if mt.metricLines[i].Metric != line.Metric || mt.metricLines[i].Value != line.Value { + t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", mt.metricLines[i].Metric + " " + mt.metricLines[i].Value, line.Metric + " " + line.Value) + } + } + } + +} + +func TestBuildMetricsWithSimpleFields(t *testing.T) { + w := defaultWavefront() + w.UseRegex = false + w.Prefix = "testthis." + w.SimpleFields = true + + pathReplacer = strings.NewReplacer("_", w.MetricSeparator) + + testMetric1, _ := telegraf.NewMetric( + "test.simple.metric", + map[string]string{"tag1": "value1"}, + map[string]interface{}{"value": 123}, + time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + var metricTests = []struct { + metric telegraf.Metric + metricLines []MetricLine + } { + { + testutil.TestMetric(float64(1.0), "testing_just*a%metric:float"), + []MetricLine{{Metric: w.Prefix + "testing.just-a-metric-float.value", Value: "1.000000"}}, + }, + { + testMetric1, + []MetricLine{{Metric: w.Prefix + "test.simple.metric.value", Value: "123"}}, + }, + } + + for _, mt := range metricTests { + ml := buildMetrics(mt.metric, w) + for i, line := range ml { + if mt.metricLines[i].Metric != line.Metric || mt.metricLines[i].Value != line.Value { + t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", mt.metricLines[i].Metric + " " + mt.metricLines[i].Value, line.Metric + " " + line.Value) } } } @@ -64,12 +119,16 @@ func TestBuildTags(t *testing.T) { []string{"aaa=\"bbb\""}, }, { - map[string]string{"one": "two", "aaa": "bbb"}, - []string{"aaa=\"bbb\"", "one=\"two\""}, + map[string]string{"bbb": "789", "aaa": "123"}, + []string{"aaa=\"123\"", "bbb=\"789\""}, + }, + { + map[string]string{"host": "aaa", "dc": "bbb"}, + []string{"dc=\"bbb\"", "source=\"aaa\""}, }, { - map[string]string{"Sp%ci@l Chars": "g$t repl#ced"}, - []string{"Sp-ci-l-Chars=\"g-t-repl-ced\""}, + map[string]string{"Sp%ci@l Chars": "\"g$t repl#ced"}, + []string{"Sp-ci-l-Chars=\"-g-t-repl-ced\""}, }, { map[string]string{}, From e03bf270ad4c2436c29e9c958059e5abee8b36e0 Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Thu, 27 Oct 2016 13:18:45 -0400 Subject: [PATCH 09/16] updated output to use log. 
to stay consistent with all logging --- plugins/outputs/wavefront/wavefront.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index 4dc80b5de05b8..733c6c19684a2 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -10,6 +10,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" + "log" ) type Wavefront struct { @@ -113,7 +114,7 @@ func (w *Wavefront) Write(metrics []telegraf.Metric) error { for _, metric := range buildMetrics(m, w) { messageLine := fmt.Sprintf("%s %s %v %s\n", metric.Metric, metric.Value, metric.Timestamp, metric.Tags) if w.Debug { - fmt.Print(messageLine) + log.Printf("DEBUG: output [wavefront] %s", messageLine) } _, err := connection.Write([]byte(messageLine)) if err != nil { @@ -147,13 +148,13 @@ func buildTags(mTags map[string]string, w *Wavefront) []string { func buildMetrics(m telegraf.Metric, w *Wavefront) []*MetricLine { if w.DebugAll { - fmt.Printf("Original name: %s\n", m.Name()) + log.Printf("DEBUG: output [wavefront] original name: %s\n", m.Name()) } ret := []*MetricLine{} for fieldName, value := range m.Fields() { if w.DebugAll { - fmt.Printf("Original field: %s\n", fieldName) + log.Printf("DEBUG: output [wavefront] original field: %s\n", fieldName) } var name string @@ -179,7 +180,7 @@ func buildMetrics(m telegraf.Metric, w *Wavefront) []*MetricLine { } metricValue, buildError := buildValue(value, metric.Metric) if buildError != nil { - fmt.Printf("Wavefront: %s\n", buildError.Error()) + log.Printf("ERROR: output [wavefront] %s\n", buildError.Error()) continue } metric.Value = metricValue From a0f412c79628d9e855f99afe8a3d0a88bacf1797 Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Thu, 3 Nov 2016 12:16:57 -0400 Subject: [PATCH 10/16] Merge remote-tracking branch 'wavefrontHQ/master' # Conflicts: # Makefile # plugins/outputs/wavefront/wavefront.go # plugins/outputs/wavefront/wavefront_test.go --- CHANGELOG.md | 57 +- CONTRIBUTING.md | 190 ++- Godeps | 9 +- Makefile | 4 + README.md | 70 +- accumulator.go | 12 +- agent/accumulator.go | 179 +- agent/accumulator_test.go | 513 ++---- agent/agent.go | 189 ++- aggregator.go | 22 + circle.yml | 9 +- cmd/telegraf/telegraf.go | 165 +- docs/CONFIGURATION.md | 129 +- docs/DATA_FORMATS_INPUT.md | 10 + etc/telegraf.conf | 190 ++- etc/telegraf_windows.conf | 130 +- internal/buffer/buffer.go | 6 + internal/config/config.go | 365 +++- internal/globpath/globpath.go | 32 +- internal/internal.go | 15 +- internal/internal_test.go | 19 + internal/models/filter.go | 2 +- internal/models/makemetric.go | 154 ++ internal/models/running_aggregator.go | 164 ++ internal/models/running_aggregator_test.go | 256 +++ internal/models/running_input.go | 62 +- internal/models/running_input_test.go | 352 ++++ internal/models/running_output.go | 4 +- internal/models/running_output_test.go | 4 - internal/models/running_processor.go | 44 + internal/models/running_processor_test.go | 117 ++ logger/logger.go | 58 + metric.go | 44 +- plugins/aggregators/all/all.go | 5 + plugins/aggregators/minmax/minmax.go | 119 ++ plugins/aggregators/minmax/minmax_test.go | 162 ++ plugins/aggregators/registry.go | 11 + plugins/inputs/aerospike/aerospike.go | 4 +- plugins/inputs/all/all.go | 1 + plugins/inputs/cassandra/README.md | 64 +- plugins/inputs/cassandra/cassandra.go | 2 +- plugins/inputs/ceph/ceph.go | 6 +- plugins/inputs/cgroup/README.md | 9 +- 
plugins/inputs/cgroup/cgroup.go | 21 +- plugins/inputs/cgroup/cgroup_linux.go | 5 +- plugins/inputs/cgroup/cgroup_test.go | 84 +- plugins/inputs/conntrack/conntrack.go | 5 +- plugins/inputs/docker/README.md | 6 + plugins/inputs/docker/docker.go | 5 +- plugins/inputs/docker/docker_test.go | 6 + plugins/inputs/haproxy/README.md | 37 + plugins/inputs/haproxy/haproxy.go | 63 +- plugins/inputs/haproxy/haproxy_test.go | 143 +- plugins/inputs/http_listener/bufferpool.go | 43 + plugins/inputs/http_listener/http_listener.go | 272 ++- .../http_listener/http_listener_test.go | 178 +- .../http_listener/stoppableListener/LICENSE | 10 - .../stoppableListener/listener.go | 62 - .../inputs/http_listener/testdata/testmsgs.gz | Bin 0 -> 97 bytes plugins/inputs/influxdb/influxdb.go | 33 +- .../inputs/kafka_consumer/kafka_consumer.go | 10 +- plugins/inputs/kubernetes/README.md | 265 +++ plugins/inputs/kubernetes/kubernetes.go | 242 +++ .../inputs/kubernetes/kubernetes_metrics.go | 93 + plugins/inputs/kubernetes/kubernetes_test.go | 289 ++++ plugins/inputs/logparser/grok/grok.go | 14 +- plugins/inputs/logparser/grok/grok_test.go | 25 + .../inputs/logparser/grok/influx_patterns.go | 4 +- .../logparser/grok/patterns/influx-patterns | 4 +- plugins/inputs/logparser/logparser.go | 6 +- plugins/inputs/mailchimp/chimp_api.go | 4 +- plugins/inputs/mesos/README.md | 49 +- plugins/inputs/mesos/mesos.go | 29 +- plugins/inputs/mesos/mesos_test.go | 94 +- plugins/inputs/mongodb/mongodb_server.go | 6 +- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 8 +- plugins/inputs/mysql/mysql.go | 126 +- plugins/inputs/mysql/mysql_test.go | 22 +- plugins/inputs/mysql/parse_dsn.go | 85 - plugins/inputs/nats_consumer/nats_consumer.go | 67 +- .../nats_consumer/nats_consumer_test.go | 5 + plugins/inputs/nsq_consumer/nsq_consumer.go | 2 +- plugins/inputs/ntpq/ntpq.go | 10 +- plugins/inputs/phpfpm/phpfpm.go | 3 + plugins/inputs/ping/ping.go | 15 +- .../postgresql_extensible.go | 5 +- plugins/inputs/powerdns/powerdns.go | 17 +- plugins/inputs/powerdns/powerdns_test.go | 145 +- plugins/inputs/procstat/README.md | 2 +- plugins/inputs/procstat/procstat.go | 2 +- plugins/inputs/rabbitmq/rabbitmq.go | 4 +- plugins/inputs/snmp/CONFIG-EXAMPLES.md | 65 + plugins/inputs/snmp/DEBUGGING.md | 53 + plugins/inputs/snmp/README.md | 47 +- plugins/inputs/snmp/snmp.go | 257 ++- plugins/inputs/snmp/snmp_mocks_generate.go | 100 ++ plugins/inputs/snmp/snmp_mocks_test.go | 85 + plugins/inputs/snmp/snmp_test.go | 182 +- plugins/inputs/snmp_legacy/snmp_legacy.go | 10 +- plugins/inputs/sqlserver/sqlserver.go | 36 +- plugins/inputs/statsd/README.md | 1 - plugins/inputs/statsd/statsd.go | 36 +- plugins/inputs/statsd/statsd_test.go | 3 +- plugins/inputs/sysstat/sysstat.go | 4 + plugins/inputs/system/processes.go | 6 +- plugins/inputs/tail/tail.go | 8 +- plugins/inputs/tcp_listener/tcp_listener.go | 26 +- plugins/inputs/udp_listener/udp_listener.go | 21 +- plugins/inputs/varnish/varnish.go | 4 +- .../webhooks/filestack/filestack_webhooks.go | 2 +- .../inputs/webhooks/github/github_webhooks.go | 4 +- .../webhooks/mandrill/mandrill_webhooks.go | 2 +- .../webhooks/rollbar/rollbar_webhooks.go | 2 +- plugins/inputs/webhooks/webhooks.go | 6 +- plugins/inputs/win_perf_counters/README.md | 2 +- plugins/outputs/amon/amon.go | 2 +- plugins/outputs/amqp/amqp.go | 6 +- plugins/outputs/cloudwatch/cloudwatch.go | 4 +- plugins/outputs/datadog/datadog.go | 2 +- plugins/outputs/graphite/graphite.go | 4 +- plugins/outputs/graylog/graylog.go | 9 +- plugins/outputs/influxdb/influxdb.go | 6 
+- plugins/outputs/instrumental/instrumental.go | 8 +- plugins/outputs/kinesis/kinesis.go | 16 +- plugins/outputs/librato/librato.go | 31 +- plugins/outputs/nats/nats.go | 11 +- plugins/outputs/opentsdb/opentsdb.go | 17 +- plugins/outputs/opentsdb/opentsdb_http.go | 8 +- .../prometheus_client/prometheus_client.go | 22 +- plugins/outputs/riemann/riemann.go | 5 + plugins/outputs/wavefront/wavefront.go | 17 + plugins/outputs/wavefront/wavefront_test.go | 20 + plugins/parsers/graphite/parser.go | 81 +- plugins/parsers/graphite/parser_test.go | 42 + plugins/parsers/influx/parser.go | 29 +- plugins/parsers/json/parser.go | 4 + plugins/processors/all/all.go | 5 + plugins/processors/printer/printer.go | 35 + plugins/processors/printer/printer_test.go | 1 + plugins/processors/registry.go | 11 + processor.go | 12 + scripts/circle-test.sh | 4 + scripts/telegraf.service | 4 +- telegraf.conf | 1498 +++++++++++++++++ testutil/accumulator.go | 7 + 145 files changed, 7413 insertions(+), 2054 deletions(-) create mode 100644 aggregator.go create mode 100644 internal/models/makemetric.go create mode 100644 internal/models/running_aggregator.go create mode 100644 internal/models/running_aggregator_test.go create mode 100644 internal/models/running_input_test.go create mode 100644 internal/models/running_processor.go create mode 100644 internal/models/running_processor_test.go create mode 100644 logger/logger.go create mode 100644 plugins/aggregators/all/all.go create mode 100644 plugins/aggregators/minmax/minmax.go create mode 100644 plugins/aggregators/minmax/minmax_test.go create mode 100644 plugins/aggregators/registry.go create mode 100644 plugins/inputs/haproxy/README.md create mode 100644 plugins/inputs/http_listener/bufferpool.go delete mode 100644 plugins/inputs/http_listener/stoppableListener/LICENSE delete mode 100644 plugins/inputs/http_listener/stoppableListener/listener.go create mode 100644 plugins/inputs/http_listener/testdata/testmsgs.gz create mode 100644 plugins/inputs/kubernetes/README.md create mode 100644 plugins/inputs/kubernetes/kubernetes.go create mode 100644 plugins/inputs/kubernetes/kubernetes_metrics.go create mode 100644 plugins/inputs/kubernetes/kubernetes_test.go delete mode 100644 plugins/inputs/mysql/parse_dsn.go create mode 100644 plugins/inputs/snmp/CONFIG-EXAMPLES.md create mode 100644 plugins/inputs/snmp/DEBUGGING.md create mode 100644 plugins/inputs/snmp/snmp_mocks_generate.go create mode 100644 plugins/inputs/snmp/snmp_mocks_test.go create mode 100644 plugins/processors/all/all.go create mode 100644 plugins/processors/printer/printer.go create mode 100644 plugins/processors/printer/printer_test.go create mode 100644 plugins/processors/registry.go create mode 100644 processor.go create mode 100644 telegraf.conf diff --git a/CHANGELOG.md b/CHANGELOG.md index c5045196cdfd5..83aecc135a2bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,8 +2,21 @@ ### Release Notes +- Telegraf now supports two new types of plugins: processors & aggregators. + +- On systemd Telegraf will no longer redirect it's stdout to /var/log/telegraf/telegraf.log. +On most systems, the logs will be directed to the systemd journal and can be +accessed by `journalctl -u telegraf.service`. Consult the systemd journal +documentation for configuring journald. There is also a [`logfile` config option](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf#L70) +available in 1.1, which will allow users to easily configure telegraf to +continue sending logs to /var/log/telegraf/telegraf.log. 
+ ### Features +- [#1726](https://github.com/influxdata/telegraf/issues/1726): Processor & Aggregator plugin support. +- [#1861](https://github.com/influxdata/telegraf/pull/1861): adding the tags in the graylog output plugin +- [#1732](https://github.com/influxdata/telegraf/pull/1732): Telegraf systemd service, log to journal. +- [#1782](https://github.com/influxdata/telegraf/pull/1782): Allow numeric and non-string values for tag_keys. - [#1694](https://github.com/influxdata/telegraf/pull/1694): Adding Gauge and Counter metric types. - [#1606](https://github.com/influxdata/telegraf/pull/1606): Remove carraige returns from exec plugin output on Windows - [#1674](https://github.com/influxdata/telegraf/issues/1674): elasticsearch input: configurable timeout. @@ -17,18 +30,58 @@ - [#1542](https://github.com/influxdata/telegraf/pull/1542): Add filestack webhook plugin. - [#1599](https://github.com/influxdata/telegraf/pull/1599): Add server hostname for each docker measurements. - [#1697](https://github.com/influxdata/telegraf/pull/1697): Add NATS output plugin. -- [#1407](https://github.com/influxdata/telegraf/pull/1407): HTTP service listener input plugin. +- [#1407](https://github.com/influxdata/telegraf/pull/1407) & [#1915](https://github.com/influxdata/telegraf/pull/1915): HTTP service listener input plugin. - [#1699](https://github.com/influxdata/telegraf/pull/1699): Add database blacklist option for Postgresql +- [#1791](https://github.com/influxdata/telegraf/pull/1791): Add Docker container state metrics to Docker input plugin output +- [#1755](https://github.com/influxdata/telegraf/issues/1755): Add support to SNMP for IP & MAC address conversion. +- [#1729](https://github.com/influxdata/telegraf/issues/1729): Add support to SNMP for OID index suffixes. +- [#1813](https://github.com/influxdata/telegraf/pull/1813): Change default arguments for SNMP plugin. +- [#1686](https://github.com/influxdata/telegraf/pull/1686): Mesos input plugin: very high-cardinality mesos-task metrics removed. +- [#1838](https://github.com/influxdata/telegraf/pull/1838): Logging overhaul to centralize the logger & log levels, & provide a logfile config option. +- [#1700](https://github.com/influxdata/telegraf/pull/1700): HAProxy plugin socket glob matching. +- [#1847](https://github.com/influxdata/telegraf/pull/1847): Add Kubernetes plugin for retrieving pod metrics. ### Bugfixes +- [#1955](https://github.com/influxdata/telegraf/issues/1955): Fix NATS plug-ins reconnection logic. +- [#1936](https://github.com/influxdata/telegraf/issues/1936): Set required default values in udp_listener & tcp_listener. +- [#1926](https://github.com/influxdata/telegraf/issues/1926): Fix toml unmarshal panic in Duration objects. +- [#1746](https://github.com/influxdata/telegraf/issues/1746): Fix handling of non-string values for JSON keys listed in tag_keys. - [#1628](https://github.com/influxdata/telegraf/issues/1628): Fix mongodb input panic on version 2.2. 
-- [#1738](https://github.com/influxdata/telegraf/issues/1738): Fix unmarshal of influxdb metrics with null tags - [#1733](https://github.com/influxdata/telegraf/issues/1733): Fix statsd scientific notation parsing - [#1716](https://github.com/influxdata/telegraf/issues/1716): Sensors plugin strconv.ParseFloat: parsing "": invalid syntax - [#1530](https://github.com/influxdata/telegraf/issues/1530): Fix prometheus_client reload panic - [#1764](https://github.com/influxdata/telegraf/issues/1764): Fix kafka consumer panic when nil error is returned down errs channel. - [#1768](https://github.com/influxdata/telegraf/pull/1768): Speed up statsd parsing. +- [#1751](https://github.com/influxdata/telegraf/issues/1751): Fix powerdns integer parse error handling. +- [#1752](https://github.com/influxdata/telegraf/issues/1752): Fix varnish plugin defaults not being used. +- [#1517](https://github.com/influxdata/telegraf/issues/1517): Fix windows glob paths. +- [#1137](https://github.com/influxdata/telegraf/issues/1137): Fix issue loading config directory on windows. +- [#1772](https://github.com/influxdata/telegraf/pull/1772): Windows remote management interactive service fix. +- [#1702](https://github.com/influxdata/telegraf/issues/1702): sqlserver, fix issue when case sensitive collation is activated. +- [#1823](https://github.com/influxdata/telegraf/issues/1823): Fix huge allocations in http_listener when dealing with huge payloads. +- [#1833](https://github.com/influxdata/telegraf/issues/1833): Fix translating SNMP fields not in MIB. +- [#1835](https://github.com/influxdata/telegraf/issues/1835): Fix SNMP emitting empty fields. +- [#1854](https://github.com/influxdata/telegraf/pull/1853): SQL Server waitstats truncation bug. +- [#1810](https://github.com/influxdata/telegraf/issues/1810): Fix logparser common log format: numbers in ident. +- [#1793](https://github.com/influxdata/telegraf/pull/1793): Fix JSON Serialization in OpenTSDB output. +- [#1731](https://github.com/influxdata/telegraf/issues/1731): Fix Graphite template ordering, use most specific. +- [#1836](https://github.com/influxdata/telegraf/pull/1836): Fix snmp table field initialization for non-automatic table. +- [#1724](https://github.com/influxdata/telegraf/issues/1724): cgroups path being parsed as metric. +- [#1886](https://github.com/influxdata/telegraf/issues/1886): Fix phpfpm fcgi client panic when URL does not exist. +- [#1344](https://github.com/influxdata/telegraf/issues/1344): Fix config file parse error logging. +- [#1771](https://github.com/influxdata/telegraf/issues/1771): Delete nil fields in the metric maker. +- [#870](https://github.com/influxdata/telegraf/issues/870): Fix MySQL special characters in DSN parsing. +- [#1742](https://github.com/influxdata/telegraf/issues/1742): Ping input odd timeout behavior. + +## v1.0.1 [2016-09-26] + +### Bugfixes + +- [#1775](https://github.com/influxdata/telegraf/issues/1775): Prometheus output: Fix bug with multi-batch writes. +- [#1738](https://github.com/influxdata/telegraf/issues/1738): Fix unmarshal of influxdb metrics with null tags. +- [#1773](https://github.com/influxdata/telegraf/issues/1773): Add configurable timeout to influxdb input plugin. +- [#1785](https://github.com/influxdata/telegraf/pull/1785): Fix statsd no default value panic. 
## v1.0 [2016-09-08] diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ec7a353633eb2..5aefbfdcbcc98 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,7 +2,7 @@ 1. [Sign the CLA](http://influxdb.com/community/cla.html) 1. Make changes or write plugin (see below for details) -1. Add your plugin to `plugins/inputs/all/all.go` or `plugins/outputs/all/all.go` +1. Add your plugin to one of: `plugins/{inputs,outputs,aggregators,processors}/all/all.go` 1. If your plugin requires a new Go package, [add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency) 1. Write a README for your plugin, if it's an input plugin, it should be structured @@ -16,8 +16,8 @@ for a good example. ## GoDoc -Public interfaces for inputs, outputs, metrics, and the accumulator can be found -on the GoDoc +Public interfaces for inputs, outputs, processors, aggregators, metrics, +and the accumulator can be found on the GoDoc [![GoDoc](https://godoc.org/github.com/influxdata/telegraf?status.svg)](https://godoc.org/github.com/influxdata/telegraf) @@ -46,7 +46,7 @@ and submit new inputs. ### Input Plugin Guidelines -* A plugin must conform to the `telegraf.Input` interface. +* A plugin must conform to the [`telegraf.Input`](https://godoc.org/github.com/influxdata/telegraf#Input) interface. * Input Plugins should call `inputs.Add` in their `init` function to register themselves. See below for a quick example. * Input Plugins must be added to the @@ -177,7 +177,7 @@ similar constructs. ### Output Plugin Guidelines -* An output must conform to the `outputs.Output` interface. +* An output must conform to the [`telegraf.Output`](https://godoc.org/github.com/influxdata/telegraf#Output) interface. * Outputs should call `outputs.Add` in their `init` function to register themselves. See below for a quick example. * To be available within Telegraf itself, plugins must add themselves to the @@ -275,6 +275,186 @@ and `Stop()` methods. * Same as the `Output` guidelines, except that they must conform to the `output.ServiceOutput` interface. +## Processor Plugins + +This section is for developers who want to create a new processor plugin. + +### Processor Plugin Guidelines + +* A processor must conform to the [`telegraf.Processor`](https://godoc.org/github.com/influxdata/telegraf#Processor) interface. +* Processors should call `processors.Add` in their `init` function to register themselves. +See below for a quick example. +* To be available within Telegraf itself, plugins must add themselves to the +`github.com/influxdata/telegraf/plugins/processors/all/all.go` file. +* The `SampleConfig` function should return valid toml that describes how the +processor can be configured. This is include in `telegraf -sample-config`. +* The `Description` function should say in one line what this processor does. + +### Processor Example + +```go +package printer + +// printer.go + +import ( + "fmt" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" +) + +type Printer struct { +} + +var sampleConfig = ` +` + +func (p *Printer) SampleConfig() string { + return sampleConfig +} + +func (p *Printer) Description() string { + return "Print all metrics that pass through this filter." 
+} + +func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric { + for _, metric := range in { + fmt.Println(metric.String()) + } + return in +} + +func init() { + processors.Add("printer", func() telegraf.Processor { + return &Printer{} + }) +} +``` + +## Aggregator Plugins + +This section is for developers who want to create a new aggregator plugin. + +### Aggregator Plugin Guidelines + +* A aggregator must conform to the [`telegraf.Aggregator`](https://godoc.org/github.com/influxdata/telegraf#Aggregator) interface. +* Aggregators should call `aggregators.Add` in their `init` function to register themselves. +See below for a quick example. +* To be available within Telegraf itself, plugins must add themselves to the +`github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file. +* The `SampleConfig` function should return valid toml that describes how the +aggregator can be configured. This is include in `telegraf -sample-config`. +* The `Description` function should say in one line what this aggregator does. +* The Aggregator plugin will need to keep caches of metrics that have passed +through it. This should be done using the builtin `HashID()` function of each +metric. +* When the `Reset()` function is called, all caches should be cleared. + +### Aggregator Example + +```go +package min + +// min.go + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/aggregators" +) + +type Min struct { + // caches for metric fields, names, and tags + fieldCache map[uint64]map[string]float64 + nameCache map[uint64]string + tagCache map[uint64]map[string]string +} + +func NewMin() telegraf.Aggregator { + m := &Min{} + m.Reset() + return m +} + +var sampleConfig = ` + ## period is the flush & clear interval of the aggregator. + period = "30s" + ## If true drop_original will drop the original metrics and + ## only send aggregates. + drop_original = false +` + +func (m *Min) SampleConfig() string { + return sampleConfig +} + +func (m *Min) Description() string { + return "Keep the aggregate min of each metric passing through." 
+} + +func (m *Min) Add(in telegraf.Metric) { + id := in.HashID() + if _, ok := m.nameCache[id]; !ok { + // hit an uncached metric, create caches for first time: + m.nameCache[id] = in.Name() + m.tagCache[id] = in.Tags() + m.fieldCache[id] = make(map[string]float64) + for k, v := range in.Fields() { + if fv, ok := convert(v); ok { + m.fieldCache[id][k] = fv + } + } + } else { + for k, v := range in.Fields() { + if fv, ok := convert(v); ok { + if _, ok := m.fieldCache[id][k]; !ok { + // hit an uncached field of a cached metric + m.fieldCache[id][k] = fv + continue + } + if fv < m.fieldCache[id][k] { + // set new minimum + m.fieldCache[id][k] = fv + } + } + } + } +} + +func (m *Min) Push(acc telegraf.Accumulator) { + for id, _ := range m.nameCache { + fields := map[string]interface{}{} + for k, v := range m.fieldCache[id] { + fields[k+"_min"] = v + } + acc.AddFields(m.nameCache[id], fields, m.tagCache[id]) + } +} + +func (m *Min) Reset() { + m.fieldCache = make(map[uint64]map[string]float64) + m.nameCache = make(map[uint64]string) + m.tagCache = make(map[uint64]map[string]string) +} + +func convert(in interface{}) (float64, bool) { + switch v := in.(type) { + case float64: + return v, true + case int64: + return float64(v), true + default: + return 0, false + } +} + +func init() { + aggregators.Add("min", func() telegraf.Aggregator { + return NewMin() + }) +} +``` + ## Unit Tests ### Execute short tests diff --git a/Godeps b/Godeps index 3a4e9fb1b4f13..6dc0cec2deca8 100644 --- a/Godeps +++ b/Godeps @@ -19,7 +19,7 @@ github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86 github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee github.com/gobwas/glob 49571a1557cd20e6a2410adc6421f85b66c730b5 github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032 -github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7 +github.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380 github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2 github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e @@ -27,8 +27,9 @@ github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478 github.com/hashicorp/consul 5aa90455ce78d4d41578bafc86305e6e6b28d7d2 github.com/hpcloud/tail b2940955ab8b26e19d43a43c4da0475dd81bdb56 github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da -github.com/influxdata/influxdb e094138084855d444195b252314dfee9eae34cab +github.com/influxdata/influxdb fc57c0f7c635df3873f3d64f0ed2100ddc94d5ae github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0 +github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec github.com/kardianos/osext 29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc github.com/kardianos/service 5e335590050d6d00f3aa270217d288dda1c94d0a github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720 @@ -47,7 +48,7 @@ github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37 github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8 github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f github.com/shirou/gopsutil 4d0c402af66c78735c5ccf820dc2ca7de5e4ff08 -github.com/soniah/gosnmp eb32571c2410868d85849ad67d1e51d01273eb84 +github.com/soniah/gosnmp 3fe3beb30fa9700988893c56a63b1df8e1b68c26 github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744 github.com/stretchr/testify 
1f4a1643a57e798696635ea4c126e9127adb7d3c
 github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2
@@ -55,7 +56,7 @@ github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
 github.com/yuin/gopher-lua bf3808abd44b1e55143a2d7f08571aaa80db1808
 github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
-golang.org/x/crypto 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3
+golang.org/x/crypto c197bcf24cde29d3f73c7b4ac6fd41f4384e8af6
 golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
 golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
 gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
diff --git a/README.md b/README.md
index ebe3ed516b529..efba9e6c70f85 100644
--- a/README.md
+++ b/README.md
@@ -20,12 +20,12 @@ new plugins.
 ### Linux deb and rpm Packages:
 
 Latest:
-* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0_amd64.deb
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0.x86_64.rpm
+* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.1_amd64.deb
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1.x86_64.rpm
 
 Latest (arm):
-* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0_armhf.deb
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0.armhf.rpm
+* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.1_armhf.deb
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1.armhf.rpm
 
 ##### Package Instructions:
 
@@ -46,14 +46,14 @@ to use this repo to install & update telegraf.
 ### Linux tarballs:
 
 Latest:
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_linux_amd64.tar.gz
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_linux_i386.tar.gz
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_linux_armhf.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_amd64.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_i386.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_armhf.tar.gz
 
 ### FreeBSD tarball:
 
 Latest:
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_freebsd_amd64.tar.gz
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_freebsd_amd64.tar.gz
 
 ### Ansible Role:
 
@@ -69,7 +69,7 @@ brew install telegraf
 ### Windows Binaries (EXPERIMENTAL)
 
 Latest:
-* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_windows_amd64.zip
+* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_windows_amd64.zip
 
 ### From Source:
 
@@ -85,44 +85,42 @@ if you don't have it already. You also must build with golang version 1.5+.
 
 ## How to use it:
 
-```console
-$ telegraf -help
-Telegraf, The plugin-driven server agent for collecting and reporting metrics.
+See usage with:
 
-Usage:
+```
+telegraf --help
+```
 
-  telegraf <flags>
+### Generate a telegraf config file:
 
-The flags are:
+```
+telegraf config > telegraf.conf
+```
 
-  -config <file>     configuration file to load
-  -test              gather metrics once, print them to stdout, and exit
-  -sample-config     print out full sample configuration to stdout
-  -config-directory  directory containing additional *.conf files
-  -input-filter      filter the input plugins to enable, separator is :
-  -output-filter     filter the output plugins to enable, separator is :
-  -usage             print usage for a plugin, ie, 'telegraf -usage mysql'
-  -debug             print metrics as they're generated to stdout
-  -quiet             run in quiet mode
-  -version           print the version to stdout
+### Generate config with only cpu input & influxdb output plugins defined
 
-Examples:
+```
+telegraf --input-filter cpu --output-filter influxdb config
+```
 
-  # generate a telegraf config file:
-  telegraf -sample-config > telegraf.conf
+### Run a single telegraf collection, outputting metrics to stdout
 
-  # generate config with only cpu input & influxdb output plugins defined
-  telegraf -sample-config -input-filter cpu -output-filter influxdb
+```
+telegraf --config telegraf.conf --test
+```
+
+### Run telegraf with all plugins defined in config file
 
-  # run a single telegraf collection, outputing metrics to stdout
-  telegraf -config telegraf.conf -test
+```
+telegraf --config telegraf.conf
+```
 
-  # run telegraf with all plugins defined in config file
-  telegraf -config telegraf.conf
+### Run telegraf, enabling the cpu & memory input, and influxdb output plugins
 
-  # run telegraf, enabling the cpu & memory input, and influxdb output plugins
-  telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
 ```
+telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
+```
+
 ## Configuration
 
diff --git a/accumulator.go b/accumulator.go
index bb6e4dc85091d..13fd6e5711852 100644
--- a/accumulator.go
+++ b/accumulator.go
@@ -2,9 +2,8 @@ package telegraf
 
 import "time"
 
-// Accumulator is an interface for "accumulating" metrics from input plugin(s).
-// The metrics are sent down a channel shared between all input plugins and then
-// flushed on the configured flush_interval.
+// Accumulator is an interface for "accumulating" metrics from plugin(s).
+// The metrics are sent down a channel shared between all plugins.
 type Accumulator interface {
 	// AddFields adds a metric to the accumulator with the given measurement
 	// name, fields, and tags (and timestamp).
If a timestamp is not provided, @@ -29,12 +28,7 @@ type Accumulator interface { tags map[string]string, t ...time.Time) - AddError(err error) - - Debug() bool - SetDebug(enabled bool) - SetPrecision(precision, interval time.Duration) - DisablePrecision() + AddError(err error) } diff --git a/agent/accumulator.go b/agent/accumulator.go index a0d0461a4cef4..0d682d2857b6d 100644 --- a/agent/accumulator.go +++ b/agent/accumulator.go @@ -1,37 +1,40 @@ package agent import ( - "fmt" "log" - "math" "sync/atomic" "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal/models" ) +type MetricMaker interface { + Name() string + MakeMetric( + measurement string, + fields map[string]interface{}, + tags map[string]string, + mType telegraf.ValueType, + t time.Time, + ) telegraf.Metric +} + func NewAccumulator( - inputConfig *models.InputConfig, + maker MetricMaker, metrics chan telegraf.Metric, ) *accumulator { - acc := accumulator{} - acc.metrics = metrics - acc.inputConfig = inputConfig - acc.precision = time.Nanosecond + acc := accumulator{ + maker: maker, + metrics: metrics, + precision: time.Nanosecond, + } return &acc } type accumulator struct { metrics chan telegraf.Metric - defaultTags map[string]string - - debug bool - // print every point added to the accumulator - trace bool - - inputConfig *models.InputConfig + maker MetricMaker precision time.Duration @@ -44,7 +47,7 @@ func (ac *accumulator) AddFields( tags map[string]string, t ...time.Time, ) { - if m := ac.makeMetric(measurement, fields, tags, telegraf.Untyped, t...); m != nil { + if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Untyped, ac.getTime(t)); m != nil { ac.metrics <- m } } @@ -55,7 +58,7 @@ func (ac *accumulator) AddGauge( tags map[string]string, t ...time.Time, ) { - if m := ac.makeMetric(measurement, fields, tags, telegraf.Gauge, t...); m != nil { + if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Gauge, ac.getTime(t)); m != nil { ac.metrics <- m } } @@ -66,114 +69,11 @@ func (ac *accumulator) AddCounter( tags map[string]string, t ...time.Time, ) { - if m := ac.makeMetric(measurement, fields, tags, telegraf.Counter, t...); m != nil { + if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Counter, ac.getTime(t)); m != nil { ac.metrics <- m } } -// makeMetric either returns a metric, or returns nil if the metric doesn't -// need to be created (because of filtering, an error, etc.) 
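The `MetricMaker` interface above is the crux of this refactor: the accumulator no longer reaches into an input's config, it delegates naming, filtering, and metric construction to whatever sits behind `MakeMetric`, and silently drops a `nil` result. The removed `makeMetric` just below shows everything that used to live inside the accumulator. As a rough sketch of an implementer, here is a hypothetical `PrefixMaker` (the type and its fields are illustrative, not part of this patch), modeled on the `TestMetricMaker` that appears in the test changes further down:

```go
package agent

import (
	"time"

	"github.com/influxdata/telegraf"
)

// PrefixMaker is a hypothetical MetricMaker that namespaces measurement
// names, sketching how per-plugin naming (formerly hard-coded in the
// accumulator's makeMetric) can now live behind the interface.
type PrefixMaker struct {
	PluginName string
	Prefix     string
}

func (p *PrefixMaker) Name() string { return p.PluginName }

func (p *PrefixMaker) MakeMetric(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	mType telegraf.ValueType,
	t time.Time,
) telegraf.Metric {
	// A fuller implementation would switch on mType the way
	// TestMetricMaker does below.
	m, err := telegraf.NewMetric(p.Prefix+measurement, tags, fields, t)
	if err != nil {
		return nil // the accumulator's Add* methods skip nil metrics
	}
	return m
}
```

In the patch itself this role is evidently played by `models.RunningInput` (and, later, `models.RunningAggregator`), which now carry the name_override, prefix/suffix, and filter logic deleted here.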
-func (ac *accumulator) makeMetric( - measurement string, - fields map[string]interface{}, - tags map[string]string, - mType telegraf.ValueType, - t ...time.Time, -) telegraf.Metric { - if len(fields) == 0 || len(measurement) == 0 { - return nil - } - if tags == nil { - tags = make(map[string]string) - } - - // Override measurement name if set - if len(ac.inputConfig.NameOverride) != 0 { - measurement = ac.inputConfig.NameOverride - } - // Apply measurement prefix and suffix if set - if len(ac.inputConfig.MeasurementPrefix) != 0 { - measurement = ac.inputConfig.MeasurementPrefix + measurement - } - if len(ac.inputConfig.MeasurementSuffix) != 0 { - measurement = measurement + ac.inputConfig.MeasurementSuffix - } - - // Apply plugin-wide tags if set - for k, v := range ac.inputConfig.Tags { - if _, ok := tags[k]; !ok { - tags[k] = v - } - } - // Apply daemon-wide tags if set - for k, v := range ac.defaultTags { - if _, ok := tags[k]; !ok { - tags[k] = v - } - } - - // Apply the metric filter(s) - if ok := ac.inputConfig.Filter.Apply(measurement, fields, tags); !ok { - return nil - } - - for k, v := range fields { - // Validate uint64 and float64 fields - switch val := v.(type) { - case uint64: - // InfluxDB does not support writing uint64 - if val < uint64(9223372036854775808) { - fields[k] = int64(val) - } else { - fields[k] = int64(9223372036854775807) - } - continue - case float64: - // NaNs are invalid values in influxdb, skip measurement - if math.IsNaN(val) || math.IsInf(val, 0) { - if ac.debug { - log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+ - "field, skipping", - measurement, k) - } - delete(fields, k) - continue - } - } - - fields[k] = v - } - - var timestamp time.Time - if len(t) > 0 { - timestamp = t[0] - } else { - timestamp = time.Now() - } - timestamp = timestamp.Round(ac.precision) - - var m telegraf.Metric - var err error - switch mType { - case telegraf.Counter: - m, err = telegraf.NewCounterMetric(measurement, tags, fields, timestamp) - case telegraf.Gauge: - m, err = telegraf.NewGaugeMetric(measurement, tags, fields, timestamp) - default: - m, err = telegraf.NewMetric(measurement, tags, fields, timestamp) - } - if err != nil { - log.Printf("Error adding point [%s]: %s\n", measurement, err.Error()) - return nil - } - - if ac.trace { - fmt.Println("> " + m.String()) - } - - return m -} - // AddError passes a runtime error to the accumulator. // The error will be tagged with the plugin name and written to the log. func (ac *accumulator) AddError(err error) { @@ -182,23 +82,7 @@ func (ac *accumulator) AddError(err error) { } atomic.AddUint64(&ac.errCount, 1) //TODO suppress/throttle consecutive duplicate errors? - log.Printf("ERROR in input [%s]: %s", ac.inputConfig.Name, err) -} - -func (ac *accumulator) Debug() bool { - return ac.debug -} - -func (ac *accumulator) SetDebug(debug bool) { - ac.debug = debug -} - -func (ac *accumulator) Trace() bool { - return ac.trace -} - -func (ac *accumulator) SetTrace(trace bool) { - ac.trace = trace + log.Printf("E! Error in plugin [%s]: %s", ac.maker.Name(), err) } // SetPrecision takes two time.Duration objects. 
If the first is non-zero, @@ -222,17 +106,12 @@ func (ac *accumulator) SetPrecision(precision, interval time.Duration) { } } -func (ac *accumulator) DisablePrecision() { - ac.precision = time.Nanosecond -} - -func (ac *accumulator) setDefaultTags(tags map[string]string) { - ac.defaultTags = tags -} - -func (ac *accumulator) addDefaultTag(key, value string) { - if ac.defaultTags == nil { - ac.defaultTags = make(map[string]string) +func (ac accumulator) getTime(t []time.Time) time.Time { + var timestamp time.Time + if len(t) > 0 { + timestamp = t[0] + } else { + timestamp = time.Now() } - ac.defaultTags[key] = value + return timestamp.Round(ac.precision) } diff --git a/agent/accumulator_test.go b/agent/accumulator_test.go index ef5a34ec92d6e..ef8d9eb202b86 100644 --- a/agent/accumulator_test.go +++ b/agent/accumulator_test.go @@ -4,24 +4,21 @@ import ( "bytes" "fmt" "log" - "math" "os" "testing" "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal/models" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestAdd(t *testing.T) { - a := accumulator{} now := time.Now() - a.metrics = make(chan telegraf.Metric, 10) - defer close(a.metrics) - a.inputConfig = &models.InputConfig{} + metrics := make(chan telegraf.Metric, 10) + defer close(metrics) + a := NewAccumulator(&TestMetricMaker{}, metrics) a.AddFields("acctest", map[string]interface{}{"value": float64(101)}, @@ -33,99 +30,80 @@ func TestAdd(t *testing.T) { map[string]interface{}{"value": float64(101)}, map[string]string{"acc": "test"}, now) - testm := <-a.metrics + testm := <-metrics actual := testm.String() assert.Contains(t, actual, "acctest value=101") - testm = <-a.metrics + testm = <-metrics actual = testm.String() assert.Contains(t, actual, "acctest,acc=test value=101") - testm = <-a.metrics + testm = <-metrics actual = testm.String() assert.Equal(t, fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()), actual) } -func TestAddGauge(t *testing.T) { - a := accumulator{} +func TestAddFields(t *testing.T) { now := time.Now() - a.metrics = make(chan telegraf.Metric, 10) - defer close(a.metrics) - a.inputConfig = &models.InputConfig{} + metrics := make(chan telegraf.Metric, 10) + defer close(metrics) + a := NewAccumulator(&TestMetricMaker{}, metrics) - a.AddGauge("acctest", - map[string]interface{}{"value": float64(101)}, - map[string]string{}) - a.AddGauge("acctest", - map[string]interface{}{"value": float64(101)}, - map[string]string{"acc": "test"}) - a.AddGauge("acctest", - map[string]interface{}{"value": float64(101)}, - map[string]string{"acc": "test"}, now) + fields := map[string]interface{}{ + "usage": float64(99), + } + a.AddFields("acctest", fields, map[string]string{}) + a.AddGauge("acctest", fields, map[string]string{"acc": "test"}) + a.AddCounter("acctest", fields, map[string]string{"acc": "test"}, now) - testm := <-a.metrics + testm := <-metrics actual := testm.String() - assert.Contains(t, actual, "acctest value=101") - assert.Equal(t, testm.Type(), telegraf.Gauge) + assert.Contains(t, actual, "acctest usage=99") - testm = <-a.metrics + testm = <-metrics actual = testm.String() - assert.Contains(t, actual, "acctest,acc=test value=101") - assert.Equal(t, testm.Type(), telegraf.Gauge) + assert.Contains(t, actual, "acctest,acc=test usage=99") - testm = <-a.metrics + testm = <-metrics actual = testm.String() assert.Equal(t, - fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()), + fmt.Sprintf("acctest,acc=test usage=99 %d", now.UnixNano()), 
actual) - assert.Equal(t, testm.Type(), telegraf.Gauge) } -func TestAddCounter(t *testing.T) { - a := accumulator{} - now := time.Now() - a.metrics = make(chan telegraf.Metric, 10) - defer close(a.metrics) - a.inputConfig = &models.InputConfig{} - - a.AddCounter("acctest", - map[string]interface{}{"value": float64(101)}, - map[string]string{}) - a.AddCounter("acctest", - map[string]interface{}{"value": float64(101)}, - map[string]string{"acc": "test"}) - a.AddCounter("acctest", - map[string]interface{}{"value": float64(101)}, - map[string]string{"acc": "test"}, now) +func TestAccAddError(t *testing.T) { + errBuf := bytes.NewBuffer(nil) + log.SetOutput(errBuf) + defer log.SetOutput(os.Stderr) - testm := <-a.metrics - actual := testm.String() - assert.Contains(t, actual, "acctest value=101") - assert.Equal(t, testm.Type(), telegraf.Counter) + metrics := make(chan telegraf.Metric, 10) + defer close(metrics) + a := NewAccumulator(&TestMetricMaker{}, metrics) - testm = <-a.metrics - actual = testm.String() - assert.Contains(t, actual, "acctest,acc=test value=101") - assert.Equal(t, testm.Type(), telegraf.Counter) + a.AddError(fmt.Errorf("foo")) + a.AddError(fmt.Errorf("bar")) + a.AddError(fmt.Errorf("baz")) - testm = <-a.metrics - actual = testm.String() - assert.Equal(t, - fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()), - actual) - assert.Equal(t, testm.Type(), telegraf.Counter) + errs := bytes.Split(errBuf.Bytes(), []byte{'\n'}) + assert.EqualValues(t, 3, a.errCount) + require.Len(t, errs, 4) // 4 because of trailing newline + assert.Contains(t, string(errs[0]), "TestPlugin") + assert.Contains(t, string(errs[0]), "foo") + assert.Contains(t, string(errs[1]), "TestPlugin") + assert.Contains(t, string(errs[1]), "bar") + assert.Contains(t, string(errs[2]), "TestPlugin") + assert.Contains(t, string(errs[2]), "baz") } -func TestAddNoPrecisionWithInterval(t *testing.T) { - a := accumulator{} +func TestAddNoIntervalWithPrecision(t *testing.T) { now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC) - a.metrics = make(chan telegraf.Metric, 10) - defer close(a.metrics) - a.inputConfig = &models.InputConfig{} - + metrics := make(chan telegraf.Metric, 10) + defer close(metrics) + a := NewAccumulator(&TestMetricMaker{}, metrics) a.SetPrecision(0, time.Second) + a.AddFields("acctest", map[string]interface{}{"value": float64(101)}, map[string]string{}) @@ -151,14 +129,13 @@ func TestAddNoPrecisionWithInterval(t *testing.T) { actual) } -func TestAddNoIntervalWithPrecision(t *testing.T) { - a := accumulator{} +func TestAddDisablePrecision(t *testing.T) { now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC) - a.metrics = make(chan telegraf.Metric, 10) - defer close(a.metrics) - a.inputConfig = &models.InputConfig{} + metrics := make(chan telegraf.Metric, 10) + defer close(metrics) + a := NewAccumulator(&TestMetricMaker{}, metrics) - a.SetPrecision(time.Second, time.Millisecond) + a.SetPrecision(time.Nanosecond, 0) a.AddFields("acctest", map[string]interface{}{"value": float64(101)}, map[string]string{}) @@ -180,19 +157,17 @@ func TestAddNoIntervalWithPrecision(t *testing.T) { testm = <-a.metrics actual = testm.String() assert.Equal(t, - fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)), + fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082912748)), actual) } -func TestAddDisablePrecision(t *testing.T) { - a := accumulator{} +func TestAddNoPrecisionWithInterval(t *testing.T) { now := time.Date(2006, time.February, 10, 12, 0, 
0, 82912748, time.UTC) - a.metrics = make(chan telegraf.Metric, 10) - defer close(a.metrics) - a.inputConfig = &models.InputConfig{} + metrics := make(chan telegraf.Metric, 10) + defer close(metrics) + a := NewAccumulator(&TestMetricMaker{}, metrics) - a.SetPrecision(time.Second, time.Millisecond) - a.DisablePrecision() + a.SetPrecision(0, time.Second) a.AddFields("acctest", map[string]interface{}{"value": float64(101)}, map[string]string{}) @@ -214,16 +189,15 @@ func TestAddDisablePrecision(t *testing.T) { testm = <-a.metrics actual = testm.String() assert.Equal(t, - fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082912748)), + fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)), actual) } func TestDifferentPrecisions(t *testing.T) { - a := accumulator{} now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC) - a.metrics = make(chan telegraf.Metric, 10) - defer close(a.metrics) - a.inputConfig = &models.InputConfig{} + metrics := make(chan telegraf.Metric, 10) + defer close(metrics) + a := NewAccumulator(&TestMetricMaker{}, metrics) a.SetPrecision(0, time.Second) a.AddFields("acctest", @@ -266,349 +240,100 @@ func TestDifferentPrecisions(t *testing.T) { actual) } -func TestAddDefaultTags(t *testing.T) { - a := accumulator{} - a.addDefaultTag("default", "tag") +func TestAddGauge(t *testing.T) { now := time.Now() - a.metrics = make(chan telegraf.Metric, 10) - defer close(a.metrics) - a.inputConfig = &models.InputConfig{} + metrics := make(chan telegraf.Metric, 10) + defer close(metrics) + a := NewAccumulator(&TestMetricMaker{}, metrics) - a.AddFields("acctest", + a.AddGauge("acctest", map[string]interface{}{"value": float64(101)}, map[string]string{}) - a.AddFields("acctest", - map[string]interface{}{"value": float64(101)}, - map[string]string{"acc": "test"}) - a.AddFields("acctest", + a.AddGauge("acctest", map[string]interface{}{"value": float64(101)}, - map[string]string{"acc": "test"}, now) - - testm := <-a.metrics - actual := testm.String() - assert.Contains(t, actual, "acctest,default=tag value=101") - - testm = <-a.metrics - actual = testm.String() - assert.Contains(t, actual, "acctest,acc=test,default=tag value=101") - - testm = <-a.metrics - actual = testm.String() - assert.Equal(t, - fmt.Sprintf("acctest,acc=test,default=tag value=101 %d", now.UnixNano()), - actual) -} - -func TestAddFields(t *testing.T) { - a := accumulator{} - now := time.Now() - a.metrics = make(chan telegraf.Metric, 10) - defer close(a.metrics) - a.inputConfig = &models.InputConfig{} - - fields := map[string]interface{}{ - "usage": float64(99), - } - a.AddFields("acctest", fields, map[string]string{}) - a.AddFields("acctest", fields, map[string]string{"acc": "test"}) - a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now) - - testm := <-a.metrics - actual := testm.String() - assert.Contains(t, actual, "acctest usage=99") - - testm = <-a.metrics - actual = testm.String() - assert.Contains(t, actual, "acctest,acc=test usage=99") - - testm = <-a.metrics - actual = testm.String() - assert.Equal(t, - fmt.Sprintf("acctest,acc=test usage=99 %d", now.UnixNano()), - actual) -} - -// Test that all Inf fields get dropped, and not added to metrics channel -func TestAddInfFields(t *testing.T) { - inf := math.Inf(1) - ninf := math.Inf(-1) - - a := accumulator{} - now := time.Now() - a.metrics = make(chan telegraf.Metric, 10) - defer close(a.metrics) - a.inputConfig = &models.InputConfig{} - - fields := map[string]interface{}{ - "usage": inf, - "nusage": ninf, 
- } - a.AddFields("acctest", fields, map[string]string{}) - a.AddFields("acctest", fields, map[string]string{"acc": "test"}) - a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now) - - assert.Len(t, a.metrics, 0) - - // test that non-inf fields are kept and not dropped - fields["notinf"] = float64(100) - a.AddFields("acctest", fields, map[string]string{}) - testm := <-a.metrics - actual := testm.String() - assert.Contains(t, actual, "acctest notinf=100") -} - -// Test that nan fields are dropped and not added -func TestAddNaNFields(t *testing.T) { - nan := math.NaN() - - a := accumulator{} - now := time.Now() - a.metrics = make(chan telegraf.Metric, 10) - defer close(a.metrics) - a.inputConfig = &models.InputConfig{} - - fields := map[string]interface{}{ - "usage": nan, - } - a.AddFields("acctest", fields, map[string]string{}) - a.AddFields("acctest", fields, map[string]string{"acc": "test"}) - a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now) - - assert.Len(t, a.metrics, 0) - - // test that non-nan fields are kept and not dropped - fields["notnan"] = float64(100) - a.AddFields("acctest", fields, map[string]string{}) - testm := <-a.metrics - actual := testm.String() - assert.Contains(t, actual, "acctest notnan=100") -} - -func TestAddUint64Fields(t *testing.T) { - a := accumulator{} - now := time.Now() - a.metrics = make(chan telegraf.Metric, 10) - defer close(a.metrics) - a.inputConfig = &models.InputConfig{} - - fields := map[string]interface{}{ - "usage": uint64(99), - } - a.AddFields("acctest", fields, map[string]string{}) - a.AddFields("acctest", fields, map[string]string{"acc": "test"}) - a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now) - - testm := <-a.metrics - actual := testm.String() - assert.Contains(t, actual, "acctest usage=99i") - - testm = <-a.metrics - actual = testm.String() - assert.Contains(t, actual, "acctest,acc=test usage=99i") - - testm = <-a.metrics - actual = testm.String() - assert.Equal(t, - fmt.Sprintf("acctest,acc=test usage=99i %d", now.UnixNano()), - actual) -} - -func TestAddUint64Overflow(t *testing.T) { - a := accumulator{} - now := time.Now() - a.metrics = make(chan telegraf.Metric, 10) - defer close(a.metrics) - a.inputConfig = &models.InputConfig{} - - fields := map[string]interface{}{ - "usage": uint64(9223372036854775808), - } - a.AddFields("acctest", fields, map[string]string{}) - a.AddFields("acctest", fields, map[string]string{"acc": "test"}) - a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now) - - testm := <-a.metrics - actual := testm.String() - assert.Contains(t, actual, "acctest usage=9223372036854775807i") - - testm = <-a.metrics - actual = testm.String() - assert.Contains(t, actual, "acctest,acc=test usage=9223372036854775807i") - - testm = <-a.metrics - actual = testm.String() - assert.Equal(t, - fmt.Sprintf("acctest,acc=test usage=9223372036854775807i %d", now.UnixNano()), - actual) -} - -func TestAddInts(t *testing.T) { - a := accumulator{} - a.addDefaultTag("default", "tag") - now := time.Now() - a.metrics = make(chan telegraf.Metric, 10) - defer close(a.metrics) - a.inputConfig = &models.InputConfig{} - - a.AddFields("acctest", - map[string]interface{}{"value": int(101)}, - map[string]string{}) - a.AddFields("acctest", - map[string]interface{}{"value": int32(101)}, - map[string]string{"acc": "test"}) - a.AddFields("acctest", - map[string]interface{}{"value": int64(101)}, - map[string]string{"acc": "test"}, now) - - testm := <-a.metrics - actual := testm.String() 
- assert.Contains(t, actual, "acctest,default=tag value=101i") - - testm = <-a.metrics - actual = testm.String() - assert.Contains(t, actual, "acctest,acc=test,default=tag value=101i") - - testm = <-a.metrics - actual = testm.String() - assert.Equal(t, - fmt.Sprintf("acctest,acc=test,default=tag value=101i %d", now.UnixNano()), - actual) -} - -func TestAddFloats(t *testing.T) { - a := accumulator{} - a.addDefaultTag("default", "tag") - now := time.Now() - a.metrics = make(chan telegraf.Metric, 10) - defer close(a.metrics) - a.inputConfig = &models.InputConfig{} - - a.AddFields("acctest", - map[string]interface{}{"value": float32(101)}, map[string]string{"acc": "test"}) - a.AddFields("acctest", + a.AddGauge("acctest", map[string]interface{}{"value": float64(101)}, map[string]string{"acc": "test"}, now) - testm := <-a.metrics - actual := testm.String() - assert.Contains(t, actual, "acctest,acc=test,default=tag value=101") - - testm = <-a.metrics - actual = testm.String() - assert.Equal(t, - fmt.Sprintf("acctest,acc=test,default=tag value=101 %d", now.UnixNano()), - actual) -} - -func TestAddStrings(t *testing.T) { - a := accumulator{} - a.addDefaultTag("default", "tag") - now := time.Now() - a.metrics = make(chan telegraf.Metric, 10) - defer close(a.metrics) - a.inputConfig = &models.InputConfig{} - - a.AddFields("acctest", - map[string]interface{}{"value": "test"}, - map[string]string{"acc": "test"}) - a.AddFields("acctest", - map[string]interface{}{"value": "foo"}, - map[string]string{"acc": "test"}, now) - - testm := <-a.metrics + testm := <-metrics actual := testm.String() - assert.Contains(t, actual, "acctest,acc=test,default=tag value=\"test\"") + assert.Contains(t, actual, "acctest value=101") + assert.Equal(t, testm.Type(), telegraf.Gauge) - testm = <-a.metrics + testm = <-metrics actual = testm.String() - assert.Equal(t, - fmt.Sprintf("acctest,acc=test,default=tag value=\"foo\" %d", now.UnixNano()), - actual) -} - -func TestAddBools(t *testing.T) { - a := accumulator{} - a.addDefaultTag("default", "tag") - now := time.Now() - a.metrics = make(chan telegraf.Metric, 10) - defer close(a.metrics) - a.inputConfig = &models.InputConfig{} - - a.AddFields("acctest", - map[string]interface{}{"value": true}, map[string]string{"acc": "test"}) - a.AddFields("acctest", - map[string]interface{}{"value": false}, map[string]string{"acc": "test"}, now) - - testm := <-a.metrics - actual := testm.String() - assert.Contains(t, actual, "acctest,acc=test,default=tag value=true") + assert.Contains(t, actual, "acctest,acc=test value=101") + assert.Equal(t, testm.Type(), telegraf.Gauge) - testm = <-a.metrics + testm = <-metrics actual = testm.String() assert.Equal(t, - fmt.Sprintf("acctest,acc=test,default=tag value=false %d", now.UnixNano()), + fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()), actual) + assert.Equal(t, testm.Type(), telegraf.Gauge) } -// Test that tag filters get applied to metrics. 
-func TestAccFilterTags(t *testing.T) { - a := accumulator{} +func TestAddCounter(t *testing.T) { now := time.Now() - a.metrics = make(chan telegraf.Metric, 10) - defer close(a.metrics) - filter := models.Filter{ - TagExclude: []string{"acc"}, - } - assert.NoError(t, filter.Compile()) - a.inputConfig = &models.InputConfig{} - a.inputConfig.Filter = filter + metrics := make(chan telegraf.Metric, 10) + defer close(metrics) + a := NewAccumulator(&TestMetricMaker{}, metrics) - a.AddFields("acctest", + a.AddCounter("acctest", map[string]interface{}{"value": float64(101)}, map[string]string{}) - a.AddFields("acctest", + a.AddCounter("acctest", map[string]interface{}{"value": float64(101)}, map[string]string{"acc": "test"}) - a.AddFields("acctest", + a.AddCounter("acctest", map[string]interface{}{"value": float64(101)}, map[string]string{"acc": "test"}, now) - testm := <-a.metrics + testm := <-metrics actual := testm.String() assert.Contains(t, actual, "acctest value=101") + assert.Equal(t, testm.Type(), telegraf.Counter) - testm = <-a.metrics + testm = <-metrics actual = testm.String() - assert.Contains(t, actual, "acctest value=101") + assert.Contains(t, actual, "acctest,acc=test value=101") + assert.Equal(t, testm.Type(), telegraf.Counter) - testm = <-a.metrics + testm = <-metrics actual = testm.String() assert.Equal(t, - fmt.Sprintf("acctest value=101 %d", now.UnixNano()), + fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()), actual) + assert.Equal(t, testm.Type(), telegraf.Counter) } -func TestAccAddError(t *testing.T) { - errBuf := bytes.NewBuffer(nil) - log.SetOutput(errBuf) - defer log.SetOutput(os.Stderr) - - a := accumulator{} - a.inputConfig = &models.InputConfig{} - a.inputConfig.Name = "mock_plugin" - - a.AddError(fmt.Errorf("foo")) - a.AddError(fmt.Errorf("bar")) - a.AddError(fmt.Errorf("baz")) +type TestMetricMaker struct { +} - errs := bytes.Split(errBuf.Bytes(), []byte{'\n'}) - assert.EqualValues(t, 3, a.errCount) - require.Len(t, errs, 4) // 4 because of trailing newline - assert.Contains(t, string(errs[0]), "mock_plugin") - assert.Contains(t, string(errs[0]), "foo") - assert.Contains(t, string(errs[1]), "mock_plugin") - assert.Contains(t, string(errs[1]), "bar") - assert.Contains(t, string(errs[2]), "mock_plugin") - assert.Contains(t, string(errs[2]), "baz") +func (tm *TestMetricMaker) Name() string { + return "TestPlugin" +} +func (tm *TestMetricMaker) MakeMetric( + measurement string, + fields map[string]interface{}, + tags map[string]string, + mType telegraf.ValueType, + t time.Time, +) telegraf.Metric { + switch mType { + case telegraf.Untyped: + if m, err := telegraf.NewMetric(measurement, tags, fields, t); err == nil { + return m + } + case telegraf.Counter: + if m, err := telegraf.NewCounterMetric(measurement, tags, fields, t); err == nil { + return m + } + case telegraf.Gauge: + if m, err := telegraf.NewGaugeMetric(measurement, tags, fields, t); err == nil { + return m + } + } + return nil } diff --git a/agent/agent.go b/agent/agent.go index d86037e79edeb..1a205e218b203 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -49,18 +49,16 @@ func (a *Agent) Connect() error { switch ot := o.Output.(type) { case telegraf.ServiceOutput: if err := ot.Start(); err != nil { - log.Printf("Service for output %s failed to start, exiting\n%s\n", + log.Printf("E! Service for output %s failed to start, exiting\n%s\n", o.Name, err.Error()) return err } } - if a.Config.Agent.Debug { - log.Printf("Attempting connection to output: %s\n", o.Name) - } + log.Printf("D! 
Attempting connection to output: %s\n", o.Name) err := o.Output.Connect() if err != nil { - log.Printf("Failed to connect to output %s, retrying in 15s, "+ + log.Printf("E! Failed to connect to output %s, retrying in 15s, "+ "error was '%s' \n", o.Name, err) time.Sleep(15 * time.Second) err = o.Output.Connect() @@ -68,9 +66,7 @@ func (a *Agent) Connect() error { return err } } - if a.Config.Agent.Debug { - log.Printf("Successfully connected to output: %s\n", o.Name) - } + log.Printf("D! Successfully connected to output: %s\n", o.Name) } return nil } @@ -92,9 +88,9 @@ func panicRecover(input *models.RunningInput) { if err := recover(); err != nil { trace := make([]byte, 2048) runtime.Stack(trace, true) - log.Printf("FATAL: Input [%s] panicked: %s, Stack:\n%s\n", - input.Name, err, trace) - log.Println("PLEASE REPORT THIS PANIC ON GITHUB with " + + log.Printf("E! FATAL: Input [%s] panicked: %s, Stack:\n%s\n", + input.Name(), err, trace) + log.Println("E! PLEASE REPORT THIS PANIC ON GITHUB with " + "stack trace, configuration, and OS information: " + "https://github.com/influxdata/telegraf/issues/new") } @@ -107,20 +103,18 @@ func (a *Agent) gatherer( input *models.RunningInput, interval time.Duration, metricC chan telegraf.Metric, -) error { +) { defer panicRecover(input) ticker := time.NewTicker(interval) defer ticker.Stop() for { - var outerr error - - acc := NewAccumulator(input.Config, metricC) - acc.SetDebug(a.Config.Agent.Debug) + acc := NewAccumulator(input, metricC) acc.SetPrecision(a.Config.Agent.Precision.Duration, a.Config.Agent.Interval.Duration) - acc.setDefaultTags(a.Config.Tags) + input.SetDebug(a.Config.Agent.Debug) + input.SetDefaultTags(a.Config.Tags) internal.RandomSleep(a.Config.Agent.CollectionJitter.Duration, shutdown) @@ -128,17 +122,12 @@ func (a *Agent) gatherer( gatherWithTimeout(shutdown, input, acc, interval) elapsed := time.Since(start) - if outerr != nil { - return outerr - } - if a.Config.Agent.Debug { - log.Printf("Input [%s] gathered metrics, (%s interval) in %s\n", - input.Name, interval, elapsed) - } + log.Printf("D! Input [%s] gathered metrics, (%s interval) in %s\n", + input.Name(), interval, elapsed) select { case <-shutdown: - return nil + return case <-ticker.C: continue } @@ -167,13 +156,13 @@ func gatherWithTimeout( select { case err := <-done: if err != nil { - log.Printf("ERROR in input [%s]: %s", input.Name, err) + log.Printf("E! ERROR in input [%s]: %s", input.Name(), err) } return case <-ticker.C: - log.Printf("ERROR: input [%s] took longer to collect than "+ + log.Printf("E! ERROR: input [%s] took longer to collect than "+ "collection interval (%s)", - input.Name, timeout) + input.Name(), timeout) continue case <-shutdown: return @@ -201,13 +190,13 @@ func (a *Agent) Test() error { }() for _, input := range a.Config.Inputs { - acc := NewAccumulator(input.Config, metricC) - acc.SetTrace(true) + acc := NewAccumulator(input, metricC) acc.SetPrecision(a.Config.Agent.Precision.Duration, a.Config.Agent.Interval.Duration) - acc.setDefaultTags(a.Config.Tags) + input.SetTrace(true) + input.SetDefaultTags(a.Config.Tags) - fmt.Printf("* Plugin: %s, Collection 1\n", input.Name) + fmt.Printf("* Plugin: %s, Collection 1\n", input.Name()) if input.Config.Interval != 0 { fmt.Printf("* Internal: %s\n", input.Config.Interval) } @@ -221,10 +210,10 @@ func (a *Agent) Test() error { // Special instructions for some inputs. cpu, for example, needs to be // run twice in order to return cpu usage percentages. 
-	switch input.Name {
+	switch input.Name() {
 	case "cpu", "mongodb", "procstat":
 		time.Sleep(500 * time.Millisecond)
-		fmt.Printf("* Plugin: %s, Collection 2\n", input.Name)
+		fmt.Printf("* Plugin: %s, Collection 2\n", input.Name())
 		if err := input.Input.Gather(acc); err != nil {
 			return err
 		}
@@ -244,7 +233,7 @@ func (a *Agent) flush() {
 			defer wg.Done()
 			err := output.Write()
 			if err != nil {
-				log.Printf("Error writing to output [%s]: %s\n",
+				log.Printf("E! Error writing to output [%s]: %s\n",
 					output.Name, err.Error())
 			}
 		}(o)
@@ -257,72 +246,97 @@ func (a *Agent) flush()
 func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) error {
 	// Inelegant, but this sleep is to allow the Gather threads to run, so that
 	// the flusher will flush after metrics are collected.
-	time.Sleep(time.Millisecond * 200)
+	time.Sleep(time.Millisecond * 300)
 
-	ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
+	// create an output metric channel and a goroutine that continuously passes
+	// each metric onto the output plugins & aggregators.
+	outMetricC := make(chan telegraf.Metric, 100)
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for {
+			select {
+			case <-shutdown:
+				if len(outMetricC) > 0 {
+					// keep going until outMetricC is flushed
+					continue
+				}
+				return
+			case m := <-outMetricC:
+				// if dropOriginal is set to true, then we will only send this
+				// metric to the aggregators, not the outputs.
+				var dropOriginal bool
+				if !m.IsAggregate() {
+					for _, agg := range a.Config.Aggregators {
+						if ok := agg.Add(copyMetric(m)); ok {
+							dropOriginal = true
+						}
+					}
+				}
+				if !dropOriginal {
+					for i, o := range a.Config.Outputs {
+						if i == len(a.Config.Outputs)-1 {
+							o.AddMetric(m)
+						} else {
+							o.AddMetric(copyMetric(m))
+						}
+					}
+				}
+			}
+		}
+	}()
 
+	ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
 	for {
 		select {
 		case <-shutdown:
-			log.Println("Hang on, flushing any cached metrics before shutdown")
+			log.Println("I! Hang on, flushing any cached metrics before shutdown")
+			// wait for outMetricC to get flushed before flushing outputs
+			wg.Wait()
 			a.flush()
 			return nil
 		case <-ticker.C:
 			internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown)
 			a.flush()
-		case m := <-metricC:
-			for i, o := range a.Config.Outputs {
-				if i == len(a.Config.Outputs)-1 {
-					o.AddMetric(m)
-				} else {
-					o.AddMetric(copyMetric(m))
-				}
+		case metric := <-metricC:
+			// NOTE potential bottleneck here as we put each metric through the
+			// processors serially.
+			mS := []telegraf.Metric{metric}
+			for _, processor := range a.Config.Processors {
+				mS = processor.Apply(mS...)
+			}
+			for _, m := range mS {
+				outMetricC <- m
 			}
 		}
 	}
 }
 
-func copyMetric(m telegraf.Metric) telegraf.Metric {
-	t := time.Time(m.Time())
-
-	tags := make(map[string]string)
-	fields := make(map[string]interface{})
-	for k, v := range m.Tags() {
-		tags[k] = v
-	}
-	for k, v := range m.Fields() {
-		fields[k] = v
-	}
-
-	out, _ := telegraf.NewMetric(m.Name(), tags, fields, t)
-	return out
-}
-
 // Run runs the agent daemon, gathering every Interval
 func (a *Agent) Run(shutdown chan struct{}) error {
 	var wg sync.WaitGroup
 
-	log.Printf("Agent Config: Interval:%s, Debug:%#v, Quiet:%#v, Hostname:%#v, "+
+	log.Printf("I!
Agent Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+
 		"Flush Interval:%s \n",
-		a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet,
+		a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet,
 		a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)
 
 	// channel shared between all input threads for accumulating metrics
-	metricC := make(chan telegraf.Metric, 10000)
+	metricC := make(chan telegraf.Metric, 100)
 
+	// Start all ServicePlugins
 	for _, input := range a.Config.Inputs {
-		// Start service of any ServicePlugins
 		switch p := input.Input.(type) {
 		case telegraf.ServiceInput:
-			acc := NewAccumulator(input.Config, metricC)
-			acc.SetDebug(a.Config.Agent.Debug)
+			acc := NewAccumulator(input, metricC)
 			// Service input plugins should set their own precision of their
 			// metrics.
-			acc.DisablePrecision()
-			acc.setDefaultTags(a.Config.Tags)
+			acc.SetPrecision(time.Nanosecond, 0)
+			input.SetDefaultTags(a.Config.Tags)
 			if err := p.Start(acc); err != nil {
-				log.Printf("Service for input %s failed to start, exiting\n%s\n",
-					input.Name, err.Error())
+				log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
+					input.Name(), err.Error())
 				return err
 			}
 			defer p.Stop()
@@ -339,11 +353,22 @@ func (a *Agent) Run(shutdown chan struct{}) error {
 	go func() {
 		defer wg.Done()
 		if err := a.flusher(shutdown, metricC); err != nil {
-			log.Printf("Flusher routine failed, exiting: %s\n", err.Error())
+			log.Printf("E! Flusher routine failed, exiting: %s\n", err.Error())
 			close(shutdown)
 		}
 	}()
 
+	wg.Add(len(a.Config.Aggregators))
+	for _, aggregator := range a.Config.Aggregators {
+		go func(agg *models.RunningAggregator) {
+			defer wg.Done()
+			acc := NewAccumulator(agg, metricC)
+			acc.SetPrecision(a.Config.Agent.Precision.Duration,
+				a.Config.Agent.Interval.Duration)
+			agg.Run(acc, shutdown)
+		}(aggregator)
+	}
+
 	wg.Add(len(a.Config.Inputs))
 	for _, input := range a.Config.Inputs {
 		interval := a.Config.Agent.Interval.Duration
@@ -353,12 +378,26 @@ func (a *Agent) Run(shutdown chan struct{}) error {
 		}
 		go func(in *models.RunningInput, interv time.Duration) {
 			defer wg.Done()
-			if err := a.gatherer(shutdown, in, interv, metricC); err != nil {
-				log.Printf(err.Error())
-			}
+			a.gatherer(shutdown, in, interv, metricC)
 		}(input, interval)
 	}
 
 	wg.Wait()
 	return nil
 }
+
+func copyMetric(m telegraf.Metric) telegraf.Metric {
+	t := time.Time(m.Time())
+
+	tags := make(map[string]string)
+	fields := make(map[string]interface{})
+	for k, v := range m.Tags() {
+		tags[k] = v
+	}
+	for k, v := range m.Fields() {
+		fields[k] = v
+	}
+
+	out, _ := telegraf.NewMetric(m.Name(), tags, fields, t)
+	return out
+}
diff --git a/aggregator.go b/aggregator.go
new file mode 100644
index 0000000000000..48aa8e4bf48ae
--- /dev/null
+++ b/aggregator.go
@@ -0,0 +1,22 @@
+package telegraf
+
+// Aggregator is an interface for implementing an Aggregator plugin.
+// The RunningAggregator wraps this interface and guarantees that
+// Add, Push, and Reset cannot be called concurrently, so locking is not
+// required when implementing an Aggregator plugin.
+type Aggregator interface {
+	// SampleConfig returns the default configuration of the Aggregator.
+	SampleConfig() string
+
+	// Description returns a one-sentence description of the Aggregator.
+	Description() string
+
+	// Add the metric to the aggregator.
+	Add(in Metric)
+
+	// Push pushes the current aggregates to the accumulator.
+	Push(acc Accumulator)
+
+	// Reset resets the aggregator's caches and aggregates.
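+	// A typical cycle, per the concurrency guarantee above: the
+	// RunningAggregator calls Add for each metric seen during the
+	// configured period, Push when the period elapses, and then Reset
+	// to clear state for the next period.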
+ Reset() +} diff --git a/circle.yml b/circle.yml index 4d5ede7255abe..7196c64f14877 100644 --- a/circle.yml +++ b/circle.yml @@ -4,17 +4,14 @@ machine: post: - sudo service zookeeper stop - go version - - go version | grep 1.7.1 || sudo rm -rf /usr/local/go - - wget https://storage.googleapis.com/golang/go1.7.1.linux-amd64.tar.gz - - sudo tar -C /usr/local -xzf go1.7.1.linux-amd64.tar.gz + - go version | grep 1.7.3 || sudo rm -rf /usr/local/go + - wget https://storage.googleapis.com/golang/go1.7.3.linux-amd64.tar.gz + - sudo tar -C /usr/local -xzf go1.7.3.linux-amd64.tar.gz - go version dependencies: override: - docker info - post: - - gem install fpm - - sudo apt-get install -y rpm python-boto test: override: diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 022280d6bbbe4..0f94c6e2c8127 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -12,15 +12,18 @@ import ( "github.com/influxdata/telegraf/agent" "github.com/influxdata/telegraf/internal/config" + "github.com/influxdata/telegraf/logger" + _ "github.com/influxdata/telegraf/plugins/aggregators/all" "github.com/influxdata/telegraf/plugins/inputs" _ "github.com/influxdata/telegraf/plugins/inputs/all" "github.com/influxdata/telegraf/plugins/outputs" _ "github.com/influxdata/telegraf/plugins/outputs/all" + _ "github.com/influxdata/telegraf/plugins/processors/all" "github.com/kardianos/service" ) var fDebug = flag.Bool("debug", false, - "show metrics as they're generated to stdout") + "turn on debug logging") var fQuiet = flag.Bool("quiet", false, "run in quiet mode") var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit") @@ -39,6 +42,10 @@ var fOutputFilters = flag.String("output-filter", "", "filter the outputs to enable, separator is :") var fOutputList = flag.Bool("output-list", false, "print available output plugins.") +var fAggregatorFilters = flag.String("aggregator-filter", "", + "filter the aggregators to enable, separator is :") +var fProcessorFilters = flag.String("processor-filter", "", + "filter the processors to enable, separator is :") var fUsage = flag.String("usage", "", "print usage for a plugin, ie, 'telegraf -usage mysql'") var fService = flag.String("service", "", @@ -66,55 +73,43 @@ const usage = `Telegraf, The plugin-driven server agent for collecting and repor Usage: - telegraf - -The flags are: - - -config configuration file to load - -test gather metrics once, print them to stdout, and exit - -sample-config print out full sample configuration to stdout - -config-directory directory containing additional *.conf files - -input-filter filter the input plugins to enable, separator is : - -input-list print all the plugins inputs - -output-filter filter the output plugins to enable, separator is : - -output-list print all the available outputs - -usage print usage for a plugin, ie, 'telegraf -usage mysql' - -debug print metrics as they're generated to stdout - -quiet run in quiet mode - -version print the version to stdout - -service Control the service, ie, 'telegraf -service install (windows only)' - -In addition to the -config flag, telegraf will also load the config file from -an environment variable or default location. Precedence is: - 1. -config flag - 2. $TELEGRAF_CONFIG_PATH environment variable - 3. $HOME/.telegraf/telegraf.conf - 4. 
/etc/telegraf/telegraf.conf + telegraf [commands|flags] + +The commands & flags are: + + config print out full sample configuration to stdout + version print the version to stdout + + --config configuration file to load + --test gather metrics once, print them to stdout, and exit + --config-directory directory containing additional *.conf files + --input-filter filter the input plugins to enable, separator is : + --output-filter filter the output plugins to enable, separator is : + --usage print usage for a plugin, ie, 'telegraf --usage mysql' + --debug print metrics as they're generated to stdout + --quiet run in quiet mode Examples: # generate a telegraf config file: - telegraf -sample-config > telegraf.conf + telegraf config > telegraf.conf # generate config with only cpu input & influxdb output plugins defined - telegraf -sample-config -input-filter cpu -output-filter influxdb + telegraf --input-filter cpu --output-filter influxdb config # run a single telegraf collection, outputing metrics to stdout - telegraf -config telegraf.conf -test + telegraf --config telegraf.conf -test # run telegraf with all plugins defined in config file - telegraf -config telegraf.conf + telegraf --config telegraf.conf # run telegraf, enabling the cpu & memory input, and influxdb output plugins - telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb + telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb ` -var logger service.Logger - var stop chan struct{} var srvc service.Service -var svcConfig *service.Config type program struct{} @@ -129,7 +124,6 @@ func reloadLoop(stop chan struct{}, s service.Service) { reload <- true for <-reload { reload <- false - flag.Usage = func() { usageExit(0) } flag.Parse() args := flag.Args() @@ -143,6 +137,16 @@ func reloadLoop(stop chan struct{}, s service.Service) { outputFilter := strings.TrimSpace(*fOutputFilters) outputFilters = strings.Split(":"+outputFilter+":", ":") } + var aggregatorFilters []string + if *fAggregatorFilters != "" { + aggregatorFilter := strings.TrimSpace(*fAggregatorFilters) + aggregatorFilters = strings.Split(":"+aggregatorFilter+":", ":") + } + var processorFilters []string + if *fProcessorFilters != "" { + processorFilter := strings.TrimSpace(*fProcessorFilters) + processorFilters = strings.Split(":"+processorFilter+":", ":") + } if len(args) > 0 { switch args[0] { @@ -150,7 +154,12 @@ func reloadLoop(stop chan struct{}, s service.Service) { fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit) return case "config": - config.PrintSampleConfig(inputFilters, outputFilters) + config.PrintSampleConfig( + inputFilters, + outputFilters, + aggregatorFilters, + processorFilters, + ) return } } @@ -173,24 +182,20 @@ func reloadLoop(stop chan struct{}, s service.Service) { fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit) return case *fSampleConfig: - config.PrintSampleConfig(inputFilters, outputFilters) + config.PrintSampleConfig( + inputFilters, + outputFilters, + aggregatorFilters, + processorFilters, + ) return case *fUsage != "": if err := config.PrintInputConfig(*fUsage); err != nil { if err2 := config.PrintOutputConfig(*fUsage); err2 != nil { - log.Fatalf("%s and %s", err, err2) + log.Fatalf("E! 
%s and %s", err, err2) } } return - case *fService != "" && runtime.GOOS == "windows": - if *fConfig != "" { - (*svcConfig).Arguments = []string{"-config", *fConfig} - } - err := service.Control(s, *fService) - if err != nil { - log.Fatal(err) - } - return } // If no other options are specified, load the config file and run. @@ -199,47 +204,45 @@ func reloadLoop(stop chan struct{}, s service.Service) { c.InputFilters = inputFilters err := c.LoadConfig(*fConfig) if err != nil { - fmt.Println(err) - os.Exit(1) + log.Fatal("E! " + err.Error()) } if *fConfigDirectory != "" { err = c.LoadDirectory(*fConfigDirectory) if err != nil { - log.Fatal(err) + log.Fatal("E! " + err.Error()) } } if len(c.Outputs) == 0 { - log.Fatalf("Error: no outputs found, did you provide a valid config file?") + log.Fatalf("E! Error: no outputs found, did you provide a valid config file?") } if len(c.Inputs) == 0 { - log.Fatalf("Error: no inputs found, did you provide a valid config file?") + log.Fatalf("E! Error: no inputs found, did you provide a valid config file?") } ag, err := agent.NewAgent(c) if err != nil { - log.Fatal(err) + log.Fatal("E! " + err.Error()) } - if *fDebug { - ag.Config.Agent.Debug = true - } - - if *fQuiet { - ag.Config.Agent.Quiet = true - } + // Setup logging + logger.SetupLogging( + ag.Config.Agent.Debug || *fDebug, + ag.Config.Agent.Quiet || *fQuiet, + ag.Config.Agent.Logfile, + ) if *fTest { err = ag.Test() if err != nil { - log.Fatal(err) + log.Fatal("E! " + err.Error()) } return } err = ag.Connect() if err != nil { - log.Fatal(err) + log.Fatal("E! " + err.Error()) } shutdown := make(chan struct{}) @@ -252,7 +255,7 @@ func reloadLoop(stop chan struct{}, s service.Service) { close(shutdown) } if sig == syscall.SIGHUP { - log.Printf("Reloading Telegraf config\n") + log.Printf("I! Reloading Telegraf config\n") <-reload reload <- true close(shutdown) @@ -262,15 +265,15 @@ func reloadLoop(stop chan struct{}, s service.Service) { } }() - log.Printf("Starting Telegraf (version %s)\n", version) - log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " ")) - log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " ")) - log.Printf("Tags enabled: %s", c.ListTags()) + log.Printf("I! Starting Telegraf (version %s)\n", version) + log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " ")) + log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " ")) + log.Printf("I! Tags enabled: %s", c.ListTags()) if *fPidfile != "" { f, err := os.Create(*fPidfile) if err != nil { - log.Fatalf("Unable to create pidfile: %s", err) + log.Fatalf("E! Unable to create pidfile: %s", err) } fmt.Fprintf(f, "%d\n", os.Getpid()) @@ -302,8 +305,10 @@ func (p *program) Stop(s service.Service) error { } func main() { + flag.Usage = func() { usageExit(0) } + flag.Parse() if runtime.GOOS == "windows" { - svcConfig = &service.Config{ + svcConfig := &service.Config{ Name: "telegraf", DisplayName: "Telegraf Data Collector Service", Description: "Collects data using a series of plugins and publishes it to" + @@ -314,15 +319,23 @@ func main() { prg := &program{} s, err := service.New(prg, svcConfig) if err != nil { - log.Fatal(err) - } - logger, err = s.Logger(nil) - if err != nil { - log.Fatal(err) + log.Fatal("E! " + err.Error()) } - err = s.Run() - if err != nil { - logger.Error(err) + // Handle the -service flag here to prevent any issues with tooling that + // may not have an interactive session, e.g. installing from Ansible. 
+	if *fService != "" {
+		if *fConfig != "" {
+			(*svcConfig).Arguments = []string{"-config", *fConfig}
+		}
+		err := service.Control(s, *fService)
+		if err != nil {
+			log.Fatal("E! " + err.Error())
+		}
+	} else {
+		err = s.Run()
+		if err != nil {
+			log.Println("E! " + err.Error())
+		}
 	}
 } else {
 	stop = make(chan struct{})
diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md
index 46f044ab766a1..9b2eb99d88ab9 100644
--- a/docs/CONFIGURATION.md
+++ b/docs/CONFIGURATION.md
@@ -1,38 +1,38 @@
 # Telegraf Configuration
 
+You can see the latest config file with all available plugins here:
+[telegraf.conf](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf)
+
 ## Generating a Configuration File
 
-A default Telegraf config file can be generated using the -sample-config flag:
+A default Telegraf config file can be auto-generated by telegraf:
 
 ```
-telegraf -sample-config > telegraf.conf
+telegraf config > telegraf.conf
 ```
 
 To generate a file with specific inputs and outputs, you can use the
--input-filter and -output-filter flags:
+--input-filter and --output-filter flags:
 
 ```
-telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka
+telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config
 ```
 
-You can see the latest config file with all available plugins here:
-[telegraf.conf](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf)
-
 ## Environment Variables
 
 Environment variables can be used anywhere in the config file, simply prepend
 them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
 for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
 
-## `[global_tags]` Configuration
+# Global Tags
 
 Global tags can be specified in the `[global_tags]` section of the config file
 in key="value" format. All metrics being gathered on this host will be tagged
 with the tags specified here.
 
-## `[agent]` Configuration
+## Agent Configuration
 
-Telegraf has a few options you can configure under the `agent` section of the
+Telegraf has a few options you can configure under the `[agent]` section of the
 config.
 
 * **interval**: Default data collection interval for all inputs
@@ -56,13 +56,63 @@ interval. Maximum flush_interval will be flush_interval + flush_jitter
 This is primarily to avoid large write spikes for users running a large number
 of telegraf instances. ie, a jitter of 5s and flush_interval 10s means flushes
 will happen every 10-15s.
+* **precision**: By default, precision will be set to the same timestamp order
+as the collection interval, with the maximum being 1s. Precision will NOT
+be used for service inputs, such as logparser and statsd. Valid values are
+"ns", "us" (or "µs"), "ms", "s".
+* **logfile**: Specify the log file name. The empty string means to log to stdout.
 * **debug**: Run telegraf in debug mode.
-* **quiet**: Run telegraf in quiet mode.
+* **quiet**: Run telegraf in quiet mode (error messages only).
 * **hostname**: Override default hostname, if empty use os.Hostname().
+* **omit_hostname**: If true, do not set the "host" tag in the telegraf agent.
+
+## Input Configuration
+
+The following config parameters are available for all inputs:
+
+* **interval**: How often to gather this metric. Normal plugins use a single
+global interval, but if one particular input should be run less or more often,
+you can configure that here.
+* **name_override**: Override the base name of the measurement.
+(Default is the name of the input).
+* **name_prefix**: Specifies a prefix to attach to the measurement name.
+* **name_suffix**: Specifies a suffix to attach to the measurement name.
+* **tags**: A map of tags to apply to a specific input's measurements.
+
+## Output Configuration
+
+There are no generic configuration options available for all outputs.
+
+## Aggregator Configuration
+
+The following config parameters are available for all aggregators:
+
+* **period**: The period on which to flush & clear each aggregator. All metrics
+that are sent with timestamps outside of this period will be ignored by the
+aggregator.
+* **delay**: The delay before each aggregator is flushed. This controls how
+long aggregators wait before receiving metrics from input plugins, in case
+aggregators are flushing and inputs are gathering on the same interval.
+* **drop_original**: If true, the original metric will be dropped by the
+aggregator and will not get sent to the output plugins.
+* **name_override**: Override the base name of the measurement.
+(Default is the name of the input).
+* **name_prefix**: Specifies a prefix to attach to the measurement name.
+* **name_suffix**: Specifies a suffix to attach to the measurement name.
+* **tags**: A map of tags to apply to a specific input's measurements.
+
+## Processor Configuration
+
+The following config parameters are available for all processors:
+
+* **order**: This is the order in which the processor(s) get executed. If this
+is not specified, then processor execution order will be random.
 
 #### Measurement Filtering
 
-Filters can be configured per input or output, see below for examples.
+Filters can be configured per input, output, processor, or aggregator,
+see below for examples.
 
 * **namepass**: An array of strings that is used to filter metrics generated by the
 current input. Each string in the array is tested as a glob match against
@@ -90,19 +140,6 @@ the tag keys in the final measurement.
 the plugin definition, otherwise subsequent plugin config options will be
 interpreted as part of the tagpass/tagdrop map.
 
-## Input Configuration
-
-Some configuration options are configurable per input:
-
-* **name_override**: Override the base name of the measurement.
-(Default is the name of the input).
-* **name_prefix**: Specifies a prefix to attach to the measurement name.
-* **name_suffix**: Specifies a suffix to attach to the measurement name.
-* **tags**: A map of tags to apply to a specific input's measurements.
-* **interval**: How often to gather this metric. Normal plugins use a single
-global interval, but if one particular input should be run less or more often,
-you can configure that here.
-
 #### Input Configuration Examples
 
 This is a full working config that will output CPU data to an InfluxDB instance
@@ -254,11 +291,7 @@ to avoid measurement collisions:
     fielddrop = ["cpu_time*"]
 ```
 
-## Output Configuration
-
-Telegraf also supports specifying multiple output sinks to send data to,
-configuring each output sink is different, but examples can be
-found by running `telegraf -sample-config`.
+#### Output Configuration Examples:
 
 ```toml
 [[outputs.influxdb]]
@@ -283,3 +316,39 @@ found by running `telegraf -sample-config`.
   [outputs.influxdb.tagpass]
     cpu = ["cpu0"]
 ```
+
+#### Aggregator Configuration Examples:
+
+This will collect and emit the min/max of the system load1 metric every
+30s, dropping the originals.
+
+```toml
+[[inputs.system]]
+  fieldpass = ["load1"] # collects system load1 metric.
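+  # (The minmax aggregator below buffers each load1 value for the 30s
+  # period, then emits min/max fields, e.g. load1_min, and clears its
+  # cache.)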
+ +[[aggregators.minmax]] + period = "30s" # send & clear the aggregate every 30s. + drop_original = true # drop the original metrics. + +[[outputs.file]] + files = ["stdout"] +``` + +This will collect and emit the min/max of the swap metrics every +30s, dropping the originals. The aggregator will not be applied +to the system load metrics due to the `namepass` parameter. + +```toml +[[inputs.swap]] + +[[inputs.system]] + fieldpass = ["load1"] # collects system load1 metric. + +[[aggregators.minmax]] + period = "30s" # send & clear the aggregate every 30s. + drop_original = true # drop the original metrics. + namepass = ["swap"] # only "pass" swap metrics through the aggregator. + +[[outputs.file]] + files = ["stdout"] +``` \ No newline at end of file diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index 2e3a479ac7e9b..c14752d9cdf81 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -232,6 +232,16 @@ us.west.cpu.load 100 => cpu.load,region=us.west value=100 ``` +Multiple templates can also be specified, but these should be differentiated +using _filters_ (see below for more details) + +```toml +templates = [ + "*.*.* region.region.measurement", # <- all 3-part measurements will match this one. + "*.*.*.* region.region.host.measurement", # <- all 4-part measurements will match this one. +] +``` + #### Field Templates: The field keyword tells Telegraf to give the metric that field name. diff --git a/etc/telegraf.conf b/etc/telegraf.conf index f6e9b2ffed30b..2ad0bcbae66d2 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -30,12 +30,15 @@ ## ie, if interval="10s" then always collect on :00, :10, :20, etc. round_interval = true - ## Telegraf will send metrics to outputs in batches of at - ## most metric_batch_size metrics. + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. metric_batch_size = 1000 + ## For failed writes, telegraf will cache metric_buffer_limit metrics for each ## output, and will flush this buffer on a successful write. Oldest metrics ## are dropped first when this buffer fills. + ## This buffer only fills when writes fail to output plugin(s). metric_buffer_limit = 10000 ## Collection jitter is used to jitter the collection by a random amount. @@ -57,10 +60,15 @@ ## Precision will NOT be used for service inputs, such as logparser and statsd. ## Valid values are "ns", "us" (or "µs"), "ms", "s". precision = "" - ## Run telegraf in debug mode + + ## Logging configuration: + ## Run telegraf with debug log messages. debug = false - ## Run telegraf in quiet mode + ## Run telegraf in quiet mode (error log messages only). quiet = false + ## Specify the log file name. The empty string means to log to stdout. + logfile = "" + ## Override default hostname, if empty use os.Hostname() hostname = "" ## If set to true, do no set the "host" tag in the telegraf agent. @@ -357,6 +365,30 @@ # data_format = "influx" +# # Send telegraf measurements to NATS +# [[outputs.nats]] +# ## URLs of NATS servers +# servers = ["nats://localhost:4222"] +# ## Optional credentials +# # username = "" +# # password = "" +# ## NATS subject for producer messages +# subject = "telegraf" +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. 
+#   ## Each data format has its own unique set of configuration options, read
+#   ## more about them here:
+#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+#   data_format = "influx"
+
+
 # # Send telegraf measurements to NSQD
 # [[outputs.nsq]]
 #   ## Location of nsqd instance listening on TCP
@@ -376,13 +408,18 @@
 #   ## prefix for metrics keys
 #   prefix = "my.specific.prefix."
 #
-#   ## Telnet Mode ##
-#   ## DNS name of the OpenTSDB server in telnet mode
+#   ## DNS name of the OpenTSDB server
+#   ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
+#   ## telnet API. "http://opentsdb.example.com" will use the HTTP API.
 #   host = "opentsdb.example.com"
 #
-#   ## Port of the OpenTSDB server in telnet mode
+#   ## Port of the OpenTSDB server
 #   port = 4242
 #
+#   ## Number of data points to send to OpenTSDB in HTTP requests.
+#   ## Not used with telnet API.
+#   httpBatchSize = 50
+#
 #   ## Debug true - Prints OpenTSDB communication
 #   debug = false
 
@@ -414,8 +451,8 @@
   percpu = true
   ## Whether to report total system cpu stats or not
   totalcpu = true
-  ## Comment this line if you want the raw CPU time metrics
-  fielddrop = ["time_*"]
+  ## If true, collect raw CPU time metrics.
+  collect_cpu_time = false
 
 
 # Read metrics about disk usage by mount point
@@ -530,14 +567,7 @@
 #   ## suffix used to identify socket files
 #   socket_suffix = "asok"
 #
-#   ## Ceph user to authenticate as, ceph will search for the corresponding keyring
-#   ## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the
-#   ## client section of ceph.conf for example:
-#   ##
-#   ##     [client.telegraf]
-#   ##         keyring = /etc/ceph/client.telegraf.keyring
-#   ##
-#   ## Consult the ceph documentation for more detail on keyring generation.
+#   ## Ceph user to authenticate as
 #   ceph_user = "client.admin"
 #
 #   ## Ceph configuration to use to locate the cluster
@@ -546,8 +576,7 @@
 #   ## Whether to gather statistics via the admin socket
 #   gather_admin_socket_stats = true
 #
-#   ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config
-#   ## to be specified
+#   ## Whether to gather statistics via ceph commands
 #   gather_cluster_stats = true
 
 
@@ -601,6 +630,11 @@
 #   ## Metric Statistic Namespace (required)
 #   namespace = 'AWS/ELB'
 #
+#   ## Maximum requests per second. Note that the global default AWS rate limit is
+#   ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
+#   ## maximum of 10. Optional - default value is 10.
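+#   ## For example (assuming the default account limit), two cloudwatch
+#   ## inputs could each set ratelimit = 5.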
+# ratelimit = 10 +# # ## Metrics to Pull (optional) # ## Defaults to all Metrics in Namespace if nothing is provided # ## Refreshes Namespace available metrics every 1h @@ -718,6 +752,9 @@ # ## specify a list of one or more Elasticsearch servers # servers = ["http://localhost:9200"] # +# ## Timeout for HTTP requests to the elastic search server(s) +# http_timeout = "5s" +# # ## set local to false when you want to read the indices stats from all nodes # ## within the cluster # local = true @@ -860,6 +897,8 @@ # "http://localhost:9999/stats/", # "http://localhost:9998/stats/", # ] +# ## Set response_timeout (default 5 seconds) +# response_timeout = "5s" # # ## HTTP method to use: GET or POST (case-sensitive) # method = "GET" @@ -899,6 +938,9 @@ # urls = [ # "http://localhost:8086/debug/vars" # ] +# +# ## http request & header timeout +# timeout = "5s" # # Read metrics from one or many bare metal servers @@ -910,22 +952,11 @@ # ## # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] -# # Gather packets and bytes throughput from iptables -# [[inputs.iptables]] -# ## iptables require root access on most systems. -# ## Setting 'use_sudo' to true will make use of sudo to run iptables. -# ## Users must configure sudo to allow telegraf user to run iptables. -# ## iptables can be restricted to only use list command "iptables -nvL" -# use_sudo = false -# ## define the table to monitor: -# table = "filter" -# ## Defines the chains to monitor: -# chains = [ "INPUT" ] - # # Read JMX metrics through Jolokia # [[inputs.jolokia]] # ## This is the context root used to compose the jolokia url +# ## NOTE that your jolokia security policy must allow for POST requests. # context = "/jolokia" # # ## This specifies the mode used @@ -1041,8 +1072,6 @@ # # "tasks", # # "messages", # # ] -# ## Include mesos tasks statistics, default is false -# # slave_tasks = true # # Read metrics from one or many MongoDB servers @@ -1186,23 +1215,6 @@ # command = "passenger-status -v --show=xml" -# # Read metrics from one or many pgbouncer servers -# [[inputs.pgbouncer]] -# ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@localhost:port[/dbname]\ -# ## ?sslmode=[disable|verify-ca|verify-full] -# ## or a simple string: -# ## host=localhost user=pqotest port=6432 password=... sslmode=... dbname=pgbouncer -# ## -# ## All connection parameters are optional, except for dbname, -# ## you need to set it always as pgbouncer. -# address = "host=localhost user=postgres port=6432 sslmode=disable dbname=pgbouncer" -# -# ## A list of databases to pull metrics about. If not specified, metrics for all -# ## databases are gathered. -# # databases = ["app_production", "testing"] - - # # Read metrics of phpfpm, via HTTP status page or socket # [[inputs.phpfpm]] # ## An array of addresses to gather stats about. Specify an ip or hostname @@ -1261,8 +1273,12 @@ # ## # address = "host=localhost user=postgres sslmode=disable" # +# ## A list of databases to explicitly ignore. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'databases' option. +# # ignored_databases = ["postgres", "template0", "template1"] +# # ## A list of databases to pull metrics about. If not specified, metrics for all -# ## databases are gathered. +# ## databases are gathered. Do NOT use with the 'ignore_databases' option. 
# # databases = ["app_production", "testing"] @@ -1429,6 +1445,65 @@ # servers = ["http://localhost:8098"] +# # Retrieves SNMP values from remote agents +# [[inputs.snmp]] +# agents = [ "127.0.0.1:161" ] +# ## Timeout for each SNMP query. +# timeout = "5s" +# ## Number of retries to attempt within timeout. +# retries = 3 +# ## SNMP version, values can be 1, 2, or 3 +# version = 2 +# +# ## SNMP community string. +# community = "public" +# +# ## The GETBULK max-repetitions parameter +# max_repetitions = 10 +# +# ## SNMPv3 auth parameters +# #sec_name = "myuser" +# #auth_protocol = "md5" # Values: "MD5", "SHA", "" +# #auth_password = "pass" +# #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv" +# #context_name = "" +# #priv_protocol = "" # Values: "DES", "AES", "" +# #priv_password = "" +# +# ## measurement name +# name = "system" +# [[inputs.snmp.field]] +# name = "hostname" +# oid = ".1.0.0.1.1" +# [[inputs.snmp.field]] +# name = "uptime" +# oid = ".1.0.0.1.2" +# [[inputs.snmp.field]] +# name = "load" +# oid = ".1.0.0.1.3" +# [[inputs.snmp.field]] +# oid = "HOST-RESOURCES-MIB::hrMemorySize" +# +# [[inputs.snmp.table]] +# ## measurement name +# name = "remote_servers" +# inherit_tags = [ "hostname" ] +# [[inputs.snmp.table.field]] +# name = "server" +# oid = ".1.0.0.0.1.0" +# is_tag = true +# [[inputs.snmp.table.field]] +# name = "connections" +# oid = ".1.0.0.0.1.1" +# [[inputs.snmp.table.field]] +# name = "latency" +# oid = ".1.0.0.0.1.2" +# +# [[inputs.snmp.table]] +# ## auto populate table's fields using the MIB +# oid = "HOST-RESOURCES-MIB::hrNetworkTable" + + # # DEPRECATED! PLEASE USE inputs.snmp INSTEAD. # [[inputs.snmp_legacy]] # ## Use 'oids.txt' file to translate oids to names @@ -1601,6 +1676,16 @@ # SERVICE INPUT PLUGINS # ############################################################################### +# # Influx HTTP write listener +# [[inputs.http_listener]] +# ## Address and port to host HTTP listener on +# service_address = ":8186" +# +# ## timeouts +# read_timeout = "10s" +# write_timeout = "10s" + + # # Read metrics from Kafka topic(s) # [[inputs.kafka_consumer]] # ## topic(s) to consume @@ -1823,6 +1908,9 @@ # ## Address and port to host Webhook listener on # service_address = ":1619" # +# [inputs.webhooks.filestack] +# path = "/filestack" +# # [inputs.webhooks.github] # path = "/github" # diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index 9ce067c3925de..4825d715a6b7c 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -42,10 +42,14 @@ ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s flush_jitter = "0s" + ## Logging configuration: ## Run telegraf in debug mode debug = false ## Run telegraf in quiet mode quiet = false + ## Specify the log file name. The empty string means to log to stdout. + logfile = "/Program Files/Telegraf/telegraf.log" + ## Override default hostname, if empty use os.Hostname() hostname = "" @@ -85,7 +89,7 @@ # Windows Performance Counters plugin. # These are the recommended method of monitoring system metrics on windows, # as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI, -# which utilizes a lot of system resources. +# which utilize more system resources. # # See more configuration examples at: # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters @@ -95,70 +99,104 @@ # Processor usage, alternative to native, reports on a per core. 
ObjectName = "Processor" Instances = ["*"] - Counters = ["% Idle Time", "% Interrupt Time", "% Privileged Time", "% User Time", "% Processor Time"] + Counters = [ + "% Idle Time", + "% Interrupt Time", + "% Privileged Time", + "% User Time", + "% Processor Time", + ] Measurement = "win_cpu" - #IncludeTotal=false #Set to true to include _Total instance when querying for all (*). + # Set to true to include _Total instance when querying for all (*). + #IncludeTotal=false [[inputs.win_perf_counters.object]] # Disk times and queues ObjectName = "LogicalDisk" Instances = ["*"] - Counters = ["% Idle Time", "% Disk Time","% Disk Read Time", "% Disk Write Time", "% User Time", "Current Disk Queue Length"] + Counters = [ + "% Idle Time", + "% Disk Time","% Disk Read Time", + "% Disk Write Time", + "% User Time", + "Current Disk Queue Length", + ] Measurement = "win_disk" - #IncludeTotal=false #Set to true to include _Total instance when querying for all (*). + # Set to true to include _Total instance when querying for all (*). + #IncludeTotal=false [[inputs.win_perf_counters.object]] ObjectName = "System" - Counters = ["Context Switches/sec","System Calls/sec"] + Counters = [ + "Context Switches/sec", + "System Calls/sec", + ] Instances = ["------"] Measurement = "win_system" - #IncludeTotal=false #Set to true to include _Total instance when querying for all (*). + # Set to true to include _Total instance when querying for all (*). + #IncludeTotal=false [[inputs.win_perf_counters.object]] - # Example query where the Instance portion must be removed to get data back, such as from the Memory object. + # Example query where the Instance portion must be removed to get data back, + # such as from the Memory object. ObjectName = "Memory" - Counters = ["Available Bytes","Cache Faults/sec","Demand Zero Faults/sec","Page Faults/sec","Pages/sec","Transition Faults/sec","Pool Nonpaged Bytes","Pool Paged Bytes"] - Instances = ["------"] # Use 6 x - to remove the Instance bit from the query. + Counters = [ + "Available Bytes", + "Cache Faults/sec", + "Demand Zero Faults/sec", + "Page Faults/sec", + "Pages/sec", + "Transition Faults/sec", + "Pool Nonpaged Bytes", + "Pool Paged Bytes", + ] + # Use 6 x - to remove the Instance bit from the query. + Instances = ["------"] Measurement = "win_mem" - #IncludeTotal=false #Set to true to include _Total instance when querying for all (*). + # Set to true to include _Total instance when querying for all (*). + #IncludeTotal=false # Windows system plugins using WMI (disabled by default, using # win_perf_counters over WMI is recommended) -# Read metrics about cpu usage -#[[inputs.cpu]] - ## Whether to report per-cpu stats or not - #percpu = true - ## Whether to report total system cpu stats or not - #totalcpu = true - ## Comment this line if you want the raw CPU time metrics - #fielddrop = ["time_*"] - -# Read metrics about disk usage by mount point -#[[inputs.disk]] - ## By default, telegraf gather stats for all mountpoints. - ## Setting mountpoints will restrict the stats to the specified mountpoints. - ## mount_points=["/"] - - ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually - ## present on /run, /var/run, /dev/shm or /dev). - #ignore_fs = ["tmpfs", "devtmpfs"] - -# Read metrics about disk IO by device -#[[inputs.diskio]] - ## By default, telegraf will gather stats for all devices including - ## disk partitions. - ## Setting devices will restrict the stats to the specified devices. 
- ## devices = ["sda", "sdb"] - ## Uncomment the following line if you do not need disk serial numbers. - ## skip_serial_number = true - -# Read metrics about memory usage -#[[inputs.mem]] - # no configuration - -# Read metrics about swap memory usage -#[[inputs.swap]] - # no configuration +# # Read metrics about cpu usage +# [[inputs.cpu]] +# ## Whether to report per-cpu stats or not +# percpu = true +# ## Whether to report total system cpu stats or not +# totalcpu = true +# ## Comment this line if you want the raw CPU time metrics +# fielddrop = ["time_*"] + + +# # Read metrics about disk usage by mount point +# [[inputs.disk]] +# ## By default, telegraf gather stats for all mountpoints. +# ## Setting mountpoints will restrict the stats to the specified mountpoints. +# ## mount_points=["/"] +# +# ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually +# ## present on /run, /var/run, /dev/shm or /dev). +# # ignore_fs = ["tmpfs", "devtmpfs"] + + +# # Read metrics about disk IO by device +# [[inputs.diskio]] +# ## By default, telegraf will gather stats for all devices including +# ## disk partitions. +# ## Setting devices will restrict the stats to the specified devices. +# ## devices = ["sda", "sdb"] +# ## Uncomment the following line if you do not need disk serial numbers. +# ## skip_serial_number = true + + +# # Read metrics about memory usage +# [[inputs.mem]] +# # no configuration + + +# # Read metrics about swap memory usage +# [[inputs.swap]] +# # no configuration diff --git a/internal/buffer/buffer.go b/internal/buffer/buffer.go index b7a05bf0337af..58cd1c3764d7e 100644 --- a/internal/buffer/buffer.go +++ b/internal/buffer/buffer.go @@ -1,6 +1,8 @@ package buffer import ( + "sync" + "github.com/influxdata/telegraf" ) @@ -11,6 +13,8 @@ type Buffer struct { drops int // total metrics added total int + + mu sync.Mutex } // NewBuffer returns a Buffer @@ -61,11 +65,13 @@ func (b *Buffer) Add(metrics ...telegraf.Metric) { // the batch will be of maximum length batchSize. It can be less than batchSize, // if the length of Buffer is less than batchSize. 
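+// Batch also holds the buffer's mutex, so two concurrent Batch calls cannot
+// race between the length check and the channel reads below.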
func (b *Buffer) Batch(batchSize int) []telegraf.Metric { + b.mu.Lock() n := min(len(b.buf), batchSize) out := make([]telegraf.Metric, n) for i := 0; i < n; i++ { out[i] = <-b.buf } + b.mu.Unlock() return out } diff --git a/internal/config/config.go b/internal/config/config.go index 30e62789023ec..2c2199dacb00e 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -11,15 +11,18 @@ import ( "regexp" "runtime" "sort" + "strconv" "strings" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/models" + "github.com/influxdata/telegraf/plugins/aggregators" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/processors" "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/config" @@ -47,9 +50,12 @@ type Config struct { InputFilters []string OutputFilters []string - Agent *AgentConfig - Inputs []*models.RunningInput - Outputs []*models.RunningOutput + Agent *AgentConfig + Inputs []*models.RunningInput + Outputs []*models.RunningOutput + Aggregators []*models.RunningAggregator + // Processors have a slice wrapper type because they need to be sorted + Processors models.RunningProcessors } func NewConfig() *Config { @@ -64,6 +70,7 @@ func NewConfig() *Config { Tags: make(map[string]string), Inputs: make([]*models.RunningInput, 0), Outputs: make([]*models.RunningOutput, 0), + Processors: make([]*models.RunningProcessor, 0), InputFilters: make([]string, 0), OutputFilters: make([]string, 0), } @@ -125,6 +132,9 @@ type AgentConfig struct { // Debug is the option for running in debug mode Debug bool + // Logfile specifies the file to send logs to + Logfile string + // Quiet is the option for running in quiet mode Quiet bool Hostname string @@ -135,7 +145,7 @@ type AgentConfig struct { func (c *Config) InputNames() []string { var name []string for _, input := range c.Inputs { - name = append(name, input.Name) + name = append(name, input.Name()) } return name } @@ -195,12 +205,15 @@ var header = `# Telegraf Configuration ## ie, if interval="10s" then always collect on :00, :10, :20, etc. round_interval = true - ## Telegraf will send metrics to outputs in batches of at - ## most metric_batch_size metrics. + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. metric_batch_size = 1000 + ## For failed writes, telegraf will cache metric_buffer_limit metrics for each ## output, and will flush this buffer on a successful write. Oldest metrics ## are dropped first when this buffer fills. + ## This buffer only fills when writes fail to output plugin(s). metric_buffer_limit = 10000 ## Collection jitter is used to jitter the collection by a random amount. @@ -222,10 +235,15 @@ var header = `# Telegraf Configuration ## Precision will NOT be used for service inputs, such as logparser and statsd. ## Valid values are "ns", "us" (or "µs"), "ms", "s". precision = "" - ## Run telegraf in debug mode + + ## Logging configuration: + ## Run telegraf with debug log messages. debug = false - ## Run telegraf in quiet mode + ## Run telegraf in quiet mode (error log messages only). quiet = false + ## Specify the log file name. The empty string means to log to stderr. 
+ logfile = "" + ## Override default hostname, if empty use os.Hostname() hostname = "" ## If set to true, do no set the "host" tag in the telegraf agent. @@ -237,6 +255,20 @@ var header = `# Telegraf Configuration ############################################################################### ` +var processorHeader = ` + +############################################################################### +# PROCESSOR PLUGINS # +############################################################################### +` + +var aggregatorHeader = ` + +############################################################################### +# AGGREGATOR PLUGINS # +############################################################################### +` + var inputHeader = ` ############################################################################### @@ -252,9 +284,15 @@ var serviceInputHeader = ` ` // PrintSampleConfig prints the sample config -func PrintSampleConfig(inputFilters []string, outputFilters []string) { +func PrintSampleConfig( + inputFilters []string, + outputFilters []string, + aggregatorFilters []string, + processorFilters []string, +) { fmt.Printf(header) + // print output plugins if len(outputFilters) != 0 { printFilteredOutputs(outputFilters, false) } else { @@ -270,6 +308,33 @@ func PrintSampleConfig(inputFilters []string, outputFilters []string) { printFilteredOutputs(pnames, true) } + // print processor plugins + fmt.Printf(processorHeader) + if len(processorFilters) != 0 { + printFilteredProcessors(processorFilters, false) + } else { + pnames := []string{} + for pname := range processors.Processors { + pnames = append(pnames, pname) + } + sort.Strings(pnames) + printFilteredProcessors(pnames, true) + } + + // pring aggregator plugins + fmt.Printf(aggregatorHeader) + if len(aggregatorFilters) != 0 { + printFilteredAggregators(aggregatorFilters, false) + } else { + pnames := []string{} + for pname := range aggregators.Aggregators { + pnames = append(pnames, pname) + } + sort.Strings(pnames) + printFilteredAggregators(pnames, true) + } + + // print input plugins fmt.Printf(inputHeader) if len(inputFilters) != 0 { printFilteredInputs(inputFilters, false) @@ -287,6 +352,42 @@ func PrintSampleConfig(inputFilters []string, outputFilters []string) { } } +func printFilteredProcessors(processorFilters []string, commented bool) { + // Filter processors + var pnames []string + for pname := range processors.Processors { + if sliceContains(pname, processorFilters) { + pnames = append(pnames, pname) + } + } + sort.Strings(pnames) + + // Print Outputs + for _, pname := range pnames { + creator := processors.Processors[pname] + output := creator() + printConfig(pname, output, "processors", commented) + } +} + +func printFilteredAggregators(aggregatorFilters []string, commented bool) { + // Filter outputs + var anames []string + for aname := range aggregators.Aggregators { + if sliceContains(aname, aggregatorFilters) { + anames = append(anames, aname) + } + } + sort.Strings(anames) + + // Print Outputs + for _, aname := range anames { + creator := aggregators.Aggregators[aname] + output := creator() + printConfig(aname, output, "aggregators", commented) + } +} + func printFilteredInputs(inputFilters []string, commented bool) { // Filter inputs var pnames []string @@ -404,24 +505,21 @@ func PrintOutputConfig(name string) error { } func (c *Config) LoadDirectory(path string) error { - directoryEntries, err := ioutil.ReadDir(path) - if err != nil { - return err - } - for _, entry := range directoryEntries { - if 
entry.IsDir() { - continue + walkfn := func(thispath string, info os.FileInfo, _ error) error { + if info.IsDir() { + return nil } - name := entry.Name() + name := info.Name() if len(name) < 6 || name[len(name)-5:] != ".conf" { - continue + return nil } - err := c.LoadConfig(filepath.Join(path, name)) + err := c.LoadConfig(thispath) if err != nil { return err } + return nil } - return nil + return filepath.Walk(path, walkfn) } // Try to find a default config file at these locations (in order): @@ -438,7 +536,7 @@ func getDefaultConfigPath() (string, error) { } for _, path := range []string{envfile, homefile, etcfile} { if _, err := os.Stat(path); err == nil { - log.Printf("Using config file: %s", path) + log.Printf("I! Using config file: %s", path) return path, nil } } @@ -469,7 +567,7 @@ func (c *Config) LoadConfig(path string) error { return fmt.Errorf("%s: invalid configuration", path) } if err = config.UnmarshalTable(subTable, c.Tags); err != nil { - log.Printf("Could not parse [global_tags] config\n") + log.Printf("E! Could not parse [global_tags] config\n") return fmt.Errorf("Error parsing %s, %s", path, err) } } @@ -482,7 +580,7 @@ func (c *Config) LoadConfig(path string) error { return fmt.Errorf("%s: invalid configuration", path) } if err = config.UnmarshalTable(subTable, c.Agent); err != nil { - log.Printf("Could not parse [agent] config\n") + log.Printf("E! Could not parse [agent] config\n") return fmt.Errorf("Error parsing %s, %s", path, err) } } @@ -499,6 +597,7 @@ func (c *Config) LoadConfig(path string) error { case "outputs": for pluginName, pluginVal := range subTable.Fields { switch pluginSubTable := pluginVal.(type) { + // legacy [outputs.influxdb] support case *ast.Table: if err = c.addOutput(pluginName, pluginSubTable); err != nil { return fmt.Errorf("Error parsing %s, %s", path, err) @@ -517,6 +616,7 @@ func (c *Config) LoadConfig(path string) error { case "inputs", "plugins": for pluginName, pluginVal := range subTable.Fields { switch pluginSubTable := pluginVal.(type) { + // legacy [inputs.cpu] support case *ast.Table: if err = c.addInput(pluginName, pluginSubTable); err != nil { return fmt.Errorf("Error parsing %s, %s", path, err) @@ -532,6 +632,34 @@ func (c *Config) LoadConfig(path string) error { pluginName, path) } } + case "processors": + for pluginName, pluginVal := range subTable.Fields { + switch pluginSubTable := pluginVal.(type) { + case []*ast.Table: + for _, t := range pluginSubTable { + if err = c.addProcessor(pluginName, t); err != nil { + return fmt.Errorf("Error parsing %s, %s", path, err) + } + } + default: + return fmt.Errorf("Unsupported config format: %s, file %s", + pluginName, path) + } + } + case "aggregators": + for pluginName, pluginVal := range subTable.Fields { + switch pluginSubTable := pluginVal.(type) { + case []*ast.Table: + for _, t := range pluginSubTable { + if err = c.addAggregator(pluginName, t); err != nil { + return fmt.Errorf("Error parsing %s, %s", path, err) + } + } + default: + return fmt.Errorf("Unsupported config format: %s, file %s", + pluginName, path) + } + } // Assume it's an input input for legacy config file support if no other // identifiers are present default: @@ -540,6 +668,10 @@ func (c *Config) LoadConfig(path string) error { } } } + + if len(c.Processors) > 1 { + sort.Sort(c.Processors) + } return nil } @@ -572,6 +704,52 @@ func parseFile(fpath string) (*ast.Table, error) { return toml.Parse(contents) } +func (c *Config) addAggregator(name string, table *ast.Table) error { + creator, ok := 
aggregators.Aggregators[name] + if !ok { + return fmt.Errorf("Undefined but requested aggregator: %s", name) + } + aggregator := creator() + + conf, err := buildAggregator(name, table) + if err != nil { + return err + } + + if err := config.UnmarshalTable(table, aggregator); err != nil { + return err + } + + c.Aggregators = append(c.Aggregators, models.NewRunningAggregator(aggregator, conf)) + return nil +} + +func (c *Config) addProcessor(name string, table *ast.Table) error { + creator, ok := processors.Processors[name] + if !ok { + return fmt.Errorf("Undefined but requested processor: %s", name) + } + processor := creator() + + processorConfig, err := buildProcessor(name, table) + if err != nil { + return err + } + + if err := config.UnmarshalTable(table, processor); err != nil { + return err + } + + rf := &models.RunningProcessor{ + Name: name, + Processor: processor, + Config: processorConfig, + } + + c.Processors = append(c.Processors, rf) + return nil +} + func (c *Config) addOutput(name string, table *ast.Table) error { if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) { return nil @@ -644,7 +822,6 @@ func (c *Config) addInput(name string, table *ast.Table) error { } rp := &models.RunningInput{ - Name: name, Input: input, Config: pluginConfig, } @@ -652,6 +829,144 @@ func (c *Config) addInput(name string, table *ast.Table) error { return nil } +// buildAggregator parses Aggregator specific items from the ast.Table, +// builds the filter and returns a +// models.AggregatorConfig to be inserted into models.RunningAggregator +func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) { + unsupportedFields := []string{"tagexclude", "taginclude"} + for _, field := range unsupportedFields { + if _, ok := tbl.Fields[field]; ok { + return nil, fmt.Errorf("%s is not supported for aggregator plugins (%s).", + field, name) + } + } + + conf := &models.AggregatorConfig{ + Name: name, + Delay: time.Millisecond * 100, + Period: time.Second * 30, + } + + if node, ok := tbl.Fields["period"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + dur, err := time.ParseDuration(str.Value) + if err != nil { + return nil, err + } + + conf.Period = dur + } + } + } + + if node, ok := tbl.Fields["delay"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + dur, err := time.ParseDuration(str.Value) + if err != nil { + return nil, err + } + + conf.Delay = dur + } + } + } + + if node, ok := tbl.Fields["drop_original"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if b, ok := kv.Value.(*ast.Boolean); ok { + var err error + conf.DropOriginal, err = strconv.ParseBool(b.Value) + if err != nil { + log.Printf("Error parsing boolean value for %s: %s\n", name, err) + } + } + } + } + + if node, ok := tbl.Fields["name_prefix"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + conf.MeasurementPrefix = str.Value + } + } + } + + if node, ok := tbl.Fields["name_suffix"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + conf.MeasurementSuffix = str.Value + } + } + } + + if node, ok := tbl.Fields["name_override"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + conf.NameOverride = str.Value + } + } + } + + conf.Tags = make(map[string]string) + if node, ok := tbl.Fields["tags"]; ok { + if subtbl, ok := node.(*ast.Table); ok { + if err := 
config.UnmarshalTable(subtbl, conf.Tags); err != nil { + log.Printf("Could not parse tags for input %s\n", name) + } + } + } + + delete(tbl.Fields, "period") + delete(tbl.Fields, "delay") + delete(tbl.Fields, "drop_original") + delete(tbl.Fields, "name_prefix") + delete(tbl.Fields, "name_suffix") + delete(tbl.Fields, "name_override") + delete(tbl.Fields, "tags") + var err error + conf.Filter, err = buildFilter(tbl) + if err != nil { + return conf, err + } + return conf, nil +} + +// buildProcessor parses Processor specific items from the ast.Table, +// builds the filter and returns a +// models.ProcessorConfig to be inserted into models.RunningProcessor +func buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error) { + conf := &models.ProcessorConfig{Name: name} + unsupportedFields := []string{"tagexclude", "taginclude", "fielddrop", "fieldpass"} + for _, field := range unsupportedFields { + if _, ok := tbl.Fields[field]; ok { + return nil, fmt.Errorf("%s is not supported for processor plugins (%s).", + field, name) + } + } + + if node, ok := tbl.Fields["order"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if b, ok := kv.Value.(*ast.Integer); ok { + var err error + conf.Order, err = strconv.ParseInt(b.Value, 10, 64) + if err != nil { + log.Printf("Error parsing int value for %s: %s\n", name, err) + } + } + } + } + + delete(tbl.Fields, "order") + var err error + conf.Filter, err = buildFilter(tbl) + if err != nil { + return conf, err + } + return conf, nil +} + // buildFilter builds a Filter // (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to // be inserted into the models.OutputConfig/models.InputConfig @@ -835,7 +1150,7 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) { if node, ok := tbl.Fields["tags"]; ok { if subtbl, ok := node.(*ast.Table); ok { if err := config.UnmarshalTable(subtbl, cp.Tags); err != nil { - log.Printf("Could not parse tags for input %s\n", name) + log.Printf("E! Could not parse tags for input %s\n", name) } } } diff --git a/internal/globpath/globpath.go b/internal/globpath/globpath.go index 6755e69b271f3..22ae92721d415 100644 --- a/internal/globpath/globpath.go +++ b/internal/globpath/globpath.go @@ -12,21 +12,23 @@ import ( var sepStr = fmt.Sprintf("%v", string(os.PathSeparator)) type GlobPath struct { - path string - hasMeta bool - g glob.Glob - root string + path string + hasMeta bool + hasSuperMeta bool + g glob.Glob + root string } func Compile(path string) (*GlobPath, error) { out := GlobPath{ - hasMeta: hasMeta(path), - path: path, + hasMeta: hasMeta(path), + hasSuperMeta: hasSuperMeta(path), + path: path, } // if there are no glob meta characters in the path, don't bother compiling // a glob object or finding the root directory. (see short-circuit in Match) - if !out.hasMeta { + if !out.hasMeta || !out.hasSuperMeta { return &out, nil } @@ -48,6 +50,17 @@ func (g *GlobPath) Match() map[string]os.FileInfo { } return out } + if !g.hasSuperMeta { + out := make(map[string]os.FileInfo) + files, _ := filepath.Glob(g.path) + for _, file := range files { + info, err := os.Stat(file) + if !os.IsNotExist(err) { + out[file] = info + } + } + return out + } return walkFilePath(g.root, g.g) } @@ -96,3 +109,8 @@ func findRootDir(path string) string { func hasMeta(path string) bool { return strings.IndexAny(path, "*?[") >= 0 } + +// hasSuperMeta reports whether path contains any super magic glob characters (**). 
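+// For example, "/var/log/**/*.log" needs the recursive walkFilePath above,
+// while "/var/log/*.log" can be served by plain filepath.Glob in Match.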
+func hasSuperMeta(path string) bool { + return strings.Index(path, "**") >= 0 +} diff --git a/internal/internal.go b/internal/internal.go index 58a1200e06929..28b37c09f2f1b 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -35,12 +35,21 @@ type Duration struct { // UnmarshalTOML parses the duration from the TOML config file func (d *Duration) UnmarshalTOML(b []byte) error { var err error - // Parse string duration, ie, "1s" - d.Duration, err = time.ParseDuration(string(b[1 : len(b)-1])) + + // see if we can straight convert it + d.Duration, err = time.ParseDuration(string(b)) if err == nil { return nil } + // Parse string duration, ie, "1s" + if uq, err := strconv.Unquote(string(b)); err == nil && len(uq) > 0 { + d.Duration, err = time.ParseDuration(uq) + if err == nil { + return nil + } + } + // First try parsing as integer seconds sI, err := strconv.ParseInt(string(b), 10, 64) if err == nil { @@ -198,7 +207,7 @@ func WaitTimeout(c *exec.Cmd, timeout time.Duration) error { return err case <-timer.C: if err := c.Process.Kill(); err != nil { - log.Printf("FATAL error killing process: %s", err) + log.Printf("E! FATAL error killing process: %s", err) return err } // wait for the command to return after killing it diff --git a/internal/internal_test.go b/internal/internal_test.go index c18991c2d535f..0d98218575be8 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -131,3 +131,22 @@ func TestRandomSleep(t *testing.T) { elapsed = time.Since(s) assert.True(t, elapsed < time.Millisecond*150) } + +func TestDuration(t *testing.T) { + var d Duration + + d.UnmarshalTOML([]byte(`"1s"`)) + assert.Equal(t, time.Second, d.Duration) + + d = Duration{} + d.UnmarshalTOML([]byte(`1s`)) + assert.Equal(t, time.Second, d.Duration) + + d = Duration{} + d.UnmarshalTOML([]byte(`10`)) + assert.Equal(t, 10*time.Second, d.Duration) + + d = Duration{} + d.UnmarshalTOML([]byte(`1.5`)) + assert.Equal(t, time.Second, d.Duration) +} diff --git a/internal/models/filter.go b/internal/models/filter.go index b87c595012ab5..8e080f4a85d0f 100644 --- a/internal/models/filter.go +++ b/internal/models/filter.go @@ -96,7 +96,7 @@ func (f *Filter) Compile() error { // Apply applies the filter to the given measurement name, fields map, and // tags map. It will return false if the metric should be "filtered out", and // true if the metric should "pass". -// It will modify tags in-place if they need to be deleted. +// It will modify tags & fields in-place if they need to be deleted. func (f *Filter) Apply( measurement string, fields map[string]interface{}, diff --git a/internal/models/makemetric.go b/internal/models/makemetric.go new file mode 100644 index 0000000000000..71427607c187c --- /dev/null +++ b/internal/models/makemetric.go @@ -0,0 +1,154 @@ +package models + +import ( + "log" + "math" + "time" + + "github.com/influxdata/telegraf" +) + +// makemetric is used by both RunningAggregator & RunningInput +// to make metrics. +// nameOverride: override the name of the measurement being made. +// namePrefix: add this prefix to each measurement name. +// nameSuffix: add this suffix to each measurement name. +// pluginTags: these are tags that are specific to this plugin. +// daemonTags: these are daemon-wide global tags, and get applied after pluginTags. +// filter: this is a filter to apply to each metric being made. +// applyFilter: if false, the above filter is not applied to each metric. 
+// This is used by Aggregators, because aggregators use filters +// on incoming metrics instead of on created metrics. +// TODO refactor this to not have such a huge func signature. +func makemetric( + measurement string, + fields map[string]interface{}, + tags map[string]string, + nameOverride string, + namePrefix string, + nameSuffix string, + pluginTags map[string]string, + daemonTags map[string]string, + filter Filter, + applyFilter bool, + debug bool, + mType telegraf.ValueType, + t time.Time, +) telegraf.Metric { + if len(fields) == 0 || len(measurement) == 0 { + return nil + } + if tags == nil { + tags = make(map[string]string) + } + + // Override measurement name if set + if len(nameOverride) != 0 { + measurement = nameOverride + } + // Apply measurement prefix and suffix if set + if len(namePrefix) != 0 { + measurement = namePrefix + measurement + } + if len(nameSuffix) != 0 { + measurement = measurement + nameSuffix + } + + // Apply plugin-wide tags if set + for k, v := range pluginTags { + if _, ok := tags[k]; !ok { + tags[k] = v + } + } + // Apply daemon-wide tags if set + for k, v := range daemonTags { + if _, ok := tags[k]; !ok { + tags[k] = v + } + } + + // Apply the metric filter(s) + // for aggregators, the filter does not get applied when the metric is made. + // instead, the filter is applied to metric incoming into the plugin. + // ie, it gets applied in the RunningAggregator.Apply function. + if applyFilter { + if ok := filter.Apply(measurement, fields, tags); !ok { + return nil + } + } + + for k, v := range fields { + // Validate uint64 and float64 fields + // convert all int & uint types to int64 + switch val := v.(type) { + case nil: + // delete nil fields + delete(fields, k) + case uint: + fields[k] = int64(val) + continue + case uint8: + fields[k] = int64(val) + continue + case uint16: + fields[k] = int64(val) + continue + case uint32: + fields[k] = int64(val) + continue + case int: + fields[k] = int64(val) + continue + case int8: + fields[k] = int64(val) + continue + case int16: + fields[k] = int64(val) + continue + case int32: + fields[k] = int64(val) + continue + case uint64: + // InfluxDB does not support writing uint64 + if val < uint64(9223372036854775808) { + fields[k] = int64(val) + } else { + fields[k] = int64(9223372036854775807) + } + continue + case float32: + fields[k] = float64(val) + continue + case float64: + // NaNs are invalid values in influxdb, skip measurement + if math.IsNaN(val) || math.IsInf(val, 0) { + if debug { + log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+ + "field, skipping", + measurement, k) + } + delete(fields, k) + continue + } + default: + fields[k] = v + } + } + + var m telegraf.Metric + var err error + switch mType { + case telegraf.Counter: + m, err = telegraf.NewCounterMetric(measurement, tags, fields, t) + case telegraf.Gauge: + m, err = telegraf.NewGaugeMetric(measurement, tags, fields, t) + default: + m, err = telegraf.NewMetric(measurement, tags, fields, t) + } + if err != nil { + log.Printf("Error adding point [%s]: %s\n", measurement, err.Error()) + return nil + } + + return m +} diff --git a/internal/models/running_aggregator.go b/internal/models/running_aggregator.go new file mode 100644 index 0000000000000..5c7640ba68258 --- /dev/null +++ b/internal/models/running_aggregator.go @@ -0,0 +1,164 @@ +package models + +import ( + "time" + + "github.com/influxdata/telegraf" +) + +type RunningAggregator struct { + a telegraf.Aggregator + Config *AggregatorConfig + + metrics chan telegraf.Metric + + 
periodStart time.Time
+	periodEnd   time.Time
+}
+
+func NewRunningAggregator(
+	a telegraf.Aggregator,
+	conf *AggregatorConfig,
+) *RunningAggregator {
+	return &RunningAggregator{
+		a:       a,
+		Config:  conf,
+		metrics: make(chan telegraf.Metric, 100),
+	}
+}
+
+// AggregatorConfig containing configuration parameters for the running
+// aggregator plugin.
+type AggregatorConfig struct {
+	Name string
+
+	DropOriginal      bool
+	NameOverride      string
+	MeasurementPrefix string
+	MeasurementSuffix string
+	Tags              map[string]string
+	Filter            Filter
+
+	Period time.Duration
+	Delay  time.Duration
+}
+
+func (r *RunningAggregator) Name() string {
+	return "aggregators." + r.Config.Name
+}
+
+func (r *RunningAggregator) MakeMetric(
+	measurement string,
+	fields map[string]interface{},
+	tags map[string]string,
+	mType telegraf.ValueType,
+	t time.Time,
+) telegraf.Metric {
+	m := makemetric(
+		measurement,
+		fields,
+		tags,
+		r.Config.NameOverride,
+		r.Config.MeasurementPrefix,
+		r.Config.MeasurementSuffix,
+		r.Config.Tags,
+		nil,
+		r.Config.Filter,
+		false,
+		false,
+		mType,
+		t,
+	)
+
+	// makemetric returns nil for empty fields, so guard before tagging
+	// the metric as an aggregate.
+	if m != nil {
+		m.SetAggregate(true)
+	}
+
+	return m
+}
+
+// Add applies the given metric to the aggregator.
+// Before applying to the plugin, it will run any defined filters on the metric.
+// Add returns true if the original metric should be dropped.
+func (r *RunningAggregator) Add(in telegraf.Metric) bool {
+	if r.Config.Filter.IsActive() {
+		// check if the aggregator should apply this metric
+		name := in.Name()
+		fields := in.Fields()
+		tags := in.Tags()
+		t := in.Time()
+		if ok := r.Config.Filter.Apply(name, fields, tags); !ok {
+			// aggregator should not apply this metric
+			return false
+		}
+
+		in, _ = telegraf.NewMetric(name, tags, fields, t)
+	}
+
+	r.metrics <- in
+	return r.Config.DropOriginal
+}
+
+func (r *RunningAggregator) add(in telegraf.Metric) {
+	r.a.Add(in)
+}
+
+func (r *RunningAggregator) push(acc telegraf.Accumulator) {
+	r.a.Push(acc)
+}
+
+func (r *RunningAggregator) reset() {
+	r.a.Reset()
+}
+
+// Run runs the running aggregator, listens for incoming metrics, and waits
+// for period ticks to tell it when to push and reset the aggregator.
+func (r *RunningAggregator) Run(
+	acc telegraf.Accumulator,
+	shutdown chan struct{},
+) {
+	// The start of the period is truncated to the nearest second.
+	//
+	// Every metric then gets its timestamp checked and is dropped if it
+	// is not within:
+	//
+	//   start < t < end + truncation + delay
+	//
+	// So if we start at now = 00:00.2 with a 10s period and 0.3s delay:
+	//   now = 00:00.2
+	//   start = 00:00
+	//   truncation = 00:00.2
+	//   end = 00:10
+	// 1st interval: 00:00 - 00:10.5
+	// 2nd interval: 00:10 - 00:20.5
+	// etc.
+	//
+	now := time.Now()
+	r.periodStart = now.Truncate(time.Second)
+	truncation := now.Sub(r.periodStart)
+	r.periodEnd = r.periodStart.Add(r.Config.Period)
+	time.Sleep(r.Config.Delay)
+	periodT := time.NewTicker(r.Config.Period)
+	defer periodT.Stop()
+
+	for {
+		select {
+		case <-shutdown:
+			if len(r.metrics) > 0 {
+				// wait until metrics are flushed before exiting
+				continue
+			}
+			return
+		case m := <-r.metrics:
+			if m.Time().Before(r.periodStart) ||
+				m.Time().After(r.periodEnd.Add(truncation).Add(r.Config.Delay)) {
+				// the metric is outside the current aggregation period, so
+				// skip it.
+ continue + } + r.add(m) + case <-periodT.C: + r.periodStart = r.periodEnd + r.periodEnd = r.periodStart.Add(r.Config.Period) + r.push(acc) + r.reset() + } + } +} diff --git a/internal/models/running_aggregator_test.go b/internal/models/running_aggregator_test.go new file mode 100644 index 0000000000000..834f7d1e0118f --- /dev/null +++ b/internal/models/running_aggregator_test.go @@ -0,0 +1,256 @@ +package models + +import ( + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/assert" +) + +func TestAdd(t *testing.T) { + a := &TestAggregator{} + ra := NewRunningAggregator(a, &AggregatorConfig{ + Name: "TestRunningAggregator", + Filter: Filter{ + NamePass: []string{"*"}, + }, + Period: time.Millisecond * 500, + }) + assert.NoError(t, ra.Config.Filter.Compile()) + acc := testutil.Accumulator{} + go ra.Run(&acc, make(chan struct{})) + + m := ra.MakeMetric( + "RITest", + map[string]interface{}{"value": int(101)}, + map[string]string{}, + telegraf.Untyped, + time.Now().Add(time.Millisecond*150), + ) + assert.False(t, ra.Add(m)) + + for { + time.Sleep(time.Millisecond) + if atomic.LoadInt64(&a.sum) > 0 { + break + } + } + assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum)) +} + +func TestAddMetricsOutsideCurrentPeriod(t *testing.T) { + a := &TestAggregator{} + ra := NewRunningAggregator(a, &AggregatorConfig{ + Name: "TestRunningAggregator", + Filter: Filter{ + NamePass: []string{"*"}, + }, + Period: time.Millisecond * 500, + }) + assert.NoError(t, ra.Config.Filter.Compile()) + acc := testutil.Accumulator{} + go ra.Run(&acc, make(chan struct{})) + + // metric before current period + m := ra.MakeMetric( + "RITest", + map[string]interface{}{"value": int(101)}, + map[string]string{}, + telegraf.Untyped, + time.Now().Add(-time.Hour), + ) + assert.False(t, ra.Add(m)) + + // metric after current period + m = ra.MakeMetric( + "RITest", + map[string]interface{}{"value": int(101)}, + map[string]string{}, + telegraf.Untyped, + time.Now().Add(time.Hour), + ) + assert.False(t, ra.Add(m)) + + // "now" metric + m = ra.MakeMetric( + "RITest", + map[string]interface{}{"value": int(101)}, + map[string]string{}, + telegraf.Untyped, + time.Now().Add(time.Millisecond*50), + ) + assert.False(t, ra.Add(m)) + + for { + time.Sleep(time.Millisecond) + if atomic.LoadInt64(&a.sum) > 0 { + break + } + } + assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum)) +} + +func TestAddAndPushOnePeriod(t *testing.T) { + a := &TestAggregator{} + ra := NewRunningAggregator(a, &AggregatorConfig{ + Name: "TestRunningAggregator", + Filter: Filter{ + NamePass: []string{"*"}, + }, + Period: time.Millisecond * 500, + }) + assert.NoError(t, ra.Config.Filter.Compile()) + acc := testutil.Accumulator{} + shutdown := make(chan struct{}) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + ra.Run(&acc, shutdown) + }() + + m := ra.MakeMetric( + "RITest", + map[string]interface{}{"value": int(101)}, + map[string]string{}, + telegraf.Untyped, + time.Now().Add(time.Millisecond*100), + ) + assert.False(t, ra.Add(m)) + + for { + time.Sleep(time.Millisecond) + if acc.NMetrics() > 0 { + break + } + } + acc.AssertContainsFields(t, "TestMetric", map[string]interface{}{"sum": int64(101)}) + + close(shutdown) + wg.Wait() +} + +func TestAddDropOriginal(t *testing.T) { + ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{ + Name: "TestRunningAggregator", + Filter: Filter{ + NamePass: []string{"RI*"}, + }, 
+ DropOriginal: true, + }) + assert.NoError(t, ra.Config.Filter.Compile()) + + m := ra.MakeMetric( + "RITest", + map[string]interface{}{"value": int(101)}, + map[string]string{}, + telegraf.Untyped, + time.Now(), + ) + assert.True(t, ra.Add(m)) + + // this metric name doesn't match the filter, so Add will return false + m2 := ra.MakeMetric( + "foobar", + map[string]interface{}{"value": int(101)}, + map[string]string{}, + telegraf.Untyped, + time.Now(), + ) + assert.False(t, ra.Add(m2)) +} + +// make an untyped, counter, & gauge metric +func TestMakeMetricA(t *testing.T) { + now := time.Now() + ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{ + Name: "TestRunningAggregator", + }) + assert.Equal(t, "aggregators.TestRunningAggregator", ra.Name()) + + m := ra.MakeMetric( + "RITest", + map[string]interface{}{"value": int(101)}, + map[string]string{}, + telegraf.Untyped, + now, + ) + assert.Equal( + t, + m.String(), + fmt.Sprintf("RITest value=101i %d", now.UnixNano()), + ) + assert.Equal( + t, + m.Type(), + telegraf.Untyped, + ) + + m = ra.MakeMetric( + "RITest", + map[string]interface{}{"value": int(101)}, + map[string]string{}, + telegraf.Counter, + now, + ) + assert.Equal( + t, + m.String(), + fmt.Sprintf("RITest value=101i %d", now.UnixNano()), + ) + assert.Equal( + t, + m.Type(), + telegraf.Counter, + ) + + m = ra.MakeMetric( + "RITest", + map[string]interface{}{"value": int(101)}, + map[string]string{}, + telegraf.Gauge, + now, + ) + assert.Equal( + t, + m.String(), + fmt.Sprintf("RITest value=101i %d", now.UnixNano()), + ) + assert.Equal( + t, + m.Type(), + telegraf.Gauge, + ) +} + +type TestAggregator struct { + sum int64 +} + +func (t *TestAggregator) Description() string { return "" } +func (t *TestAggregator) SampleConfig() string { return "" } +func (t *TestAggregator) Reset() { + atomic.StoreInt64(&t.sum, 0) +} + +func (t *TestAggregator) Push(acc telegraf.Accumulator) { + acc.AddFields("TestMetric", + map[string]interface{}{"sum": t.sum}, + map[string]string{}, + ) +} + +func (t *TestAggregator) Add(in telegraf.Metric) { + for _, v := range in.Fields() { + if vi, ok := v.(int64); ok { + atomic.AddInt64(&t.sum, vi) + } + } +} diff --git a/internal/models/running_input.go b/internal/models/running_input.go index 445c5ee96a781..558af3e5c0d12 100644 --- a/internal/models/running_input.go +++ b/internal/models/running_input.go @@ -1,15 +1,19 @@ package models import ( + "fmt" "time" "github.com/influxdata/telegraf" ) type RunningInput struct { - Name string Input telegraf.Input Config *InputConfig + + trace bool + debug bool + defaultTags map[string]string } // InputConfig containing a name, interval, and filter @@ -22,3 +26,59 @@ type InputConfig struct { Filter Filter Interval time.Duration } + +func (r *RunningInput) Name() string { + return "inputs." + r.Config.Name +} + +// MakeMetric either returns a metric, or returns nil if the metric doesn't +// need to be created (because of filtering, an error, etc.) 
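+// Callers should therefore nil-check the returned metric before using it.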
+func (r *RunningInput) MakeMetric( + measurement string, + fields map[string]interface{}, + tags map[string]string, + mType telegraf.ValueType, + t time.Time, +) telegraf.Metric { + m := makemetric( + measurement, + fields, + tags, + r.Config.NameOverride, + r.Config.MeasurementPrefix, + r.Config.MeasurementSuffix, + r.Config.Tags, + r.defaultTags, + r.Config.Filter, + true, + r.debug, + mType, + t, + ) + + if r.trace && m != nil { + fmt.Println("> " + m.String()) + } + + return m +} + +func (r *RunningInput) Debug() bool { + return r.debug +} + +func (r *RunningInput) SetDebug(debug bool) { + r.debug = debug +} + +func (r *RunningInput) Trace() bool { + return r.trace +} + +func (r *RunningInput) SetTrace(trace bool) { + r.trace = trace +} + +func (r *RunningInput) SetDefaultTags(tags map[string]string) { + r.defaultTags = tags +} diff --git a/internal/models/running_input_test.go b/internal/models/running_input_test.go new file mode 100644 index 0000000000000..3d3b65b953151 --- /dev/null +++ b/internal/models/running_input_test.go @@ -0,0 +1,352 @@ +package models + +import ( + "fmt" + "math" + "testing" + "time" + + "github.com/influxdata/telegraf" + + "github.com/stretchr/testify/assert" +) + +func TestMakeMetricNoFields(t *testing.T) { + now := time.Now() + ri := RunningInput{ + Config: &InputConfig{ + Name: "TestRunningInput", + }, + } + + m := ri.MakeMetric( + "RITest", + map[string]interface{}{}, + map[string]string{}, + telegraf.Untyped, + now, + ) + assert.Nil(t, m) +} + +// nil fields should get dropped +func TestMakeMetricNilFields(t *testing.T) { + now := time.Now() + ri := RunningInput{ + Config: &InputConfig{ + Name: "TestRunningInput", + }, + } + + m := ri.MakeMetric( + "RITest", + map[string]interface{}{ + "value": int(101), + "nil": nil, + }, + map[string]string{}, + telegraf.Untyped, + now, + ) + assert.Equal( + t, + fmt.Sprintf("RITest value=101i %d", now.UnixNano()), + m.String(), + ) +} + +// make an untyped, counter, & gauge metric +func TestMakeMetric(t *testing.T) { + now := time.Now() + ri := RunningInput{ + Config: &InputConfig{ + Name: "TestRunningInput", + }, + } + ri.SetDebug(true) + assert.Equal(t, true, ri.Debug()) + ri.SetTrace(true) + assert.Equal(t, true, ri.Trace()) + assert.Equal(t, "inputs.TestRunningInput", ri.Name()) + + m := ri.MakeMetric( + "RITest", + map[string]interface{}{"value": int(101)}, + map[string]string{}, + telegraf.Untyped, + now, + ) + assert.Equal( + t, + m.String(), + fmt.Sprintf("RITest value=101i %d", now.UnixNano()), + ) + assert.Equal( + t, + m.Type(), + telegraf.Untyped, + ) + + m = ri.MakeMetric( + "RITest", + map[string]interface{}{"value": int(101)}, + map[string]string{}, + telegraf.Counter, + now, + ) + assert.Equal( + t, + m.String(), + fmt.Sprintf("RITest value=101i %d", now.UnixNano()), + ) + assert.Equal( + t, + m.Type(), + telegraf.Counter, + ) + + m = ri.MakeMetric( + "RITest", + map[string]interface{}{"value": int(101)}, + map[string]string{}, + telegraf.Gauge, + now, + ) + assert.Equal( + t, + m.String(), + fmt.Sprintf("RITest value=101i %d", now.UnixNano()), + ) + assert.Equal( + t, + m.Type(), + telegraf.Gauge, + ) +} + +func TestMakeMetricWithPluginTags(t *testing.T) { + now := time.Now() + ri := RunningInput{ + Config: &InputConfig{ + Name: "TestRunningInput", + Tags: map[string]string{ + "foo": "bar", + }, + }, + } + ri.SetDebug(true) + assert.Equal(t, true, ri.Debug()) + ri.SetTrace(true) + assert.Equal(t, true, ri.Trace()) + + m := ri.MakeMetric( + "RITest", + map[string]interface{}{"value": 
int(101)}, + nil, + telegraf.Untyped, + now, + ) + assert.Equal( + t, + m.String(), + fmt.Sprintf("RITest,foo=bar value=101i %d", now.UnixNano()), + ) +} + +func TestMakeMetricFilteredOut(t *testing.T) { + now := time.Now() + ri := RunningInput{ + Config: &InputConfig{ + Name: "TestRunningInput", + Tags: map[string]string{ + "foo": "bar", + }, + Filter: Filter{NamePass: []string{"foobar"}}, + }, + } + ri.SetDebug(true) + assert.Equal(t, true, ri.Debug()) + ri.SetTrace(true) + assert.Equal(t, true, ri.Trace()) + assert.NoError(t, ri.Config.Filter.Compile()) + + m := ri.MakeMetric( + "RITest", + map[string]interface{}{"value": int(101)}, + nil, + telegraf.Untyped, + now, + ) + assert.Nil(t, m) +} + +func TestMakeMetricWithDaemonTags(t *testing.T) { + now := time.Now() + ri := RunningInput{ + Config: &InputConfig{ + Name: "TestRunningInput", + }, + } + ri.SetDefaultTags(map[string]string{ + "foo": "bar", + }) + ri.SetDebug(true) + assert.Equal(t, true, ri.Debug()) + ri.SetTrace(true) + assert.Equal(t, true, ri.Trace()) + + m := ri.MakeMetric( + "RITest", + map[string]interface{}{"value": int(101)}, + map[string]string{}, + telegraf.Untyped, + now, + ) + assert.Equal( + t, + m.String(), + fmt.Sprintf("RITest,foo=bar value=101i %d", now.UnixNano()), + ) +} + +// make an untyped, counter, & gauge metric +func TestMakeMetricInfFields(t *testing.T) { + inf := math.Inf(1) + ninf := math.Inf(-1) + now := time.Now() + ri := RunningInput{ + Config: &InputConfig{ + Name: "TestRunningInput", + }, + } + ri.SetDebug(true) + assert.Equal(t, true, ri.Debug()) + ri.SetTrace(true) + assert.Equal(t, true, ri.Trace()) + + m := ri.MakeMetric( + "RITest", + map[string]interface{}{ + "value": int(101), + "inf": inf, + "ninf": ninf, + }, + map[string]string{}, + telegraf.Untyped, + now, + ) + assert.Equal( + t, + m.String(), + fmt.Sprintf("RITest value=101i %d", now.UnixNano()), + ) +} + +func TestMakeMetricAllFieldTypes(t *testing.T) { + now := time.Now() + ri := RunningInput{ + Config: &InputConfig{ + Name: "TestRunningInput", + }, + } + ri.SetDebug(true) + assert.Equal(t, true, ri.Debug()) + ri.SetTrace(true) + assert.Equal(t, true, ri.Trace()) + + m := ri.MakeMetric( + "RITest", + map[string]interface{}{ + "a": int(10), + "b": int8(10), + "c": int16(10), + "d": int32(10), + "e": uint(10), + "f": uint8(10), + "g": uint16(10), + "h": uint32(10), + "i": uint64(10), + "j": float32(10), + "k": uint64(9223372036854775810), + "l": "foobar", + "m": true, + }, + map[string]string{}, + telegraf.Untyped, + now, + ) + assert.Equal( + t, + fmt.Sprintf("RITest a=10i,b=10i,c=10i,d=10i,e=10i,f=10i,g=10i,h=10i,i=10i,j=10,k=9223372036854775807i,l=\"foobar\",m=true %d", now.UnixNano()), + m.String(), + ) +} + +func TestMakeMetricNameOverride(t *testing.T) { + now := time.Now() + ri := RunningInput{ + Config: &InputConfig{ + Name: "TestRunningInput", + NameOverride: "foobar", + }, + } + + m := ri.MakeMetric( + "RITest", + map[string]interface{}{"value": int(101)}, + map[string]string{}, + telegraf.Untyped, + now, + ) + assert.Equal( + t, + m.String(), + fmt.Sprintf("foobar value=101i %d", now.UnixNano()), + ) +} + +func TestMakeMetricNamePrefix(t *testing.T) { + now := time.Now() + ri := RunningInput{ + Config: &InputConfig{ + Name: "TestRunningInput", + MeasurementPrefix: "foobar_", + }, + } + + m := ri.MakeMetric( + "RITest", + map[string]interface{}{"value": int(101)}, + map[string]string{}, + telegraf.Untyped, + now, + ) + assert.Equal( + t, + m.String(), + fmt.Sprintf("foobar_RITest value=101i %d", now.UnixNano()), + ) +} + 
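+// A sketch of one more case implied by makemetric (not part of the original
+// patch): plugin-level tags must not clobber tags already set on a metric,
+// since makemetric only copies a plugin tag when the key is absent from the
+// metric's own tags.
+func TestMakeMetricPluginTagsDoNotClobber(t *testing.T) {
+	now := time.Now()
+	ri := RunningInput{
+		Config: &InputConfig{
+			Name: "TestRunningInput",
+			Tags: map[string]string{"foo": "bar"},
+		},
+	}
+
+	// the metric already carries foo=baz, so the plugin's foo=bar must lose
+	m := ri.MakeMetric(
+		"RITest",
+		map[string]interface{}{"value": int(101)},
+		map[string]string{"foo": "baz"},
+		telegraf.Untyped,
+		now,
+	)
+	assert.Equal(
+		t,
+		fmt.Sprintf("RITest,foo=baz value=101i %d", now.UnixNano()),
+		m.String(),
+	)
+}
+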
+func TestMakeMetricNameSuffix(t *testing.T) { + now := time.Now() + ri := RunningInput{ + Config: &InputConfig{ + Name: "TestRunningInput", + MeasurementSuffix: "_foobar", + }, + } + + m := ri.MakeMetric( + "RITest", + map[string]interface{}{"value": int(101)}, + map[string]string{}, + telegraf.Untyped, + now, + ) + assert.Equal( + t, + m.String(), + fmt.Sprintf("RITest_foobar value=101i %d", now.UnixNano()), + ) +} diff --git a/internal/models/running_output.go b/internal/models/running_output.go index c4de4afd95d7f..aa94178f74145 100644 --- a/internal/models/running_output.go +++ b/internal/models/running_output.go @@ -85,7 +85,7 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { // Write writes all cached points to this output. func (ro *RunningOutput) Write() error { if !ro.Quiet { - log.Printf("Output [%s] buffer fullness: %d / %d metrics. "+ + log.Printf("I! Output [%s] buffer fullness: %d / %d metrics. "+ "Total gathered metrics: %d. Total dropped metrics: %d.", ro.Name, ro.failMetrics.Len()+ro.metrics.Len(), @@ -142,7 +142,7 @@ func (ro *RunningOutput) write(metrics []telegraf.Metric) error { elapsed := time.Since(start) if err == nil { if !ro.Quiet { - log.Printf("Output [%s] wrote batch of %d metrics in %s\n", + log.Printf("I! Output [%s] wrote batch of %d metrics in %s\n", ro.Name, len(metrics), elapsed) } } diff --git a/internal/models/running_output_test.go b/internal/models/running_output_test.go index a42d6fc7e6ff2..2bca79a067b01 100644 --- a/internal/models/running_output_test.go +++ b/internal/models/running_output_test.go @@ -132,7 +132,6 @@ func TestRunningOutput_PassFilter(t *testing.T) { func TestRunningOutput_TagIncludeNoMatch(t *testing.T) { conf := &OutputConfig{ Filter: Filter{ - TagInclude: []string{"nothing*"}, }, } @@ -154,7 +153,6 @@ func TestRunningOutput_TagIncludeNoMatch(t *testing.T) { func TestRunningOutput_TagExcludeMatch(t *testing.T) { conf := &OutputConfig{ Filter: Filter{ - TagExclude: []string{"tag*"}, }, } @@ -176,7 +174,6 @@ func TestRunningOutput_TagExcludeMatch(t *testing.T) { func TestRunningOutput_TagExcludeNoMatch(t *testing.T) { conf := &OutputConfig{ Filter: Filter{ - TagExclude: []string{"nothing*"}, }, } @@ -198,7 +195,6 @@ func TestRunningOutput_TagExcludeNoMatch(t *testing.T) { func TestRunningOutput_TagIncludeMatch(t *testing.T) { conf := &OutputConfig{ Filter: Filter{ - TagInclude: []string{"tag*"}, }, } diff --git a/internal/models/running_processor.go b/internal/models/running_processor.go new file mode 100644 index 0000000000000..600b619281576 --- /dev/null +++ b/internal/models/running_processor.go @@ -0,0 +1,44 @@ +package models + +import ( + "github.com/influxdata/telegraf" +) + +type RunningProcessor struct { + Name string + Processor telegraf.Processor + Config *ProcessorConfig +} + +type RunningProcessors []*RunningProcessor + +func (rp RunningProcessors) Len() int { return len(rp) } +func (rp RunningProcessors) Swap(i, j int) { rp[i], rp[j] = rp[j], rp[i] } +func (rp RunningProcessors) Less(i, j int) bool { return rp[i].Config.Order < rp[j].Config.Order } + +// FilterConfig containing a name and filter +type ProcessorConfig struct { + Name string + Order int64 + Filter Filter +} + +func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric { + ret := []telegraf.Metric{} + + for _, metric := range in { + if rp.Config.Filter.IsActive() { + // check if the filter should be applied to this metric + if ok := rp.Config.Filter.Apply(metric.Name(), metric.Fields(), metric.Tags()); !ok { + // this 
means the metric did not match the filter, so the processor is skipped
+				ret = append(ret, metric)
+				continue
+			}
+		}
+		// The metric matched the filter (or no filter is set), so call the
+		// processor's Apply function and append its results to the output slice.
+		ret = append(ret, rp.Processor.Apply(metric)...)
+	}
+
+	return ret
+}
diff --git a/internal/models/running_processor_test.go b/internal/models/running_processor_test.go
new file mode 100644
index 0000000000000..8a691a9b8f54f
--- /dev/null
+++ b/internal/models/running_processor_test.go
@@ -0,0 +1,117 @@
+package models
+
+import (
+	"testing"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/testutil"
+
+	"github.com/stretchr/testify/assert"
+)
+
+type TestProcessor struct {
+}
+
+func (f *TestProcessor) SampleConfig() string { return "" }
+func (f *TestProcessor) Description() string  { return "" }
+
+// Apply renames:
+//   "foo" to "fuz"
+//   "bar" to "baz"
+// And it also drops measurements named "dropme"
+func (f *TestProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
+	out := make([]telegraf.Metric, 0)
+	for _, m := range in {
+		switch m.Name() {
+		case "foo":
+			out = append(out, testutil.TestMetric(1, "fuz"))
+		case "bar":
+			out = append(out, testutil.TestMetric(1, "baz"))
+		case "dropme":
+			// drop the metric!
+		default:
+			out = append(out, m)
+		}
+	}
+	return out
+}
+
+func NewTestRunningProcessor() *RunningProcessor {
+	out := &RunningProcessor{
+		Name:      "test",
+		Processor: &TestProcessor{},
+		Config:    &ProcessorConfig{Filter: Filter{}},
+	}
+	return out
+}
+
+func TestRunningProcessor(t *testing.T) {
+	inmetrics := []telegraf.Metric{
+		testutil.TestMetric(1, "foo"),
+		testutil.TestMetric(1, "bar"),
+		testutil.TestMetric(1, "baz"),
+	}
+
+	expectedNames := []string{
+		"fuz",
+		"baz",
+		"baz",
+	}
+	rfp := NewTestRunningProcessor()
+	filteredMetrics := rfp.Apply(inmetrics...)
+
+	actualNames := []string{
+		filteredMetrics[0].Name(),
+		filteredMetrics[1].Name(),
+		filteredMetrics[2].Name(),
+	}
+	assert.Equal(t, expectedNames, actualNames)
+}
+
+func TestRunningProcessor_WithNameDrop(t *testing.T) {
+	inmetrics := []telegraf.Metric{
+		testutil.TestMetric(1, "foo"),
+		testutil.TestMetric(1, "bar"),
+		testutil.TestMetric(1, "baz"),
+	}
+
+	expectedNames := []string{
+		"foo",
+		"baz",
+		"baz",
+	}
+	rfp := NewTestRunningProcessor()
+
+	rfp.Config.Filter.NameDrop = []string{"foo"}
+	assert.NoError(t, rfp.Config.Filter.Compile())
+
+	filteredMetrics := rfp.Apply(inmetrics...)
+
+	actualNames := []string{
+		filteredMetrics[0].Name(),
+		filteredMetrics[1].Name(),
+		filteredMetrics[2].Name(),
+	}
+	assert.Equal(t, expectedNames, actualNames)
+}
+
+func TestRunningProcessor_DroppedMetric(t *testing.T) {
+	inmetrics := []telegraf.Metric{
+		testutil.TestMetric(1, "dropme"),
+		testutil.TestMetric(1, "foo"),
+		testutil.TestMetric(1, "bar"),
+	}
+
+	expectedNames := []string{
+		"fuz",
+		"baz",
+	}
+	rfp := NewTestRunningProcessor()
+	filteredMetrics := rfp.Apply(inmetrics...)
+
+	actualNames := []string{
+		filteredMetrics[0].Name(),
+		filteredMetrics[1].Name(),
+	}
+	assert.Equal(t, expectedNames, actualNames)
+}
diff --git a/logger/logger.go b/logger/logger.go
new file mode 100644
index 0000000000000..21db2d4116915
--- /dev/null
+++ b/logger/logger.go
@@ -0,0 +1,58 @@
+package logger
+
+import (
+	"io"
+	"log"
+	"os"
+
+	"github.com/influxdata/wlog"
+)
+
+// newTelegrafWriter returns a logging-wrapped writer.
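+// The wrapped writer hands each line to wlog, which filters it by its level
+// prefix ("E!", "W!", "I!", "D!") against the level set in SetupLogging below.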
+func newTelegrafWriter(w io.Writer) io.Writer { + return &telegrafLog{ + writer: wlog.NewWriter(w), + } +} + +type telegrafLog struct { + writer io.Writer +} + +func (t *telegrafLog) Write(p []byte) (n int, err error) { + return t.writer.Write(p) +} + +// SetupLogging configures the logging output. +// debug will set the log level to DEBUG +// quiet will set the log level to ERROR +// logfile will direct the logging output to a file. Empty string is +// interpreted as stderr. If there is an error opening the file the +// logger will fallback to stderr. +func SetupLogging(debug, quiet bool, logfile string) { + if debug { + wlog.SetLevel(wlog.DEBUG) + } + if quiet { + wlog.SetLevel(wlog.ERROR) + } + + var oFile *os.File + if logfile != "" { + if _, err := os.Stat(logfile); os.IsNotExist(err) { + if oFile, err = os.Create(logfile); err != nil { + log.Printf("E! Unable to create %s (%s), using stderr", logfile, err) + oFile = os.Stderr + } + } else { + if oFile, err = os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY, os.ModeAppend); err != nil { + log.Printf("E! Unable to append to %s (%s), using stderr", logfile, err) + oFile = os.Stderr + } + } + } else { + oFile = os.Stderr + } + + log.SetOutput(newTelegrafWriter(oFile)) +} diff --git a/metric.go b/metric.go index 937603cdcc778..5079ff4f1b480 100644 --- a/metric.go +++ b/metric.go @@ -4,6 +4,7 @@ import ( "time" "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/influxdb/models" ) // ValueType is an enumeration of metric types that represent a simple value. @@ -33,6 +34,10 @@ type Metric interface { // UnixNano returns the unix nano time of the metric UnixNano() int64 + // HashID returns a non-cryptographic hash of the metric (name + tags) + // NOTE: do not persist & depend on this value to disk. + HashID() uint64 + // Fields returns the fields for the metric Fields() map[string]interface{} @@ -44,13 +49,28 @@ type Metric interface { // Point returns a influxdb client.Point object Point() *client.Point + + // SetAggregate sets the metric's aggregate status + // This is so that aggregate metrics don't get re-sent to aggregator plugins + SetAggregate(bool) + // IsAggregate returns true if the metric is an aggregate + IsAggregate() bool } // metric is a wrapper of the influxdb client.Point struct type metric struct { - pt *client.Point + pt models.Point mType ValueType + + isaggregate bool +} + +func NewMetricFromPoint(pt models.Point) Metric { + return &metric{ + pt: pt, + mType: Untyped, + } } // NewMetric returns an untyped metric. 
@@ -60,7 +80,7 @@ func NewMetric( fields map[string]interface{}, t time.Time, ) (Metric, error) { - pt, err := client.NewPoint(name, tags, fields, t) + pt, err := models.NewPoint(name, models.NewTags(tags), fields, t) if err != nil { return nil, err } @@ -79,7 +99,7 @@ func NewGaugeMetric( fields map[string]interface{}, t time.Time, ) (Metric, error) { - pt, err := client.NewPoint(name, tags, fields, t) + pt, err := models.NewPoint(name, models.NewTags(tags), fields, t) if err != nil { return nil, err } @@ -98,7 +118,7 @@ func NewCounterMetric( fields map[string]interface{}, t time.Time, ) (Metric, error) { - pt, err := client.NewPoint(name, tags, fields, t) + pt, err := models.NewPoint(name, models.NewTags(tags), fields, t) if err != nil { return nil, err } @@ -113,7 +133,7 @@ func (m *metric) Name() string { } func (m *metric) Tags() map[string]string { - return m.pt.Tags() + return m.pt.Tags().Map() } func (m *metric) Time() time.Time { @@ -124,6 +144,10 @@ func (m *metric) Type() ValueType { return m.mType } +func (m *metric) HashID() uint64 { + return m.pt.HashID() +} + func (m *metric) UnixNano() int64 { return m.pt.UnixNano() } @@ -141,5 +165,13 @@ func (m *metric) PrecisionString(precison string) string { } func (m *metric) Point() *client.Point { - return m.pt + return client.NewPointFrom(m.pt) +} + +func (m *metric) IsAggregate() bool { + return m.isaggregate +} + +func (m *metric) SetAggregate(b bool) { + m.isaggregate = b } diff --git a/plugins/aggregators/all/all.go b/plugins/aggregators/all/all.go new file mode 100644 index 0000000000000..1041a0c9c4814 --- /dev/null +++ b/plugins/aggregators/all/all.go @@ -0,0 +1,5 @@ +package all + +import ( + _ "github.com/influxdata/telegraf/plugins/aggregators/minmax" +) diff --git a/plugins/aggregators/minmax/minmax.go b/plugins/aggregators/minmax/minmax.go new file mode 100644 index 0000000000000..1c83c0cc21d80 --- /dev/null +++ b/plugins/aggregators/minmax/minmax.go @@ -0,0 +1,119 @@ +package minmax + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/aggregators" +) + +type MinMax struct { + cache map[uint64]aggregate +} + +func NewMinMax() telegraf.Aggregator { + mm := &MinMax{} + mm.Reset() + return mm +} + +type aggregate struct { + fields map[string]minmax + name string + tags map[string]string +} + +type minmax struct { + min float64 + max float64 +} + +var sampleConfig = ` + ## General Aggregator Arguments: + ## The period on which to flush & clear the aggregator. + period = "30s" + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. + drop_original = false +` + +func (m *MinMax) SampleConfig() string { + return sampleConfig +} + +func (m *MinMax) Description() string { + return "Keep the aggregate min/max of each metric passing through." 
+} + +func (m *MinMax) Add(in telegraf.Metric) { + id := in.HashID() + if _, ok := m.cache[id]; !ok { + // hit an uncached metric, create caches for first time: + a := aggregate{ + name: in.Name(), + tags: in.Tags(), + fields: make(map[string]minmax), + } + for k, v := range in.Fields() { + if fv, ok := convert(v); ok { + a.fields[k] = minmax{ + min: fv, + max: fv, + } + } + } + m.cache[id] = a + } else { + for k, v := range in.Fields() { + if fv, ok := convert(v); ok { + if _, ok := m.cache[id].fields[k]; !ok { + // hit an uncached field of a cached metric + m.cache[id].fields[k] = minmax{ + min: fv, + max: fv, + } + continue + } + if fv < m.cache[id].fields[k].min { + tmp := m.cache[id].fields[k] + tmp.min = fv + m.cache[id].fields[k] = tmp + } else if fv > m.cache[id].fields[k].max { + tmp := m.cache[id].fields[k] + tmp.max = fv + m.cache[id].fields[k] = tmp + } + } + } + } +} + +func (m *MinMax) Push(acc telegraf.Accumulator) { + for _, aggregate := range m.cache { + fields := map[string]interface{}{} + for k, v := range aggregate.fields { + fields[k+"_min"] = v.min + fields[k+"_max"] = v.max + } + acc.AddFields(aggregate.name, fields, aggregate.tags) + } +} + +func (m *MinMax) Reset() { + m.cache = make(map[uint64]aggregate) +} + +func convert(in interface{}) (float64, bool) { + switch v := in.(type) { + case float64: + return v, true + case int64: + return float64(v), true + default: + return 0, false + } +} + +func init() { + aggregators.Add("minmax", func() telegraf.Aggregator { + return NewMinMax() + }) +} diff --git a/plugins/aggregators/minmax/minmax_test.go b/plugins/aggregators/minmax/minmax_test.go new file mode 100644 index 0000000000000..97af5749db317 --- /dev/null +++ b/plugins/aggregators/minmax/minmax_test.go @@ -0,0 +1,162 @@ +package minmax + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" +) + +var m1, _ = telegraf.NewMetric("m1", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": int64(1), + "b": int64(1), + "c": int64(1), + "d": int64(1), + "e": int64(1), + "f": float64(2), + "g": float64(2), + "h": float64(2), + "i": float64(2), + "j": float64(3), + }, + time.Now(), +) +var m2, _ = telegraf.NewMetric("m1", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": int64(1), + "b": int64(3), + "c": int64(3), + "d": int64(3), + "e": int64(3), + "f": float64(1), + "g": float64(1), + "h": float64(1), + "i": float64(1), + "j": float64(1), + "k": float64(200), + "ignoreme": "string", + "andme": true, + }, + time.Now(), +) + +func BenchmarkApply(b *testing.B) { + minmax := NewMinMax() + + for n := 0; n < b.N; n++ { + minmax.Add(m1) + minmax.Add(m2) + } +} + +// Test two metrics getting added. 
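+// Both m1 and m2 use the same name ("m1") and tag set (foo=bar), so they
+// share one HashID cache entry; each numeric field should come back as
+// <field>_min and <field>_max holding the smallest and largest value seen
+// (string and bool fields are skipped by convert).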
+func TestMinMaxWithPeriod(t *testing.T) { + acc := testutil.Accumulator{} + minmax := NewMinMax() + + minmax.Add(m1) + minmax.Add(m2) + minmax.Push(&acc) + + expectedFields := map[string]interface{}{ + "a_max": float64(1), + "a_min": float64(1), + "b_max": float64(3), + "b_min": float64(1), + "c_max": float64(3), + "c_min": float64(1), + "d_max": float64(3), + "d_min": float64(1), + "e_max": float64(3), + "e_min": float64(1), + "f_max": float64(2), + "f_min": float64(1), + "g_max": float64(2), + "g_min": float64(1), + "h_max": float64(2), + "h_min": float64(1), + "i_max": float64(2), + "i_min": float64(1), + "j_max": float64(3), + "j_min": float64(1), + "k_max": float64(200), + "k_min": float64(200), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} + +// Test two metrics getting added with a push/reset in between (simulates +// getting added in different periods.) +func TestMinMaxDifferentPeriods(t *testing.T) { + acc := testutil.Accumulator{} + minmax := NewMinMax() + + minmax.Add(m1) + minmax.Push(&acc) + expectedFields := map[string]interface{}{ + "a_max": float64(1), + "a_min": float64(1), + "b_max": float64(1), + "b_min": float64(1), + "c_max": float64(1), + "c_min": float64(1), + "d_max": float64(1), + "d_min": float64(1), + "e_max": float64(1), + "e_min": float64(1), + "f_max": float64(2), + "f_min": float64(2), + "g_max": float64(2), + "g_min": float64(2), + "h_max": float64(2), + "h_min": float64(2), + "i_max": float64(2), + "i_min": float64(2), + "j_max": float64(3), + "j_min": float64(3), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) + + acc.ClearMetrics() + minmax.Reset() + minmax.Add(m2) + minmax.Push(&acc) + expectedFields = map[string]interface{}{ + "a_max": float64(1), + "a_min": float64(1), + "b_max": float64(3), + "b_min": float64(3), + "c_max": float64(3), + "c_min": float64(3), + "d_max": float64(3), + "d_min": float64(3), + "e_max": float64(3), + "e_min": float64(3), + "f_max": float64(1), + "f_min": float64(1), + "g_max": float64(1), + "g_min": float64(1), + "h_max": float64(1), + "h_min": float64(1), + "i_max": float64(1), + "i_min": float64(1), + "j_max": float64(1), + "j_min": float64(1), + "k_max": float64(200), + "k_min": float64(200), + } + expectedTags = map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} diff --git a/plugins/aggregators/registry.go b/plugins/aggregators/registry.go new file mode 100644 index 0000000000000..77a9c9a643485 --- /dev/null +++ b/plugins/aggregators/registry.go @@ -0,0 +1,11 @@ +package aggregators + +import "github.com/influxdata/telegraf" + +type Creator func() telegraf.Aggregator + +var Aggregators = map[string]Creator{} + +func Add(name string, creator Creator) { + Aggregators[name] = creator +} diff --git a/plugins/inputs/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go index f7c90019553eb..10f7fcd40a6bb 100644 --- a/plugins/inputs/aerospike/aerospike.go +++ b/plugins/inputs/aerospike/aerospike.go @@ -88,7 +88,7 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro if err == nil { fields[strings.Replace(k, "-", "_", -1)] = val } else { - log.Printf("skipping aerospike field %v with int64 overflow", k) + log.Printf("I! 
skipping aerospike field %v with int64 overflow", k) } } acc.AddFields("aerospike_node", fields, tags, time.Now()) @@ -121,7 +121,7 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro if err == nil { nFields[strings.Replace(parts[0], "-", "_", -1)] = val } else { - log.Printf("skipping aerospike field %v with int64 overflow", parts[0]) + log.Printf("I! skipping aerospike field %v with int64 overflow", parts[0]) } } acc.AddFields("aerospike_namespace", nFields, nTags, time.Now()) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 058b230d80604..67b85905e8dc5 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -31,6 +31,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/iptables" _ "github.com/influxdata/telegraf/plugins/inputs/jolokia" _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer" + _ "github.com/influxdata/telegraf/plugins/inputs/kubernetes" _ "github.com/influxdata/telegraf/plugins/inputs/leofs" _ "github.com/influxdata/telegraf/plugins/inputs/logparser" _ "github.com/influxdata/telegraf/plugins/inputs/lustre2" diff --git a/plugins/inputs/cassandra/README.md b/plugins/inputs/cassandra/README.md index bfbcff77cb399..edcef08c4b41d 100644 --- a/plugins/inputs/cassandra/README.md +++ b/plugins/inputs/cassandra/README.md @@ -7,7 +7,7 @@ #### Description -The Cassandra plugin collects Cassandra/JVM metrics exposed as MBean's attributes through jolokia REST endpoint. All metrics are collected for each server configured. +The Cassandra plugin collects Cassandra 3 / JVM metrics exposed as MBean's attributes through jolokia REST endpoint. All metrics are collected for each server configured. See: https://jolokia.org/ and [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html) @@ -38,9 +38,9 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster ####measurement = javaGarbageCollector -- /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionTime +- /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionTime - /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionCount -- /java.lang:type=GarbageCollector,name=ParNew/CollectionTime +- /java.lang:type=GarbageCollector,name=ParNew/CollectionTime - /java.lang:type=GarbageCollector,name=ParNew/CollectionCount ####measurement = javaMemory @@ -50,13 +50,13 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster ####measurement = cassandraCache -- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Hit +- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Hits - /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Requests - /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Entries -- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Size -- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Capacity -- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Hit -- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Requests +- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Size +- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Capacity +- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Hits +- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Requests - /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Entries - 
/org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Size - /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Capacity @@ -67,33 +67,33 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster ####measurement = cassandraClientRequest -- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=TotalLatency -- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=TotalLatency -- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Latency -- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Latency -- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Timeouts +- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=TotalLatency +- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=TotalLatency +- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Latency +- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Latency +- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Timeouts - /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Timeouts -- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Unavailables -- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Unavailables -- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Failures -- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Failures +- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Unavailables +- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Unavailables +- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Failures +- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Failures ####measurement = cassandraCommitLog -- /org.apache.cassandra.metrics:type=CommitLog,name=PendingTasks +- /org.apache.cassandra.metrics:type=CommitLog,name=PendingTasks - /org.apache.cassandra.metrics:type=CommitLog,name=TotalCommitLogSize ####measurement = cassandraCompaction -- /org.apache.cassandra.metrics:type=Compaction,name=CompletedTask -- /org.apache.cassandra.metrics:type=Compaction,name=PendingTasks +- /org.apache.cassandra.metrics:type=Compaction,name=CompletedTasks +- /org.apache.cassandra.metrics:type=Compaction,name=PendingTasks - /org.apache.cassandra.metrics:type=Compaction,name=TotalCompactionsCompleted - /org.apache.cassandra.metrics:type=Compaction,name=BytesCompacted ####measurement = cassandraStorage - /org.apache.cassandra.metrics:type=Storage,name=Load -- /org.apache.cassandra.metrics:type=Storage,name=Exceptions +- /org.apache.cassandra.metrics:type=Storage,name=Exceptions ####measurement = cassandraTable Using wildcards for "keyspace" and "scope" can create a lot of series as metrics will be reported for every table and keyspace including internal system tables. Specify a keyspace name and/or a table name to limit them. 
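For example (using placeholder keyspace and table names), `/org.apache.cassandra.metrics:type=Table,keyspace=mykeyspace,scope=mytable,name=ReadLatency` restricts collection to a single table.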
@@ -101,25 +101,25 @@ Using wildcards for "keyspace" and "scope" can create a lot of series as metrics - /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=LiveDiskSpaceUsed - /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=TotalDiskSpaceUsed - /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadLatency -- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=CoordinatorReadLatency -- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteLatency -- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadTotalLatency -- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteTotalLatency +- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=CoordinatorReadLatency +- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteLatency +- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadTotalLatency +- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteTotalLatency ####measurement = cassandraThreadPools -- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=ActiveTasks -- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=ActiveTasks +- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=ActiveTasks +- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=ActiveTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=PendingTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=CurrentlyBlockedTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=PendingTasks -- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=CurrentlyBlockedTasks +- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=CurrentlyBlockedTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=PendingTasks -- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=CurrentlyBlockedTasks +- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=CurrentlyBlockedTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=PendingTasks -- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=CurrentlyBlockedTasks -- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=PendingTasks -- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=CurrentlyBlockedTasks +- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=CurrentlyBlockedTasks +- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=PendingTasks +- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=CurrentlyBlockedTasks diff --git a/plugins/inputs/cassandra/cassandra.go b/plugins/inputs/cassandra/cassandra.go index e7edf7153980f..dc4bb2b720e1c 100644 --- a/plugins/inputs/cassandra/cassandra.go +++ b/plugins/inputs/cassandra/cassandra.go @@ -274,7 +274,7 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error { m = newCassandraMetric(serverTokens["host"], metric, acc) } else { // unsupported metric type - 
log.Printf("Unsupported Cassandra metric [%s], skipping", + log.Printf("I! Unsupported Cassandra metric [%s], skipping", metric) continue } diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go index d5ed464fa5a38..9f0a6ac786257 100644 --- a/plugins/inputs/ceph/ceph.go +++ b/plugins/inputs/ceph/ceph.go @@ -100,12 +100,12 @@ func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error { for _, s := range sockets { dump, err := perfDump(c.CephBinary, s) if err != nil { - log.Printf("error reading from socket '%s': %v", s.socket, err) + log.Printf("E! error reading from socket '%s': %v", s.socket, err) continue } data, err := parseDump(dump) if err != nil { - log.Printf("error parsing dump from socket '%s': %v", s.socket, err) + log.Printf("E! error parsing dump from socket '%s': %v", s.socket, err) continue } for tag, metrics := range *data { @@ -293,7 +293,7 @@ func flatten(data interface{}) []*metric { } } default: - log.Printf("Ignoring unexpected type '%T' for value %v", val, val) + log.Printf("I! Ignoring unexpected type '%T' for value %v", val, val) } return metrics diff --git a/plugins/inputs/cgroup/README.md b/plugins/inputs/cgroup/README.md index feb332dd9cd53..e62c9b15e6c96 100644 --- a/plugins/inputs/cgroup/README.md +++ b/plugins/inputs/cgroup/README.md @@ -2,6 +2,10 @@ This input plugin will capture specific statistics per cgroup. +Consider restricting paths to the set of cgroups you really +want to monitor if you have a large number of cgroups, to avoid +any cardinality issues. + Following file formats are supported: * Single value @@ -33,9 +37,8 @@ KEY1 VAL1\n ### Tags: -Measurements don't have any specific tags unless you define them at the telegraf level (defaults). We -used to have the path listed as a tag, but to keep cardinality in check it's easier to move this -value to a field. Thanks @sebito91! +All measurements have the following tags: + - path ### Configuration: diff --git a/plugins/inputs/cgroup/cgroup.go b/plugins/inputs/cgroup/cgroup.go index e38b6a4c13ddc..cc5e4b4968f2d 100644 --- a/plugins/inputs/cgroup/cgroup.go +++ b/plugins/inputs/cgroup/cgroup.go @@ -11,15 +11,18 @@ type CGroup struct { } var sampleConfig = ` - ## Directories in which to look for files, globs are supported. - # paths = [ - # "/cgroup/memory", - # "/cgroup/memory/child1", - # "/cgroup/memory/child2/*", - # ] - ## cgroup stat fields, as file names, globs are supported. - ## these file names are appended to each path from above. - # files = ["memory.*usage*", "memory.limit_in_bytes"] + ## Directories in which to look for files, globs are supported. + ## Consider restricting paths to the set of cgroups you really + ## want to monitor if you have a large number of cgroups, to avoid + ## any cardinality issues. + # paths = [ + # "/cgroup/memory", + # "/cgroup/memory/child1", + # "/cgroup/memory/child2/*", + # ] + ## cgroup stat fields, as file names, globs are supported. + ## these file names are appended to each path from above. 
+ # files = ["memory.*usage*", "memory.limit_in_bytes"] ` func (g *CGroup) SampleConfig() string { diff --git a/plugins/inputs/cgroup/cgroup_linux.go b/plugins/inputs/cgroup/cgroup_linux.go index ecaf8126d7323..e8ba6f8819df8 100644 --- a/plugins/inputs/cgroup/cgroup_linux.go +++ b/plugins/inputs/cgroup/cgroup_linux.go @@ -56,9 +56,10 @@ func (g *CGroup) gatherDir(dir string, acc telegraf.Accumulator) error { return err } } - fields["path"] = dir - acc.AddFields(metricName, fields, nil) + tags := map[string]string{"path": dir} + + acc.AddFields(metricName, fields, tags) return nil } diff --git a/plugins/inputs/cgroup/cgroup_test.go b/plugins/inputs/cgroup/cgroup_test.go index ff9b8d7a894cf..206b51f6d28f4 100644 --- a/plugins/inputs/cgroup/cgroup_test.go +++ b/plugins/inputs/cgroup/cgroup_test.go @@ -3,13 +3,10 @@ package cgroup import ( - "fmt" "testing" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "reflect" ) var cg1 = &CGroup{ @@ -24,32 +21,15 @@ var cg1 = &CGroup{ }, } -func assertContainsFields(a *testutil.Accumulator, t *testing.T, measurement string, fieldSet []map[string]interface{}) { - a.Lock() - defer a.Unlock() - - numEquals := 0 - for _, p := range a.Metrics { - if p.Measurement == measurement { - for _, fields := range fieldSet { - if reflect.DeepEqual(fields, p.Fields) { - numEquals++ - } - } - } - } - - if numEquals != len(fieldSet) { - assert.Fail(t, fmt.Sprintf("only %d of %d are equal", numEquals, len(fieldSet))) - } -} - func TestCgroupStatistics_1(t *testing.T) { var acc testutil.Accumulator err := cg1.Gather(&acc) require.NoError(t, err) + tags := map[string]string{ + "path": "testdata/memory", + } fields := map[string]interface{}{ "memory.stat.cache": 1739362304123123123, "memory.stat.rss": 1775325184, @@ -62,9 +42,8 @@ func TestCgroupStatistics_1(t *testing.T) { "memory.limit_in_bytes": 223372036854771712, "memory.use_hierarchy": "12-781", "notify_on_release": 0, - "path": "testdata/memory", } - assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields}) + acc.AssertContainsTaggedFields(t, "cgroup", fields, tags) } // ====================================================================== @@ -80,14 +59,16 @@ func TestCgroupStatistics_2(t *testing.T) { err := cg2.Gather(&acc) require.NoError(t, err) + tags := map[string]string{ + "path": "testdata/cpu", + } fields := map[string]interface{}{ "cpuacct.usage_percpu.0": -1452543795404, "cpuacct.usage_percpu.1": 1376681271659, "cpuacct.usage_percpu.2": 1450950799997, "cpuacct.usage_percpu.3": -1473113374257, - "path": "testdata/cpu", } - assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields}) + acc.AssertContainsTaggedFields(t, "cgroup", fields, tags) } // ====================================================================== @@ -103,16 +84,18 @@ func TestCgroupStatistics_3(t *testing.T) { err := cg3.Gather(&acc) require.NoError(t, err) + tags := map[string]string{ + "path": "testdata/memory/group_1", + } fields := map[string]interface{}{ "memory.limit_in_bytes": 223372036854771712, - "path": "testdata/memory/group_1", } + acc.AssertContainsTaggedFields(t, "cgroup", fields, tags) - fieldsTwo := map[string]interface{}{ - "memory.limit_in_bytes": 223372036854771712, - "path": "testdata/memory/group_2", + tags = map[string]string{ + "path": "testdata/memory/group_2", } - assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields, fieldsTwo}) + acc.AssertContainsTaggedFields(t, "cgroup", fields, tags) 
} // ====================================================================== @@ -128,22 +111,23 @@ func TestCgroupStatistics_4(t *testing.T) { err := cg4.Gather(&acc) require.NoError(t, err) + tags := map[string]string{ + "path": "testdata/memory/group_1/group_1_1", + } fields := map[string]interface{}{ "memory.limit_in_bytes": 223372036854771712, - "path": "testdata/memory/group_1/group_1_1", } + acc.AssertContainsTaggedFields(t, "cgroup", fields, tags) - fieldsTwo := map[string]interface{}{ - "memory.limit_in_bytes": 223372036854771712, - "path": "testdata/memory/group_1/group_1_2", + tags = map[string]string{ + "path": "testdata/memory/group_1/group_1_2", } + acc.AssertContainsTaggedFields(t, "cgroup", fields, tags) - fieldsThree := map[string]interface{}{ - "memory.limit_in_bytes": 223372036854771712, - "path": "testdata/memory/group_2", + tags = map[string]string{ + "path": "testdata/memory/group_2", } - - assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields, fieldsTwo, fieldsThree}) + acc.AssertContainsTaggedFields(t, "cgroup", fields, tags) } // ====================================================================== @@ -159,16 +143,18 @@ func TestCgroupStatistics_5(t *testing.T) { err := cg5.Gather(&acc) require.NoError(t, err) + tags := map[string]string{ + "path": "testdata/memory/group_1/group_1_1", + } fields := map[string]interface{}{ "memory.limit_in_bytes": 223372036854771712, - "path": "testdata/memory/group_1/group_1_1", } + acc.AssertContainsTaggedFields(t, "cgroup", fields, tags) - fieldsTwo := map[string]interface{}{ - "memory.limit_in_bytes": 223372036854771712, - "path": "testdata/memory/group_2/group_1_1", + tags = map[string]string{ + "path": "testdata/memory/group_2/group_1_1", } - assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields, fieldsTwo}) + acc.AssertContainsTaggedFields(t, "cgroup", fields, tags) } // ====================================================================== @@ -184,11 +170,13 @@ func TestCgroupStatistics_6(t *testing.T) { err := cg6.Gather(&acc) require.NoError(t, err) + tags := map[string]string{ + "path": "testdata/memory", + } fields := map[string]interface{}{ "memory.usage_in_bytes": 3513667584, "memory.use_hierarchy": "12-781", "memory.kmem.limit_in_bytes": 9223372036854771712, - "path": "testdata/memory", } - assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields}) + acc.AssertContainsTaggedFields(t, "cgroup", fields, tags) } diff --git a/plugins/inputs/conntrack/conntrack.go b/plugins/inputs/conntrack/conntrack.go index 68bf8adba52d5..841aedb545a03 100644 --- a/plugins/inputs/conntrack/conntrack.go +++ b/plugins/inputs/conntrack/conntrack.go @@ -93,13 +93,14 @@ func (c *Conntrack) Gather(acc telegraf.Accumulator) error { contents, err := ioutil.ReadFile(fName) if err != nil { - log.Printf("failed to read file '%s': %v", fName, err) + log.Printf("E! failed to read file '%s': %v", fName, err) + continue } v := strings.TrimSpace(string(contents)) fields[metricKey], err = strconv.ParseFloat(v, 64) if err != nil { - log.Printf("failed to parse metric, expected number but "+ + log.Printf("E! failed to parse metric, expected number but "+ " found '%s': %v", v, err) } } diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index 52f9a7adb43ef..5e89106776ee1 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -103,6 +103,9 @@ based on the availability of per-cpu stats on your system. 
 - n_used_file_descriptors
 - n_cpus
 - n_containers
+ - n_containers_running
+ - n_containers_stopped
+ - n_containers_paused
 - n_images
 - n_goroutines
 - n_listener_events
@@ -153,6 +156,9 @@ based on the availability of per-cpu stats on your system.
 > docker n_cpus=8i 1456926671065383978
 > docker n_used_file_descriptors=15i 1456926671065383978
 > docker n_containers=7i 1456926671065383978
+> docker n_containers_running=7i 1456926671065383978
+> docker n_containers_stopped=3i 1456926671065383978
+> docker n_containers_paused=0i 1456926671065383978
 > docker n_images=152i 1456926671065383978
 > docker n_goroutines=36i 1456926671065383978
 > docker n_listener_events=0i 1456926671065383978
diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go
index 06c39d179baf5..e2c488dc85cff 100644
--- a/plugins/inputs/docker/docker.go
+++ b/plugins/inputs/docker/docker.go
@@ -126,7 +126,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
 			defer wg.Done()
 			err := d.gatherContainer(c, acc)
 			if err != nil {
-				log.Printf("Error gathering container %s stats: %s\n",
+				log.Printf("E! Error gathering container %s stats: %s\n",
 					c.Names, err.Error())
 			}
 		}(container)
@@ -154,6 +154,9 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
 		"n_cpus":                  info.NCPU,
 		"n_used_file_descriptors": info.NFd,
 		"n_containers":            info.Containers,
+		"n_containers_running":    info.ContainersRunning,
+		"n_containers_stopped":    info.ContainersStopped,
+		"n_containers_paused":     info.ContainersPaused,
 		"n_images":                info.Images,
 		"n_goroutines":            info.NGoroutines,
 		"n_listener_events":       info.NEventsListener,
diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go
index e1a0a7d2eb1c2..21960a4d831d9 100644
--- a/plugins/inputs/docker/docker_test.go
+++ b/plugins/inputs/docker/docker_test.go
@@ -256,6 +256,9 @@ type FakeDockerClient struct {
 func (d FakeDockerClient) Info(ctx context.Context) (types.Info, error) {
 	env := types.Info{
 		Containers:         108,
+		ContainersRunning:  98,
+		ContainersStopped:  6,
+		ContainersPaused:   3,
 		OomKillDisable:     false,
 		SystemTime:         "2016-02-24T00:55:09.15073105-05:00",
 		NEventsListener:    0,
@@ -397,6 +400,9 @@ func TestDockerGatherInfo(t *testing.T) {
 			"n_cpus":                  int(4),
 			"n_used_file_descriptors": int(19),
 			"n_containers":            int(108),
+			"n_containers_running":    int(98),
+			"n_containers_stopped":    int(6),
+			"n_containers_paused":     int(3),
 			"n_images":                int(199),
 			"n_goroutines":            int(39),
 		},
diff --git a/plugins/inputs/haproxy/README.md b/plugins/inputs/haproxy/README.md
new file mode 100644
index 0000000000000..52be25aa75a46
--- /dev/null
+++ b/plugins/inputs/haproxy/README.md
@@ -0,0 +1,37 @@
+# HAproxy Input Plugin
+
+The [HAproxy](http://www.haproxy.org/) input plugin gathers metrics directly from any running HAproxy instance, using either the CSV generated by the HAproxy status page or the admin socket(s).
+
+### Configuration:
+
+```toml
+# SampleConfig
+[[inputs.haproxy]]
+  servers = ["http://1.2.3.4/haproxy?stats", "/var/run/haproxy*.sock"]
+```
+
+Server addresses must explicitly start with 'http' if you wish to use the HAproxy status page. Otherwise, the address is assumed to be a UNIX socket and any protocol prefix (if present) is discarded.
+
+The following examples all resolve to the same socket:
+```
+socket:/var/run/haproxy.sock
+unix:/var/run/haproxy.sock
+foo:/var/run/haproxy.sock
+/var/run/haproxy.sock
+```
+
+When using socket names, wildcard expansion is supported, so the plugin can gather stats from multiple sockets at once.
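+
+For example, with `servers = ["/var/run/haproxy*.sock"]` the glob is expanded on every gather and stats are read from each matching socket; if the pattern matches nothing, the literal path itself is tried as a socket address.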
+
+If no servers are specified, then the default address of `http://127.0.0.1:1936/haproxy?stats` will be used.
+
+### Measurements & Fields:
+
+The plugin gathers the measurements outlined in the [HAproxy CSV format documentation](https://cbonte.github.io/haproxy-dconv/1.5/configuration.html#9.1).
+
+### Tags:
+
+- All measurements have the following tags:
+ - server - address of the server the data is gathered from
+ - proxy - proxy name as reported in `pxname`
+ - sv - service name as reported in `svname`
+
diff --git a/plugins/inputs/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go
index 9529bad3fe26b..6b42a0705a1ee 100644
--- a/plugins/inputs/haproxy/haproxy.go
+++ b/plugins/inputs/haproxy/haproxy.go
@@ -7,6 +7,7 @@ import (
 	"net"
 	"net/http"
 	"net/url"
+	"path/filepath"
 	"strconv"
 	"strings"
 	"sync"
@@ -17,7 +18,7 @@ import (
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
 
-//CSV format: https://cbonte.github.io/haproxy-dconv/configuration-1.5.html#9.1
+//CSV format: https://cbonte.github.io/haproxy-dconv/1.5/configuration.html#9.1
 const (
 	HF_PXNAME = 0 // 0. pxname [LFBS]: proxy name
 	HF_SVNAME = 1 // 1. svname [LFBS]: service name (FRONTEND for frontend, BACKEND for backend, any name for server/listener)
@@ -93,12 +94,15 @@ var sampleConfig = `
   ## An array of address to gather stats about. Specify an ip on hostname
   ## with optional port. ie localhost, 10.10.3.33:1936, etc.
   ## Make sure you specify the complete path to the stats endpoint
-  ## ie 10.10.3.33:1936/haproxy?stats
+  ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
   #
   ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
   servers = ["http://myhaproxy.com:1936/haproxy?stats"]
-  ## Or you can also use local socket
-  ## servers = ["socket:/run/haproxy/admin.sock"]
+  ##
+  ## You can also use a local socket with standard wildcard globbing.
+  ## A server address not starting with 'http' will be treated as a possible
+  ## socket, so both examples below are valid.
+ ## servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"] ` func (r *haproxy) SampleConfig() string { @@ -116,10 +120,36 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error { return g.gatherServer("http://127.0.0.1:1936/haproxy?stats", acc) } + endpoints := make([]string, 0, len(g.Servers)) + + for _, endpoint := range g.Servers { + + if strings.HasPrefix(endpoint, "http") { + endpoints = append(endpoints, endpoint) + continue + } + + socketPath := getSocketAddr(endpoint) + + matches, err := filepath.Glob(socketPath) + + if err != nil { + return err + } + + if len(matches) == 0 { + endpoints = append(endpoints, socketPath) + } else { + for _, match := range matches { + endpoints = append(endpoints, match) + } + } + } + var wg sync.WaitGroup - errChan := errchan.New(len(g.Servers)) - wg.Add(len(g.Servers)) - for _, server := range g.Servers { + errChan := errchan.New(len(endpoints)) + wg.Add(len(endpoints)) + for _, server := range endpoints { go func(serv string) { defer wg.Done() errChan.C <- g.gatherServer(serv, acc) @@ -131,14 +161,7 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error { } func (g *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) error { - var socketPath string - socketAddr := strings.Split(addr, ":") - - if len(socketAddr) >= 2 { - socketPath = socketAddr[1] - } else { - socketPath = socketAddr[0] - } + socketPath := getSocketAddr(addr) c, err := net.Dial("unix", socketPath) @@ -196,6 +219,16 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error { return importCsvResult(res.Body, acc, u.Host) } +func getSocketAddr(sock string) string { + socketAddr := strings.Split(sock, ":") + + if len(socketAddr) >= 2 { + return socketAddr[1] + } else { + return socketAddr[0] + } +} + func importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error { csv := csv.NewReader(r) result, err := csv.ReadAll() diff --git a/plugins/inputs/haproxy/haproxy_test.go b/plugins/inputs/haproxy/haproxy_test.go index befcabd97f2b6..ae71ad76cf54b 100644 --- a/plugins/inputs/haproxy/haproxy_test.go +++ b/plugins/inputs/haproxy/haproxy_test.go @@ -72,38 +72,7 @@ func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) { "sv": "host0", } - fields := map[string]interface{}{ - "active_servers": uint64(1), - "backup_servers": uint64(0), - "bin": uint64(510913516), - "bout": uint64(2193856571), - "check_duration": uint64(10), - "cli_abort": uint64(73), - "ctime": uint64(2), - "downtime": uint64(0), - "dresp": uint64(0), - "econ": uint64(0), - "eresp": uint64(1), - "http_response.1xx": uint64(0), - "http_response.2xx": uint64(119534), - "http_response.3xx": uint64(48051), - "http_response.4xx": uint64(2345), - "http_response.5xx": uint64(1056), - "lbtot": uint64(171013), - "qcur": uint64(0), - "qmax": uint64(0), - "qtime": uint64(0), - "rate": uint64(3), - "rate_max": uint64(12), - "rtime": uint64(312), - "scur": uint64(1), - "smax": uint64(32), - "srv_abort": uint64(1), - "stot": uint64(171014), - "ttime": uint64(2341), - "wredis": uint64(0), - "wretr": uint64(1), - } + fields := HaproxyGetFieldValues() acc.AssertContainsTaggedFields(t, "haproxy", fields, tags) //Here, we should get error because we don't pass authentication data @@ -136,69 +105,73 @@ func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) { "sv": "host0", } - fields := map[string]interface{}{ - "active_servers": uint64(1), - "backup_servers": uint64(0), - "bin": uint64(510913516), - "bout": uint64(2193856571), - "check_duration": 
uint64(10), - "cli_abort": uint64(73), - "ctime": uint64(2), - "downtime": uint64(0), - "dresp": uint64(0), - "econ": uint64(0), - "eresp": uint64(1), - "http_response.1xx": uint64(0), - "http_response.2xx": uint64(119534), - "http_response.3xx": uint64(48051), - "http_response.4xx": uint64(2345), - "http_response.5xx": uint64(1056), - "lbtot": uint64(171013), - "qcur": uint64(0), - "qmax": uint64(0), - "qtime": uint64(0), - "rate": uint64(3), - "rate_max": uint64(12), - "rtime": uint64(312), - "scur": uint64(1), - "smax": uint64(32), - "srv_abort": uint64(1), - "stot": uint64(171014), - "ttime": uint64(2341), - "wredis": uint64(0), - "wretr": uint64(1), - } + fields := HaproxyGetFieldValues() acc.AssertContainsTaggedFields(t, "haproxy", fields, tags) } func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) { var randomNumber int64 - binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) - sock, err := net.Listen("unix", fmt.Sprintf("/tmp/test-haproxy%d.sock", randomNumber)) - if err != nil { - t.Fatal("Cannot initialize socket ") - } + var sockets [5]net.Listener + _globmask := "/tmp/test-haproxy*.sock" + _badmask := "/tmp/test-fail-haproxy*.sock" - defer sock.Close() + for i := 0; i < 5; i++ { + binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + sockname := fmt.Sprintf("/tmp/test-haproxy%d.sock", randomNumber) + + sock, err := net.Listen("unix", sockname) + if err != nil { + t.Fatal("Cannot initialize socket ") + } - s := statServer{} - go s.serverSocket(sock) + sockets[i] = sock + defer sock.Close() + + s := statServer{} + go s.serverSocket(sock) + } r := &haproxy{ - Servers: []string{sock.Addr().String()}, + Servers: []string{_globmask}, } var acc testutil.Accumulator - err = r.Gather(&acc) + err := r.Gather(&acc) require.NoError(t, err) - tags := map[string]string{ - "proxy": "be_app", - "server": sock.Addr().String(), - "sv": "host0", + fields := HaproxyGetFieldValues() + + for _, sock := range sockets { + tags := map[string]string{ + "proxy": "be_app", + "server": sock.Addr().String(), + "sv": "host0", + } + + acc.AssertContainsTaggedFields(t, "haproxy", fields, tags) } + // This mask should not match any socket + r.Servers = []string{_badmask} + + err = r.Gather(&acc) + require.Error(t, err) +} + +//When not passing server config, we default to localhost +//We just want to make sure we did request stat from localhost +func TestHaproxyDefaultGetFromLocalhost(t *testing.T) { + r := &haproxy{} + + var acc testutil.Accumulator + + err := r.Gather(&acc) + require.Error(t, err) + assert.Contains(t, err.Error(), "127.0.0.1:1936/haproxy?stats/;csv") +} + +func HaproxyGetFieldValues() map[string]interface{} { fields := map[string]interface{}{ "active_servers": uint64(1), "backup_servers": uint64(0), @@ -231,19 +204,7 @@ func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) { "wredis": uint64(0), "wretr": uint64(1), } - acc.AssertContainsTaggedFields(t, "haproxy", fields, tags) -} - -//When not passing server config, we default to localhost -//We just want to make sure we did request stat from localhost -func TestHaproxyDefaultGetFromLocalhost(t *testing.T) { - r := &haproxy{} - - var acc testutil.Accumulator - - err := r.Gather(&acc) - require.Error(t, err) - assert.Contains(t, err.Error(), "127.0.0.1:1936/haproxy?stats/;csv") + return fields } const csvOutputSample = ` diff --git a/plugins/inputs/http_listener/bufferpool.go b/plugins/inputs/http_listener/bufferpool.go new file mode 100644 index 0000000000000..00a93652db2fb --- /dev/null +++ 
b/plugins/inputs/http_listener/bufferpool.go @@ -0,0 +1,43 @@ +package http_listener + +import ( + "sync/atomic" +) + +type pool struct { + buffers chan []byte + size int + + created int64 +} + +// NewPool returns a new pool object. +// n is the number of buffers +// bufSize is the size (in bytes) of each buffer +func NewPool(n, bufSize int) *pool { + return &pool{ + buffers: make(chan []byte, n), + size: bufSize, + } +} + +func (p *pool) get() []byte { + select { + case b := <-p.buffers: + return b + default: + atomic.AddInt64(&p.created, 1) + return make([]byte, p.size) + } +} + +func (p *pool) put(b []byte) { + select { + case p.buffers <- b: + default: + } +} + +func (p *pool) ncreated() int64 { + return atomic.LoadInt64(&p.created) +} diff --git a/plugins/inputs/http_listener/http_listener.go b/plugins/inputs/http_listener/http_listener.go index 9110fd106dd14..ddc9ac7bf8cec 100644 --- a/plugins/inputs/http_listener/http_listener.go +++ b/plugins/inputs/http_listener/http_listener.go @@ -1,7 +1,9 @@ package http_listener import ( - "io/ioutil" + "bytes" + "compress/gzip" + "io" "log" "net" "net/http" @@ -11,126 +13,137 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/plugins/inputs/http_listener/stoppableListener" - "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/influx" ) -type HttpListener struct { +const ( + // DEFAULT_MAX_BODY_SIZE is the default maximum request body size, in bytes. + // if the request body is over this size, we will return an HTTP 413 error. + // 500 MB + DEFAULT_MAX_BODY_SIZE = 500 * 1024 * 1024 + + // MAX_LINE_SIZE is the maximum size, in bytes, that can be allocated for + // a single InfluxDB point. + // 64 KB + DEFAULT_MAX_LINE_SIZE = 64 * 1024 +) + +type HTTPListener struct { ServiceAddress string ReadTimeout internal.Duration WriteTimeout internal.Duration + MaxBodySize int64 + MaxLineSize int - sync.Mutex + mu sync.Mutex wg sync.WaitGroup - listener *stoppableListener.StoppableListener + listener net.Listener - parser parsers.Parser + parser influx.InfluxParser acc telegraf.Accumulator + pool *pool } const sampleConfig = ` ## Address and port to host HTTP listener on service_address = ":8186" - ## timeouts + ## maximum duration before timing out read of the request read_timeout = "10s" + ## maximum duration before timing out write of the response write_timeout = "10s" + + ## Maximum allowed http request body size in bytes. + ## 0 means to use the default of 536,870,912 bytes (500 mebibytes) + max_body_size = 0 + + ## Maximum line size allowed to be sent in bytes. + ## 0 means to use the default of 65536 bytes (64 kibibytes) + max_line_size = 0 ` -func (t *HttpListener) SampleConfig() string { +func (h *HTTPListener) SampleConfig() string { return sampleConfig } -func (t *HttpListener) Description() string { +func (h *HTTPListener) Description() string { return "Influx HTTP write listener" } -func (t *HttpListener) Gather(_ telegraf.Accumulator) error { +func (h *HTTPListener) Gather(_ telegraf.Accumulator) error { + log.Printf("D! The http_listener has created %d buffers", h.pool.ncreated()) return nil } -func (t *HttpListener) SetParser(parser parsers.Parser) { - t.parser = parser -} - // Start starts the http listener service. 
-func (t *HttpListener) Start(acc telegraf.Accumulator) error { - t.Lock() - defer t.Unlock() +func (h *HTTPListener) Start(acc telegraf.Accumulator) error { + h.mu.Lock() + defer h.mu.Unlock() - t.acc = acc - - var rawListener, err = net.Listen("tcp", t.ServiceAddress) - if err != nil { - return err + if h.MaxBodySize == 0 { + h.MaxBodySize = DEFAULT_MAX_BODY_SIZE + } + if h.MaxLineSize == 0 { + h.MaxLineSize = DEFAULT_MAX_LINE_SIZE } - t.listener, err = stoppableListener.New(rawListener) + + h.acc = acc + h.pool = NewPool(200, h.MaxLineSize) + + var listener, err = net.Listen("tcp", h.ServiceAddress) if err != nil { return err } + h.listener = listener - go t.httpListen() + h.wg.Add(1) + go func() { + defer h.wg.Done() + h.httpListen() + }() - log.Printf("Started HTTP listener service on %s\n", t.ServiceAddress) + log.Printf("I! Started HTTP listener service on %s\n", h.ServiceAddress) return nil } // Stop cleans up all resources -func (t *HttpListener) Stop() { - t.Lock() - defer t.Unlock() - - t.listener.Stop() - t.listener.Close() +func (h *HTTPListener) Stop() { + h.mu.Lock() + defer h.mu.Unlock() - t.wg.Wait() + h.listener.Close() + h.wg.Wait() - log.Println("Stopped HTTP listener service on ", t.ServiceAddress) + log.Println("I! Stopped HTTP listener service on ", h.ServiceAddress) } -// httpListen listens for HTTP requests. -func (t *HttpListener) httpListen() error { - if t.ReadTimeout.Duration < time.Second { - t.ReadTimeout.Duration = time.Second * 10 +// httpListen sets up an http.Server and calls server.Serve. +// like server.Serve, httpListen will always return a non-nil error, for this +// reason, the error returned should probably be ignored. +// see https://golang.org/pkg/net/http/#Server.Serve +func (h *HTTPListener) httpListen() error { + if h.ReadTimeout.Duration < time.Second { + h.ReadTimeout.Duration = time.Second * 10 } - if t.WriteTimeout.Duration < time.Second { - t.WriteTimeout.Duration = time.Second * 10 + if h.WriteTimeout.Duration < time.Second { + h.WriteTimeout.Duration = time.Second * 10 } var server = http.Server{ - Handler: t, - ReadTimeout: t.ReadTimeout.Duration, - WriteTimeout: t.WriteTimeout.Duration, + Handler: h, + ReadTimeout: h.ReadTimeout.Duration, + WriteTimeout: h.WriteTimeout.Duration, } - return server.Serve(t.listener) + return server.Serve(h.listener) } -func (t *HttpListener) ServeHTTP(res http.ResponseWriter, req *http.Request) { - t.wg.Add(1) - defer t.wg.Done() - body, err := ioutil.ReadAll(req.Body) - if err != nil { - log.Printf("Problem reading request: [%s], Error: %s\n", string(body), err) - http.Error(res, "ERROR reading request", http.StatusInternalServerError) - return - } - +func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) { switch req.URL.Path { case "/write": - var metrics []telegraf.Metric - metrics, err = t.parser.Parse(body) - if err == nil { - for _, m := range metrics { - t.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) - } - res.WriteHeader(http.StatusNoContent) - } else { - log.Printf("Problem parsing body: [%s], Error: %s\n", string(body), err) - http.Error(res, "ERROR parsing metrics", http.StatusInternalServerError) - } + h.serveWrite(res, req) case "/query": // Deliver a dummy response to the query endpoint, as some InfluxDB // clients test endpoint availability with a query @@ -147,8 +160,135 @@ func (t *HttpListener) ServeHTTP(res http.ResponseWriter, req *http.Request) { } } +func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { + // Check that the 
+func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
+	// Check that the content length is not too large for us to handle.
+	if req.ContentLength > h.MaxBodySize {
+		tooLarge(res)
+		return
+	}
+	now := time.Now()
+
+	// Handle gzip request bodies
+	body := req.Body
+	var err error
+	if req.Header.Get("Content-Encoding") == "gzip" {
+		body, err = gzip.NewReader(req.Body)
+		if err != nil {
+			log.Println("E! " + err.Error())
+			badRequest(res)
+			return
+		}
+		// Only defer Close once NewReader has succeeded; closing the nil
+		// reader returned on error would panic.
+		defer body.Close()
+	}
+	body = http.MaxBytesReader(res, body, h.MaxBodySize)
+
+	var return400 bool
+	var hangingBytes bool
+	buf := h.pool.get()
+	defer h.pool.put(buf)
+	bufStart := 0
+	for {
+		n, err := io.ReadFull(body, buf[bufStart:])
+		if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
+			// problem reading the request body
+			log.Println("E! " + err.Error())
+			badRequest(res)
+			return
+		}
+
+		if err == io.EOF {
+			if return400 {
+				badRequest(res)
+			} else {
+				res.WriteHeader(http.StatusNoContent)
+			}
+			return
+		}
+
+		if hangingBytes {
+			i := bytes.IndexByte(buf, '\n')
+			if i == -1 {
+				// still didn't find a newline, keep scanning
+				continue
+			}
+			// move the bytes remaining after the first newline to the front of the buffer
+			i++ // start copying after the newline
+			bufStart = len(buf) - i
+			if bufStart > 0 {
+				copy(buf, buf[i:])
+			}
+			hangingBytes = false
+			continue
+		}
+
+		if err == io.ErrUnexpectedEOF {
+			// finished reading the request body
+			if err := h.parse(buf[:n+bufStart], now); err != nil {
+				log.Println("E! " + err.Error())
+				return400 = true
+			}
+			if return400 {
+				badRequest(res)
+			} else {
+				res.WriteHeader(http.StatusNoContent)
+			}
+			return
+		}
+
+		// if we got down here it means that we filled our buffer, and there
+		// are still bytes remaining to be read. So we will parse up until the
+		// final newline, then push the rest of the bytes into the next buffer.
+		i := bytes.LastIndexByte(buf, '\n')
+		if i == -1 {
+			// drop any line longer than the max buffer size
+			log.Printf("E! http_listener received a single line longer than the maximum of %d bytes",
+				len(buf))
+			hangingBytes = true
+			return400 = true
+			bufStart = 0
+			continue
+		}
+		if err := h.parse(buf[:i], now); err != nil {
+			log.Println("E! " + err.Error())
+			return400 = true
+		}
+		// move the bytes remaining after the last newline to the front of the buffer
+		i++ // start copying after the newline
+		bufStart = len(buf) - i
+		if bufStart > 0 {
+			copy(buf, buf[i:])
+		}
+	}
+}
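The read loop above leans on io.ReadFull's error contract: a nil error means the buffer was filled and more input may remain, io.ErrUnexpectedEOF means the final partial chunk arrived, and io.EOF means no bytes were read at all. A small standalone sketch of that contract:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	buf := make([]byte, 8)

	// Partial read: some bytes, but fewer than len(buf).
	n, err := io.ReadFull(strings.NewReader("abc"), buf)
	fmt.Println(n, err) // 3 unexpected EOF

	// Empty reader: no bytes at all.
	n, err = io.ReadFull(strings.NewReader(""), buf)
	fmt.Println(n, err) // 0 EOF
}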
" + err.Error()) + return400 = true + } + // rotate the bit remaining after the last newline to the front of the buffer + i++ // start copying after the newline + bufStart = len(buf) - i + if bufStart > 0 { + copy(buf, buf[i:]) + } + } +} + +func (h *HTTPListener) parse(b []byte, t time.Time) error { + metrics, err := h.parser.ParseWithDefaultTime(b, t) + + for _, m := range metrics { + h.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) + } + + return err +} + +func tooLarge(res http.ResponseWriter) { + res.Header().Set("Content-Type", "application/json") + res.Header().Set("X-Influxdb-Version", "1.0") + res.WriteHeader(http.StatusRequestEntityTooLarge) + res.Write([]byte(`{"error":"http: request body too large"}`)) +} + +func badRequest(res http.ResponseWriter) { + res.Header().Set("Content-Type", "application/json") + res.Header().Set("X-Influxdb-Version", "1.0") + res.WriteHeader(http.StatusBadRequest) + res.Write([]byte(`{"error":"http: bad request"}`)) +} + func init() { inputs.Add("http_listener", func() telegraf.Input { - return &HttpListener{} + return &HTTPListener{ + ServiceAddress: ":8186", + } }) } diff --git a/plugins/inputs/http_listener/http_listener_test.go b/plugins/inputs/http_listener/http_listener_test.go index 270e8264a1808..84cf209ff5f2a 100644 --- a/plugins/inputs/http_listener/http_listener_test.go +++ b/plugins/inputs/http_listener/http_listener_test.go @@ -1,16 +1,16 @@ package http_listener import ( + "bytes" + "io/ioutil" + "net/http" "sync" "testing" "time" - "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "bytes" "github.com/stretchr/testify/require" - "net/http" ) const ( @@ -27,17 +27,15 @@ cpu_load_short,host=server06 value=12.0 1422568543702900257 emptyMsg = "" ) -func newTestHttpListener() *HttpListener { - listener := &HttpListener{ +func newTestHTTPListener() *HTTPListener { + listener := &HTTPListener{ ServiceAddress: ":8186", } return listener } func TestWriteHTTP(t *testing.T) { - listener := newTestHttpListener() - parser, _ := parsers.NewInfluxParser() - listener.SetParser(parser) + listener := newTestHTTPListener() acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) @@ -70,13 +68,146 @@ func TestWriteHTTP(t *testing.T) { map[string]string{"host": hostTag}, ) } + + // Post a gigantic metric to the listener and verify that an error is returned: + resp, err = http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(hugeMetric))) + require.NoError(t, err) + require.EqualValues(t, 400, resp.StatusCode) + + time.Sleep(time.Millisecond * 15) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01"}, + ) +} + +func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) { + listener := &HTTPListener{ + ServiceAddress: ":8296", + MaxLineSize: 128 * 1000, + } + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + time.Sleep(time.Millisecond * 25) + + // Post a gigantic metric to the listener and verify that it writes OK this time: + resp, err := http.Post("http://localhost:8296/write?db=mydb", "", bytes.NewBuffer([]byte(hugeMetric))) + require.NoError(t, err) + require.EqualValues(t, 204, resp.StatusCode) +} + +func TestWriteHTTPVerySmallMaxBody(t *testing.T) { + listener := &HTTPListener{ + ServiceAddress: ":8297", + MaxBodySize: 4096, + } + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer 
+
+func TestWriteHTTPVerySmallMaxBody(t *testing.T) {
+	listener := &HTTPListener{
+		ServiceAddress: ":8297",
+		MaxBodySize:    4096,
+	}
+
+	acc := &testutil.Accumulator{}
+	require.NoError(t, listener.Start(acc))
+	defer listener.Stop()
+
+	time.Sleep(time.Millisecond * 25)
+
+	resp, err := http.Post("http://localhost:8297/write", "", bytes.NewBuffer([]byte(hugeMetric)))
+	require.NoError(t, err)
+	require.EqualValues(t, 413, resp.StatusCode)
+}
+
+func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) {
+	listener := &HTTPListener{
+		ServiceAddress: ":8298",
+		MaxLineSize:    70,
+	}
+
+	acc := &testutil.Accumulator{}
+	require.NoError(t, listener.Start(acc))
+	defer listener.Stop()
+
+	time.Sleep(time.Millisecond * 25)
+
+	resp, err := http.Post("http://localhost:8298/write", "", bytes.NewBuffer([]byte(testMsgs)))
+	require.NoError(t, err)
+	require.EqualValues(t, 204, resp.StatusCode)
+
+	time.Sleep(time.Millisecond * 15)
+	hostTags := []string{"server02", "server03",
+		"server04", "server05", "server06"}
+	for _, hostTag := range hostTags {
+		acc.AssertContainsTaggedFields(t, "cpu_load_short",
+			map[string]interface{}{"value": float64(12)},
+			map[string]string{"host": hostTag},
+		)
+	}
+}
+
+func TestWriteHTTPLargeLinesSkipped(t *testing.T) {
+	listener := &HTTPListener{
+		ServiceAddress: ":8300",
+		MaxLineSize:    100,
+	}
+
+	acc := &testutil.Accumulator{}
+	require.NoError(t, listener.Start(acc))
+	defer listener.Stop()
+
+	time.Sleep(time.Millisecond * 25)
+
+	resp, err := http.Post("http://localhost:8300/write", "", bytes.NewBuffer([]byte(hugeMetric+testMsgs)))
+	require.NoError(t, err)
+	require.EqualValues(t, 400, resp.StatusCode)
+
+	time.Sleep(time.Millisecond * 15)
+	hostTags := []string{"server02", "server03",
+		"server04", "server05", "server06"}
+	for _, hostTag := range hostTags {
+		acc.AssertContainsTaggedFields(t, "cpu_load_short",
+			map[string]interface{}{"value": float64(12)},
+			map[string]string{"host": hostTag},
+		)
+	}
+}
+
+// test that writing gzipped data works
+func TestWriteHTTPGzippedData(t *testing.T) {
+	listener := &HTTPListener{
+		ServiceAddress: ":8299",
+	}
+
+	acc := &testutil.Accumulator{}
+	require.NoError(t, listener.Start(acc))
+	defer listener.Stop()
+
+	time.Sleep(time.Millisecond * 25)
+
+	data, err := ioutil.ReadFile("./testdata/testmsgs.gz")
+	require.NoError(t, err)
+
+	req, err := http.NewRequest("POST", "http://localhost:8299/write", bytes.NewBuffer(data))
+	require.NoError(t, err)
+	req.Header.Set("Content-Encoding", "gzip")
+
+	client := &http.Client{}
+	resp, err := client.Do(req)
+	require.NoError(t, err)
+	require.EqualValues(t, 204, resp.StatusCode)
+
+	time.Sleep(time.Millisecond * 50)
+	hostTags := []string{"server02", "server03",
+		"server04", "server05", "server06"}
+	for _, hostTag := range hostTags {
+		acc.AssertContainsTaggedFields(t, "cpu_load_short",
+			map[string]interface{}{"value": float64(12)},
+			map[string]string{"host": hostTag},
+		)
+	}
 }
 
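TestWriteHTTPGzippedData above reads a pre-built fixture from ./testdata/testmsgs.gz. A sketch of how such a fixture could be regenerated, assuming testMsgs holds the five cpu_load_short lines the assertions expect:

package main

import (
	"compress/gzip"
	"log"
	"os"
)

const testMsgs = `cpu_load_short,host=server02 value=12.0 1422568543702900257
cpu_load_short,host=server03 value=12.0 1422568543702900257
cpu_load_short,host=server04 value=12.0 1422568543702900257
cpu_load_short,host=server05 value=12.0 1422568543702900257
cpu_load_short,host=server06 value=12.0 1422568543702900257
`

func main() {
	f, err := os.Create("./testdata/testmsgs.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Write the gzip-compressed line protocol and flush it with Close.
	zw := gzip.NewWriter(f)
	if _, err := zw.Write([]byte(testMsgs)); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
}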
 // writes 25,000 metrics to the listener with 10 different writers
 func TestWriteHTTPHighTraffic(t *testing.T) {
-	listener := &HttpListener{ServiceAddress: ":8286"}
-	parser, _ := parsers.NewInfluxParser()
-	listener.SetParser(parser)
+	listener := &HTTPListener{ServiceAddress: ":8286"}
 
 	acc := &testutil.Accumulator{}
 	require.NoError(t, listener.Start(acc))
@@ -88,26 +219,25 @@ func TestWriteHTTPHighTraffic(t *testing.T) {
 	var wg sync.WaitGroup
 	for i := 0; i < 10; i++ {
 		wg.Add(1)
-		go func() {
+		go func(innerwg *sync.WaitGroup) {
+			defer innerwg.Done()
 			for i := 0; i < 500; i++ {
 				resp, err := http.Post("http://localhost:8286/write?db=mydb", "", bytes.NewBuffer([]byte(testMsgs)))
 				require.NoError(t, err)
 				require.EqualValues(t, 204, resp.StatusCode)
 			}
-			wg.Done()
-		}()
+		}(&wg)
 	}
 	wg.Wait()
 
-	time.Sleep(time.Millisecond * 50)
+	time.Sleep(time.Millisecond * 250)
 
 	listener.Gather(acc)
 
 	require.Equal(t, int64(25000), int64(acc.NMetrics()))
 }
 
 func TestReceive404ForInvalidEndpoint(t *testing.T) {
-	listener := newTestHttpListener()
-	listener.parser, _ = parsers.NewInfluxParser()
+	listener := newTestHTTPListener()
 
 	acc := &testutil.Accumulator{}
 	require.NoError(t, listener.Start(acc))
@@ -124,8 +254,7 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) {
 
 func TestWriteHTTPInvalid(t *testing.T) {
 	time.Sleep(time.Millisecond * 250)
-	listener := newTestHttpListener()
-	listener.parser, _ = parsers.NewInfluxParser()
+	listener := newTestHTTPListener()
 
 	acc := &testutil.Accumulator{}
 	require.NoError(t, listener.Start(acc))
@@ -136,14 +265,13 @@ func TestWriteHTTPInvalid(t *testing.T) {
 
 	// post single message to listener
 	resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(badMsg)))
 	require.NoError(t, err)
-	require.EqualValues(t, 500, resp.StatusCode)
+	require.EqualValues(t, 400, resp.StatusCode)
 }
 
 func TestWriteHTTPEmpty(t *testing.T) {
 	time.Sleep(time.Millisecond * 250)
-	listener := newTestHttpListener()
-	listener.parser, _ = parsers.NewInfluxParser()
+	listener := newTestHTTPListener()
 
 	acc := &testutil.Accumulator{}
 	require.NoError(t, listener.Start(acc))
@@ -160,8 +288,7 @@ func TestWriteHTTPEmpty(t *testing.T) {
 
 func TestQueryAndPingHTTP(t *testing.T) {
 	time.Sleep(time.Millisecond * 250)
-	listener := newTestHttpListener()
-	listener.parser, _ = parsers.NewInfluxParser()
+	listener := newTestHTTPListener()
 
 	acc := &testutil.Accumulator{}
 	require.NoError(t, listener.Start(acc))
@@ -179,3 +306,6 @@ func TestQueryAndPingHTTP(t *testing.T) {
 	require.NoError(t, err)
 	require.EqualValues(t, 204, resp.StatusCode)
 }
+
+const hugeMetric = `super_long_metric,foo=bar 
clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patter
ns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07
,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=
0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_
backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,u
sed_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl
_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_
processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connec
ted_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes
_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_chi
ldren=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitra
te=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048
576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33
792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fr
agmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,tota
l_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evi
cted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=
0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_
user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hit
s=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i +` diff --git a/plugins/inputs/http_listener/stoppableListener/LICENSE b/plugins/inputs/http_listener/stoppableListener/LICENSE deleted file mode 100644 index eb07824517a4d..0000000000000 --- a/plugins/inputs/http_listener/stoppableListener/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Copyright (c) 2014, Eric Urban -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/plugins/inputs/http_listener/stoppableListener/listener.go b/plugins/inputs/http_listener/stoppableListener/listener.go deleted file mode 100644 index 69a9f33cc3b46..0000000000000 --- a/plugins/inputs/http_listener/stoppableListener/listener.go +++ /dev/null @@ -1,62 +0,0 @@ -package stoppableListener - -import ( - "errors" - "net" - "time" -) - -type StoppableListener struct { - *net.TCPListener //Wrapped listener - stop chan int //Channel used only to indicate listener should shutdown -} - -func New(l net.Listener) (*StoppableListener, error) { - tcpL, ok := l.(*net.TCPListener) - - if !ok { - return nil, errors.New("Cannot wrap listener") - } - - retval := &StoppableListener{} - retval.TCPListener = tcpL - retval.stop = make(chan int) - - return retval, nil -} - -var StoppedError = errors.New("Listener stopped") - -func (sl *StoppableListener) Accept() (net.Conn, error) { - - for { - //Wait up to one second for a new connection - sl.SetDeadline(time.Now().Add(time.Second)) - - newConn, err := sl.TCPListener.Accept() - - //Check for the channel being closed - select { - case <-sl.stop: - return nil, StoppedError - default: - //If the channel is still open, continue as normal - } - - if err != nil { - netErr, ok := err.(net.Error) - - //If this is a timeout, then continue to wait for - //new connections - if ok && netErr.Timeout() && netErr.Temporary() { - continue - } - } - - return newConn, err - } -} - -func (sl *StoppableListener) Stop() { - close(sl.stop) -} diff --git a/plugins/inputs/http_listener/testdata/testmsgs.gz b/plugins/inputs/http_listener/testdata/testmsgs.gz new file mode 100644 index 0000000000000000000000000000000000000000..f524dc07128b95fa256b4e0df66bc2b6f04d7058 GIT binary patch literal 97 zcmV-n0G|IJiwFSz6b@Jb14}L_jnBzXOo=bf$S*3<$;dA*u`Nz5DoZUgFj6Q>%qdN^ zH8j#QP%tzxGBP!@Ff}nYH!!j^FfcMT=Ss${*O&smCKTv3r9iJ4A-!Axe?y61Edc-k Db0r~l literal 0 HcmV?d00001 diff --git a/plugins/inputs/influxdb/influxdb.go b/plugins/inputs/influxdb/influxdb.go index 92da50d3ef183..bb11cfee4c9a4 100644 --- a/plugins/inputs/influxdb/influxdb.go +++ b/plugins/inputs/influxdb/influxdb.go @@ -10,11 +10,16 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) type 
InfluxDB struct { URLs []string `toml:"urls"` + + Timeout internal.Duration + + client *http.Client } func (*InfluxDB) Description() string { @@ -32,6 +37,9 @@ func (*InfluxDB) SampleConfig() string { urls = [ "http://localhost:8086/debug/vars" ] + + ## http request & header timeout + timeout = "5s" ` } @@ -39,6 +47,16 @@ func (i *InfluxDB) Gather(acc telegraf.Accumulator) error { if len(i.URLs) == 0 { i.URLs = []string{"http://localhost:8086/debug/vars"} } + + if i.client == nil { + i.client = &http.Client{ + Transport: &http.Transport{ + ResponseHeaderTimeout: i.Timeout.Duration, + }, + Timeout: i.Timeout.Duration, + } + } + errorChannel := make(chan error, len(i.URLs)) var wg sync.WaitGroup @@ -104,15 +122,6 @@ type memstats struct { GCCPUFraction float64 `json:"GCCPUFraction"` } -var tr = &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), -} - -var client = &http.Client{ - Transport: tr, - Timeout: time.Duration(4 * time.Second), -} - // Gathers data from a particular URL // Parameters: // acc : The telegraf Accumulator to use @@ -127,7 +136,7 @@ func (i *InfluxDB) gatherURL( shardCounter := 0 now := time.Now() - resp, err := client.Get(url) + resp, err := i.client.Get(url) if err != nil { return err } @@ -248,6 +257,8 @@ func (i *InfluxDB) gatherURL( func init() { inputs.Add("influxdb", func() telegraf.Input { - return &InfluxDB{} + return &InfluxDB{ + Timeout: internal.Duration{Duration: time.Second * 5}, + } }) } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index bdfce17f981b1..52117759dd5ad 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -90,7 +90,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { case "newest": config.Offsets.Initial = sarama.OffsetNewest default: - log.Printf("WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n", + log.Printf("I! WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n", k.Offset) config.Offsets.Initial = sarama.OffsetOldest } @@ -115,7 +115,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { // Start the kafka message reader go k.receiver() - log.Printf("Started the kafka consumer service, peers: %v, topics: %v\n", + log.Printf("I! Started the kafka consumer service, peers: %v, topics: %v\n", k.ZookeeperPeers, k.Topics) return nil } @@ -129,12 +129,12 @@ func (k *Kafka) receiver() { return case err := <-k.errs: if err != nil { - log.Printf("Kafka Consumer Error: %s\n", err) + log.Printf("E! Kafka Consumer Error: %s\n", err) } case msg := <-k.in: metrics, err := k.parser.Parse(msg.Value) if err != nil { - log.Printf("KAFKA PARSE ERROR\nmessage: %s\nerror: %s", + log.Printf("E! Kafka Message Parse Error\nmessage: %s\nerror: %s", string(msg.Value), err.Error()) } @@ -158,7 +158,7 @@ func (k *Kafka) Stop() { defer k.Unlock() close(k.done) if err := k.Consumer.Close(); err != nil { - log.Printf("Error closing kafka consumer: %s\n", err.Error()) + log.Printf("E! 
Error closing kafka consumer: %s\n", err.Error()) } } diff --git a/plugins/inputs/kubernetes/README.md b/plugins/inputs/kubernetes/README.md new file mode 100644 index 0000000000000..099cf152650df --- /dev/null +++ b/plugins/inputs/kubernetes/README.md @@ -0,0 +1,265 @@ +# Kubernetes Input Plugin + +**This plugin is experimental and may cause high cardinality issues with moderate to large Kubernetes deployments** + +This input plugin talks to the kubelet API using the `/stats/summary` endpoint to gather metrics about the running pods and containers for a single host. It is assumed that this plugin is running as part of a `daemonset` within a Kubernetes installation. This means that Telegraf is running on every node within the cluster. Therefore, you should configure this plugin to talk to its locally running kubelet. + +To find the IP address of the host you are running on, you can issue a command like the following: +``` +$ curl -s $API_URL/api/v1/namespaces/$POD_NAMESPACE/pods/$HOSTNAME --header "Authorization: Bearer $TOKEN" --insecure | jq -r '.status.hostIP' +``` +In this case we used the downward API to pass in `$POD_NAMESPACE`; `$HOSTNAME` is the hostname of the pod, which is set by the Kubernetes API. + +## Summary Data + +```json +{ + "node": { + "nodeName": "node1", + "systemContainers": [ + { + "name": "kubelet", + "startTime": "2016-08-25T18:46:52Z", + "cpu": { + "time": "2016-09-27T16:57:31Z", + "usageNanoCores": 56652446, + "usageCoreNanoSeconds": 101437561712262 + }, + "memory": { + "time": "2016-09-27T16:57:31Z", + "usageBytes": 62529536, + "workingSetBytes": 62349312, + "rssBytes": 47509504, + "pageFaults": 4769397409, + "majorPageFaults": 13 + }, + "rootfs": { + "availableBytes": 84379979776, + "capacityBytes": 105553100800 + }, + "logs": { + "availableBytes": 84379979776, + "capacityBytes": 105553100800 + }, + "userDefinedMetrics": null + }, + { + "name": "bar", + "startTime": "2016-08-25T18:46:52Z", + "cpu": { + "time": "2016-09-27T16:57:31Z", + "usageNanoCores": 56652446, + "usageCoreNanoSeconds": 101437561712262 + }, + "memory": { + "time": "2016-09-27T16:57:31Z", + "usageBytes": 62529536, + "workingSetBytes": 62349312, + "rssBytes": 47509504, + "pageFaults": 4769397409, + "majorPageFaults": 13 + }, + "rootfs": { + "availableBytes": 84379979776, + "capacityBytes": 105553100800 + }, + "logs": { + "availableBytes": 84379979776, + "capacityBytes": 105553100800 + }, + "userDefinedMetrics": null + } + ], + "startTime": "2016-08-25T18:46:52Z", + "cpu": { + "time": "2016-09-27T16:57:41Z", + "usageNanoCores": 576996212, + "usageCoreNanoSeconds": 774129887054161 + }, + "memory": { + "time": "2016-09-27T16:57:41Z", + "availableBytes": 10726387712, + "usageBytes": 12313182208, + "workingSetBytes": 5081538560, + "rssBytes": 35586048, + "pageFaults": 351742, + "majorPageFaults": 1236 + }, + "network": { + "time": "2016-09-27T16:57:41Z", + "rxBytes": 213281337459, + "rxErrors": 0, + "txBytes": 292869995684, + "txErrors": 0 + }, + "fs": { + "availableBytes": 84379979776, + "capacityBytes": 105553100800, + "usedBytes": 16754286592 + }, + "runtime": { + "imageFs": { + "availableBytes": 84379979776, + "capacityBytes": 105553100800, + "usedBytes": 5809371475 + } + } + }, + "pods": [ + { + "podRef": { + "name": "foopod", + "namespace": "foons", + "uid": "6d305b06-8419-11e6-825c-42010af000ae" + }, + "startTime": "2016-09-26T18:45:42Z", + "containers": [ + { + "name": "foocontainer", + "startTime": "2016-09-26T18:46:43Z", + "cpu": { + "time": "2016-09-27T16:57:32Z", + 
"usageNanoCores": 846503, + "usageCoreNanoSeconds": 56507553554 + }, + "memory": { + "time": "2016-09-27T16:57:32Z", + "usageBytes": 30789632, + "workingSetBytes": 30789632, + "rssBytes": 30695424, + "pageFaults": 10761, + "majorPageFaults": 0 + }, + "rootfs": { + "availableBytes": 84379979776, + "capacityBytes": 105553100800, + "usedBytes": 57344 + }, + "logs": { + "availableBytes": 84379979776, + "capacityBytes": 105553100800, + "usedBytes": 24576 + }, + "userDefinedMetrics": null + } + ], + "network": { + "time": "2016-09-27T16:57:34Z", + "rxBytes": 70749124, + "rxErrors": 0, + "txBytes": 47813506, + "txErrors": 0 + }, + "volume": [ + { + "availableBytes": 7903948800, + "capacityBytes": 7903961088, + "usedBytes": 12288, + "name": "volume1" + }, + { + "availableBytes": 7903956992, + "capacityBytes": 7903961088, + "usedBytes": 4096, + "name": "volume2" + }, + { + "availableBytes": 7903948800, + "capacityBytes": 7903961088, + "usedBytes": 12288, + "name": "volume3" + }, + { + "availableBytes": 7903952896, + "capacityBytes": 7903961088, + "usedBytes": 8192, + "name": "volume4" + } + ] + } + ] + } + ``` + + ### Daemonset YAML + +```yaml +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: telegraf + namespace: telegraf +spec: + template: + metadata: + labels: + app: telegraf + spec: + serviceAccount: telegraf + containers: + - name: telegraf + image: quay.io/org/image:latest + imagePullPolicy: IfNotPresent + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: "HOST_PROC" + value: "/rootfs/proc" + - name: "HOST_SYS" + value: "/rootfs/sys" + volumeMounts: + - name: sysro + mountPath: /rootfs/sys + readOnly: true + - name: procro + mountPath: /rootfs/proc + readOnly: true + - name: varrunutmpro + mountPath: /var/run/utmp + readOnly: true + - name: logger-redis-creds + mountPath: /var/run/secrets/deis/redis/creds + volumes: + - name: sysro + hostPath: + path: /sys + - name: procro + hostPath: + path: /proc + - name: varrunutmpro + hostPath: + path: /var/run/utmp +``` + +### Line Protocol + +#### kubernetes_pod_container +``` +kubernetes_pod_container,host=ip-10-0-0-0.ec2.internal, +container_name=deis-controller,namespace=deis, +node_name=ip-10-0-0-0.ec2.internal, pod_name=deis-controller-3058870187-xazsr, cpu_usage_core_nanoseconds=2432835i,cpu_usage_nanocores=0i, +logsfs_avaialble_bytes=121128271872i,logsfs_capacity_bytes=153567944704i, +logsfs_used_bytes=20787200i,memory_major_page_faults=0i, +memory_page_faults=175i,memory_rss_bytes=0i, +memory_usage_bytes=0i,memory_working_set_bytes=0i, +rootfs_available_bytes=121128271872i,rootfs_capacity_bytes=153567944704i, +rootfs_used_bytes=1110016i 1476477530000000000 + ``` + +#### kubernetes_pod_volume +``` +kubernetes_pod_volume,host=ip-10-0-0-0.ec2.internal,name=default-token-f7wts, +namespace=kube-system,node_name=ip-10-0-0-0.ec2.internal, +pod_name=kubernetes-dashboard-v1.1.1-t4x4t, available_bytes=8415240192i, +capacity_bytes=8415252480i,used_bytes=12288i 1476477530000000000 +``` + +#### kubernetes_pod_network +``` +kubernetes_pod_network,host=ip-10-0-0-0.ec2.internal,namespace=deis, +node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr, +rx_bytes=120671099i,rx_errors=0i, +tx_bytes=102451983i,tx_errors=0i 1476477530000000000 +``` diff --git a/plugins/inputs/kubernetes/kubernetes.go b/plugins/inputs/kubernetes/kubernetes.go new file mode 100644 index 0000000000000..ee95d560f426c --- /dev/null +++ b/plugins/inputs/kubernetes/kubernetes.go @@ -0,0 +1,242 @@ +package 
kubernetes + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/errchan" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Kubernetes represents the config object for the plugin +type Kubernetes struct { + URL string + + // Bearer Token authorization file path + BearerToken string `toml:"bearer_token"` + + // Path to CA file + SSLCA string `toml:"ssl_ca"` + // Path to host cert file + SSLCert string `toml:"ssl_cert"` + // Path to cert key file + SSLKey string `toml:"ssl_key"` + // Use SSL but skip chain & host verification + InsecureSkipVerify bool + + RoundTripper http.RoundTripper +} + +var sampleConfig = ` + ## URL for the kubelet + url = "http://1.1.1.1:10255" + + ## Use bearer token for authorization + # bearer_token = /path/to/bearer/token + + ## Optional SSL Config + # ssl_ca = /path/to/cafile + # ssl_cert = /path/to/certfile + # ssl_key = /path/to/keyfile + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false +` + +const ( + summaryEndpoint = `%s/stats/summary` +) + +func init() { + inputs.Add("kubernetes", func() telegraf.Input { + return &Kubernetes{} + }) +} + +//SampleConfig returns a sample config +func (k *Kubernetes) SampleConfig() string { + return sampleConfig +} + +//Description returns the description of this plugin +func (k *Kubernetes) Description() string { + return "Read metrics from the kubernetes kubelet api" +} + +//Gather collects kubernetes metrics from a given URL +func (k *Kubernetes) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + errChan := errchan.New(1) + wg.Add(1) + go func(k *Kubernetes) { + defer wg.Done() + errChan.C <- k.gatherSummary(k.URL, acc) + }(k) + wg.Wait() + return errChan.Error() +} + +func buildURL(endpoint string, base string) (*url.URL, error) { + u := fmt.Sprintf(endpoint, base) + addr, err := url.Parse(u) + if err != nil { + return nil, fmt.Errorf("Unable to parse address '%s': %s", u, err) + } + return addr, nil +} + +func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) error { + url := fmt.Sprintf("%s/stats/summary", baseURL) + var req, err = http.NewRequest("GET", url, nil) + var token []byte + var resp *http.Response + + tlsCfg, err := internal.GetTLSConfig(k.SSLCert, k.SSLKey, k.SSLCA, k.InsecureSkipVerify) + if err != nil { + return err + } + + if k.RoundTripper == nil { + k.RoundTripper = &http.Transport{ + TLSHandshakeTimeout: 5 * time.Second, + TLSClientConfig: tlsCfg, + ResponseHeaderTimeout: time.Duration(3 * time.Second), + } + } + + if k.BearerToken != "" { + token, err = ioutil.ReadFile(k.BearerToken) + if err != nil { + return err + } + req.Header.Set("Authorization", "Bearer "+string(token)) + } + + resp, err = k.RoundTripper.RoundTrip(req) + if err != nil { + return fmt.Errorf("error making HTTP request to %s: %s", url, err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status %s", url, resp.Status) + } + + summaryMetrics := &SummaryMetrics{} + err = json.NewDecoder(resp.Body).Decode(summaryMetrics) + if err != nil { + return fmt.Errorf(`Error parsing response: %s`, err) + } + buildSystemContainerMetrics(summaryMetrics, acc) + buildNodeMetrics(summaryMetrics, acc) + buildPodMetrics(summaryMetrics, acc) + return nil +} + +func buildSystemContainerMetrics(summaryMetrics *SummaryMetrics, acc 
telegraf.Accumulator) { + for _, container := range summaryMetrics.Node.SystemContainers { + tags := map[string]string{ + "node_name": summaryMetrics.Node.NodeName, + "container_name": container.Name, + } + fields := make(map[string]interface{}) + fields["cpu_usage_nanocores"] = container.CPU.UsageNanoCores + fields["cpu_usage_core_nanoseconds"] = container.CPU.UsageCoreNanoSeconds + fields["memory_usage_bytes"] = container.Memory.UsageBytes + fields["memory_working_set_bytes"] = container.Memory.WorkingSetBytes + fields["memory_rss_bytes"] = container.Memory.RSSBytes + fields["memory_page_faults"] = container.Memory.PageFaults + fields["memory_major_page_faults"] = container.Memory.MajorPageFaults + fields["rootfs_available_bytes"] = container.RootFS.AvailableBytes + fields["rootfs_capacity_bytes"] = container.RootFS.CapacityBytes + fields["logsfs_avaialble_bytes"] = container.LogsFS.AvailableBytes + fields["logsfs_capacity_bytes"] = container.LogsFS.CapacityBytes + acc.AddFields("kubernetes_system_container", fields, tags) + } +} + +func buildNodeMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) { + tags := map[string]string{ + "node_name": summaryMetrics.Node.NodeName, + } + fields := make(map[string]interface{}) + fields["cpu_usage_nanocores"] = summaryMetrics.Node.CPU.UsageNanoCores + fields["cpu_usage_core_nanoseconds"] = summaryMetrics.Node.CPU.UsageCoreNanoSeconds + fields["memory_available_bytes"] = summaryMetrics.Node.Memory.AvailableBytes + fields["memory_usage_bytes"] = summaryMetrics.Node.Memory.UsageBytes + fields["memory_working_set_bytes"] = summaryMetrics.Node.Memory.WorkingSetBytes + fields["memory_rss_bytes"] = summaryMetrics.Node.Memory.RSSBytes + fields["memory_page_faults"] = summaryMetrics.Node.Memory.PageFaults + fields["memory_major_page_faults"] = summaryMetrics.Node.Memory.MajorPageFaults + fields["network_rx_bytes"] = summaryMetrics.Node.Network.RXBytes + fields["network_rx_errors"] = summaryMetrics.Node.Network.RXErrors + fields["network_tx_bytes"] = summaryMetrics.Node.Network.TXBytes + fields["network_tx_errors"] = summaryMetrics.Node.Network.TXErrors + fields["fs_available_bytes"] = summaryMetrics.Node.FileSystem.AvailableBytes + fields["fs_capacity_bytes"] = summaryMetrics.Node.FileSystem.CapacityBytes + fields["fs_used_bytes"] = summaryMetrics.Node.FileSystem.UsedBytes + fields["runtime_image_fs_available_bytes"] = summaryMetrics.Node.Runtime.ImageFileSystem.AvailableBytes + fields["runtime_image_fs_capacity_bytes"] = summaryMetrics.Node.Runtime.ImageFileSystem.CapacityBytes + fields["runtime_image_fs_used_bytes"] = summaryMetrics.Node.Runtime.ImageFileSystem.UsedBytes + acc.AddFields("kubernetes_node", fields, tags) +} + +func buildPodMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) { + for _, pod := range summaryMetrics.Pods { + for _, container := range pod.Containers { + tags := map[string]string{ + "node_name": summaryMetrics.Node.NodeName, + "namespace": pod.PodRef.Namespace, + "container_name": container.Name, + "pod_name": pod.PodRef.Name, + } + fields := make(map[string]interface{}) + fields["cpu_usage_nanocores"] = container.CPU.UsageNanoCores + fields["cpu_usage_core_nanoseconds"] = container.CPU.UsageCoreNanoSeconds + fields["memory_usage_bytes"] = container.Memory.UsageBytes + fields["memory_working_set_bytes"] = container.Memory.WorkingSetBytes + fields["memory_rss_bytes"] = container.Memory.RSSBytes + fields["memory_page_faults"] = container.Memory.PageFaults + fields["memory_major_page_faults"] = 
container.Memory.MajorPageFaults + fields["rootfs_available_bytes"] = container.RootFS.AvailableBytes + fields["rootfs_capacity_bytes"] = container.RootFS.CapacityBytes + fields["rootfs_used_bytes"] = container.RootFS.UsedBytes + fields["logsfs_avaialble_bytes"] = container.LogsFS.AvailableBytes + fields["logsfs_capacity_bytes"] = container.LogsFS.CapacityBytes + fields["logsfs_used_bytes"] = container.LogsFS.UsedBytes + acc.AddFields("kubernetes_pod_container", fields, tags) + } + + for _, volume := range pod.Volumes { + tags := map[string]string{ + "node_name": summaryMetrics.Node.NodeName, + "pod_name": pod.PodRef.Name, + "namespace": pod.PodRef.Namespace, + "volume_name": volume.Name, + } + fields := make(map[string]interface{}) + fields["available_bytes"] = volume.AvailableBytes + fields["capacity_bytes"] = volume.CapacityBytes + fields["used_bytes"] = volume.UsedBytes + acc.AddFields("kubernetes_pod_volume", fields, tags) + } + + tags := map[string]string{ + "node_name": summaryMetrics.Node.NodeName, + "pod_name": pod.PodRef.Name, + "namespace": pod.PodRef.Namespace, + } + fields := make(map[string]interface{}) + fields["rx_bytes"] = pod.Network.RXBytes + fields["rx_errors"] = pod.Network.RXErrors + fields["tx_bytes"] = pod.Network.TXBytes + fields["tx_errors"] = pod.Network.TXErrors + acc.AddFields("kubernetes_pod_network", fields, tags) + } +} diff --git a/plugins/inputs/kubernetes/kubernetes_metrics.go b/plugins/inputs/kubernetes/kubernetes_metrics.go new file mode 100644 index 0000000000000..a767a604a0733 --- /dev/null +++ b/plugins/inputs/kubernetes/kubernetes_metrics.go @@ -0,0 +1,93 @@ +package kubernetes + +import "time" + +// SummaryMetrics represents all the summary data about a particular node retrieved from a kubelet +type SummaryMetrics struct { + Node NodeMetrics `json:"node"` + Pods []PodMetrics `json:"pods"` +} + +// NodeMetrics represents detailed information about a node +type NodeMetrics struct { + NodeName string `json:"nodeName"` + SystemContainers []ContainerMetrics `json:"systemContainers"` + StartTime time.Time `json:"startTime"` + CPU CPUMetrics `json:"cpu"` + Memory MemoryMetrics `json:"memory"` + Network NetworkMetrics `json:"network"` + FileSystem FileSystemMetrics `json:"fs"` + Runtime RuntimeMetrics `json:"runtime"` +} + +// ContainerMetrics represents the metric data collected about a container from the kubelet +type ContainerMetrics struct { + Name string `json:"name"` + StartTime time.Time `json:"startTime"` + CPU CPUMetrics `json:"cpu"` + Memory MemoryMetrics `json:"memory"` + RootFS FileSystemMetrics `json:"rootfs"` + LogsFS FileSystemMetrics `json:"logs"` +} + +// RuntimeMetrics contains metric data on the runtime of the system +type RuntimeMetrics struct { + ImageFileSystem FileSystemMetrics `json:"imageFs"` +} + +// CPUMetrics represents the cpu usage data of a pod or node +type CPUMetrics struct { + Time time.Time `json:"time"` + UsageNanoCores int64 `json:"usageNanoCores"` + UsageCoreNanoSeconds int64 `json:"usageCoreNanoSeconds"` +} + +// PodMetrics contains metric data on a given pod +type PodMetrics struct { + PodRef PodReference `json:"podRef"` + StartTime time.Time `json:"startTime"` + Containers []ContainerMetrics `json:"containers"` + Network NetworkMetrics `json:"network"` + Volumes []VolumeMetrics `json:"volume"` +} + +// PodReference is how a pod is identified +type PodReference struct { + Name string `json:"name"` + Namespace string `json:"namespace"` +} + +// MemoryMetrics represents the memory metrics for a pod or node +type 
MemoryMetrics struct { + Time time.Time `json:"time"` + AvailableBytes int64 `json:"availableBytes"` + UsageBytes int64 `json:"usageBytes"` + WorkingSetBytes int64 `json:"workingSetBytes"` + RSSBytes int64 `json:"rssBytes"` + PageFaults int64 `json:"pageFaults"` + MajorPageFaults int64 `json:"majorPageFaults"` +} + +// FileSystemMetrics represents disk usage metrics for a pod or node +type FileSystemMetrics struct { + AvailableBytes int64 `json:"availableBytes"` + CapacityBytes int64 `json:"capacityBytes"` + UsedBytes int64 `json:"usedBytes"` +} + +// NetworkMetrics represents network usage data for a pod or node +type NetworkMetrics struct { + Time time.Time `json:"time"` + RXBytes int64 `json:"rxBytes"` + RXErrors int64 `json:"rxErrors"` + TXBytes int64 `json:"txBytes"` + TXErrors int64 `json:"txErrors"` +} + +// VolumeMetrics represents the disk usage data for a given volume +type VolumeMetrics struct { + Name string `json:"name"` + AvailableBytes int64 `json:"availableBytes"` + CapacityBytes int64 `json:"capacityBytes"` + UsedBytes int64 `json:"usedBytes"` +} diff --git a/plugins/inputs/kubernetes/kubernetes_test.go b/plugins/inputs/kubernetes/kubernetes_test.go new file mode 100644 index 0000000000000..14134c150afde --- /dev/null +++ b/plugins/inputs/kubernetes/kubernetes_test.go @@ -0,0 +1,289 @@ +package kubernetes + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestKubernetesStats(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, response) + })) + defer ts.Close() + + k := &Kubernetes{ + URL: ts.URL, + } + + var acc testutil.Accumulator + err := k.Gather(&acc) + require.NoError(t, err) + + fields := map[string]interface{}{ + "cpu_usage_nanocores": int64(56652446), + "cpu_usage_core_nanoseconds": int64(101437561712262), + "memory_usage_bytes": int64(62529536), + "memory_working_set_bytes": int64(62349312), + "memory_rss_bytes": int64(47509504), + "memory_page_faults": int64(4769397409), + "memory_major_page_faults": int64(13), + "rootfs_available_bytes": int64(84379979776), + "rootfs_capacity_bytes": int64(105553100800), + "logsfs_avaialble_bytes": int64(84379979776), + "logsfs_capacity_bytes": int64(105553100800), + } + tags := map[string]string{ + "node_name": "node1", + "container_name": "kubelet", + } + acc.AssertContainsTaggedFields(t, "kubernetes_system_container", fields, tags) + + fields = map[string]interface{}{ + "cpu_usage_nanocores": int64(576996212), + "cpu_usage_core_nanoseconds": int64(774129887054161), + "memory_usage_bytes": int64(12313182208), + "memory_working_set_bytes": int64(5081538560), + "memory_rss_bytes": int64(35586048), + "memory_page_faults": int64(351742), + "memory_major_page_faults": int64(1236), + "memory_available_bytes": int64(10726387712), + "network_rx_bytes": int64(213281337459), + "network_rx_errors": int64(0), + "network_tx_bytes": int64(292869995684), + "network_tx_errors": int64(0), + "fs_available_bytes": int64(84379979776), + "fs_capacity_bytes": int64(105553100800), + "fs_used_bytes": int64(16754286592), + "runtime_image_fs_available_bytes": int64(84379979776), + "runtime_image_fs_capacity_bytes": int64(105553100800), + "runtime_image_fs_used_bytes": int64(5809371475), + } + tags = map[string]string{ + "node_name": "node1", + } + acc.AssertContainsTaggedFields(t, "kubernetes_node", fields, tags) + + fields = 
map[string]interface{}{ + "cpu_usage_nanocores": int64(846503), + "cpu_usage_core_nanoseconds": int64(56507553554), + "memory_usage_bytes": int64(30789632), + "memory_working_set_bytes": int64(30789632), + "memory_rss_bytes": int64(30695424), + "memory_page_faults": int64(10761), + "memory_major_page_faults": int64(0), + "rootfs_available_bytes": int64(84379979776), + "rootfs_capacity_bytes": int64(105553100800), + "rootfs_used_bytes": int64(57344), + "logsfs_avaialble_bytes": int64(84379979776), + "logsfs_capacity_bytes": int64(105553100800), + "logsfs_used_bytes": int64(24576), + } + tags = map[string]string{ + "node_name": "node1", + "container_name": "foocontainer", + "namespace": "foons", + "pod_name": "foopod", + } + acc.AssertContainsTaggedFields(t, "kubernetes_pod_container", fields, tags) + + fields = map[string]interface{}{ + "available_bytes": int64(7903948800), + "capacity_bytes": int64(7903961088), + "used_bytes": int64(12288), + } + tags = map[string]string{ + "node_name": "node1", + "volume_name": "volume1", + "namespace": "foons", + "pod_name": "foopod", + } + acc.AssertContainsTaggedFields(t, "kubernetes_pod_volume", fields, tags) + + fields = map[string]interface{}{ + "rx_bytes": int64(70749124), + "rx_errors": int64(0), + "tx_bytes": int64(47813506), + "tx_errors": int64(0), + } + tags = map[string]string{ + "node_name": "node1", + "namespace": "foons", + "pod_name": "foopod", + } + acc.AssertContainsTaggedFields(t, "kubernetes_pod_network", fields, tags) + +} + +var response = ` +{ + "node": { + "nodeName": "node1", + "systemContainers": [ + { + "name": "kubelet", + "startTime": "2016-08-25T18:46:52Z", + "cpu": { + "time": "2016-09-27T16:57:31Z", + "usageNanoCores": 56652446, + "usageCoreNanoSeconds": 101437561712262 + }, + "memory": { + "time": "2016-09-27T16:57:31Z", + "usageBytes": 62529536, + "workingSetBytes": 62349312, + "rssBytes": 47509504, + "pageFaults": 4769397409, + "majorPageFaults": 13 + }, + "rootfs": { + "availableBytes": 84379979776, + "capacityBytes": 105553100800 + }, + "logs": { + "availableBytes": 84379979776, + "capacityBytes": 105553100800 + }, + "userDefinedMetrics": null + }, + { + "name": "bar", + "startTime": "2016-08-25T18:46:52Z", + "cpu": { + "time": "2016-09-27T16:57:31Z", + "usageNanoCores": 56652446, + "usageCoreNanoSeconds": 101437561712262 + }, + "memory": { + "time": "2016-09-27T16:57:31Z", + "usageBytes": 62529536, + "workingSetBytes": 62349312, + "rssBytes": 47509504, + "pageFaults": 4769397409, + "majorPageFaults": 13 + }, + "rootfs": { + "availableBytes": 84379979776, + "capacityBytes": 105553100800 + }, + "logs": { + "availableBytes": 84379979776, + "capacityBytes": 105553100800 + }, + "userDefinedMetrics": null + } + ], + "startTime": "2016-08-25T18:46:52Z", + "cpu": { + "time": "2016-09-27T16:57:41Z", + "usageNanoCores": 576996212, + "usageCoreNanoSeconds": 774129887054161 + }, + "memory": { + "time": "2016-09-27T16:57:41Z", + "availableBytes": 10726387712, + "usageBytes": 12313182208, + "workingSetBytes": 5081538560, + "rssBytes": 35586048, + "pageFaults": 351742, + "majorPageFaults": 1236 + }, + "network": { + "time": "2016-09-27T16:57:41Z", + "rxBytes": 213281337459, + "rxErrors": 0, + "txBytes": 292869995684, + "txErrors": 0 + }, + "fs": { + "availableBytes": 84379979776, + "capacityBytes": 105553100800, + "usedBytes": 16754286592 + }, + "runtime": { + "imageFs": { + "availableBytes": 84379979776, + "capacityBytes": 105553100800, + "usedBytes": 5809371475 + } + } + }, + "pods": [ + { + "podRef": { + "name": "foopod", + 
"namespace": "foons", + "uid": "6d305b06-8419-11e6-825c-42010af000ae" + }, + "startTime": "2016-09-26T18:45:42Z", + "containers": [ + { + "name": "foocontainer", + "startTime": "2016-09-26T18:46:43Z", + "cpu": { + "time": "2016-09-27T16:57:32Z", + "usageNanoCores": 846503, + "usageCoreNanoSeconds": 56507553554 + }, + "memory": { + "time": "2016-09-27T16:57:32Z", + "usageBytes": 30789632, + "workingSetBytes": 30789632, + "rssBytes": 30695424, + "pageFaults": 10761, + "majorPageFaults": 0 + }, + "rootfs": { + "availableBytes": 84379979776, + "capacityBytes": 105553100800, + "usedBytes": 57344 + }, + "logs": { + "availableBytes": 84379979776, + "capacityBytes": 105553100800, + "usedBytes": 24576 + }, + "userDefinedMetrics": null + } + ], + "network": { + "time": "2016-09-27T16:57:34Z", + "rxBytes": 70749124, + "rxErrors": 0, + "txBytes": 47813506, + "txErrors": 0 + }, + "volume": [ + { + "availableBytes": 7903948800, + "capacityBytes": 7903961088, + "usedBytes": 12288, + "name": "volume1" + }, + { + "availableBytes": 7903956992, + "capacityBytes": 7903961088, + "usedBytes": 4096, + "name": "volume2" + }, + { + "availableBytes": 7903948800, + "capacityBytes": 7903961088, + "usedBytes": 12288, + "name": "volume3" + }, + { + "availableBytes": 7903952896, + "capacityBytes": 7903961088, + "usedBytes": 8192, + "name": "volume4" + } + ] + } + ] + }` diff --git a/plugins/inputs/logparser/grok/grok.go b/plugins/inputs/logparser/grok/grok.go index 70b75982622e4..b2cabe642fd2d 100644 --- a/plugins/inputs/logparser/grok/grok.go +++ b/plugins/inputs/logparser/grok/grok.go @@ -202,21 +202,21 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { case INT: iv, err := strconv.ParseInt(v, 10, 64) if err != nil { - log.Printf("ERROR parsing %s to int: %s", v, err) + log.Printf("E! Error parsing %s to int: %s", v, err) } else { fields[k] = iv } case FLOAT: fv, err := strconv.ParseFloat(v, 64) if err != nil { - log.Printf("ERROR parsing %s to float: %s", v, err) + log.Printf("E! Error parsing %s to float: %s", v, err) } else { fields[k] = fv } case DURATION: d, err := time.ParseDuration(v) if err != nil { - log.Printf("ERROR parsing %s to duration: %s", v, err) + log.Printf("E! Error parsing %s to duration: %s", v, err) } else { fields[k] = int64(d) } @@ -227,14 +227,14 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { case EPOCH: iv, err := strconv.ParseInt(v, 10, 64) if err != nil { - log.Printf("ERROR parsing %s to int: %s", v, err) + log.Printf("E! Error parsing %s to int: %s", v, err) } else { timestamp = time.Unix(iv, 0) } case EPOCH_NANO: iv, err := strconv.ParseInt(v, 10, 64) if err != nil { - log.Printf("ERROR parsing %s to int: %s", v, err) + log.Printf("E! Error parsing %s to int: %s", v, err) } else { timestamp = time.Unix(0, iv) } @@ -265,7 +265,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { // if we still haven't found a timestamp layout, log it and we will // just use time.Now() if !foundTs { - log.Printf("ERROR parsing timestamp [%s], could not find any "+ + log.Printf("E! Error parsing timestamp [%s], could not find any "+ "suitable time layouts.", v) } case DROP: @@ -275,7 +275,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { if err == nil { timestamp = ts } else { - log.Printf("ERROR parsing %s to time layout [%s]: %s", v, t, err) + log.Printf("E! 
Error parsing %s to time layout [%s]: %s", v, t, err) } } } diff --git a/plugins/inputs/logparser/grok/grok_test.go b/plugins/inputs/logparser/grok/grok_test.go index bc8d980f2951b..105cc048c00a1 100644 --- a/plugins/inputs/logparser/grok/grok_test.go +++ b/plugins/inputs/logparser/grok/grok_test.go @@ -152,6 +152,31 @@ func TestBuiltinCommonLogFormat(t *testing.T) { assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) } +// common log format +// 127.0.0.1 user1234 frank1234 [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 +func TestBuiltinCommonLogFormatWithNumbers(t *testing.T) { + p := &Parser{ + Patterns: []string{"%{COMMON_LOG_FORMAT}"}, + } + assert.NoError(t, p.Compile()) + + // Parse a common log format line whose ident/auth fields contain digits + m, err := p.ParseLine(`127.0.0.1 user1234 frank1234 [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`) + require.NotNil(t, m) + assert.NoError(t, err) + assert.Equal(t, + map[string]interface{}{ + "resp_bytes": int64(2326), + "auth": "frank1234", + "client_ip": "127.0.0.1", + "http_version": float64(1.0), + "ident": "user1234", + "request": "/apache_pb.gif", + }, + m.Fields()) + assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) +} + // combined log format // 127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 "-" "Mozilla" func TestBuiltinCombinedLogFormat(t *testing.T) { diff --git a/plugins/inputs/logparser/grok/influx_patterns.go b/plugins/inputs/logparser/grok/influx_patterns.go index ff9d60ebfb242..0527911404b84 100644 --- a/plugins/inputs/logparser/grok/influx_patterns.go +++ b/plugins/inputs/logparser/grok/influx_patterns.go @@ -53,7 +53,7 @@ RESPONSE_TIME %{DURATION:response_time_ns:duration} EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME} # Wider-ranging username matching vs.
logstash built-in %{USER} -NGUSERNAME [a-zA-Z\.\@\-\+_%]+ +NGUSERNAME [a-zA-Z0-9\.\@\-\+_%]+ NGUSER %{NGUSERNAME} # Wider-ranging client IP matching CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1) @@ -60,7 +60,7 @@ CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1) # apache & nginx logs, this is also known as the "common log format" # see https://en.wikipedia.org/wiki/Common_Log_Format -COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-) +COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NOTSPACE:ident} %{NOTSPACE:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-) # Combined log format is the same as the common log format but with the addition # of two quoted strings at the end for "referrer" and "agent" diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index 8ded03edc3ebd..0778a8a6d7ab2 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -134,7 +134,7 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error { for _, filepath := range l.Files { g, err := globpath.Compile(filepath) if err != nil { - log.Printf("ERROR Glob %s failed to compile, %s", filepath, err) + log.Printf("E! Error Glob %s failed to compile, %s", filepath, err) continue } files := g.Match() @@ -167,7 +167,7 @@ func (l *LogParserPlugin) receiver(tailer *tail.Tail) { var line *tail.Line for line = range tailer.Lines { if line.Err != nil { - log.Printf("ERROR tailing file %s, Error: %s\n", + log.Printf("E! Error tailing file %s, Error: %s\n", tailer.Filename, line.Err) continue } @@ -216,7 +216,7 @@ func (l *LogParserPlugin) Stop() { for _, t := range l.tailers { err := t.Stop() if err != nil { - log.Printf("ERROR stopping tail on file %s\n", t.Filename) + log.Printf("E! Error stopping tail on file %s\n", t.Filename) } t.Cleanup() } diff --git a/plugins/inputs/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go index 75c9a30d79c6b..db0004ce2264f 100644 --- a/plugins/inputs/mailchimp/chimp_api.go +++ b/plugins/inputs/mailchimp/chimp_api.go @@ -134,7 +134,7 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { req.URL.RawQuery = params.String() req.Header.Set("User-Agent", "Telegraf-MailChimp-Plugin") if api.Debug { - log.Printf("Request URL: %s", req.URL.String()) + log.Printf("D! Request URL: %s", req.URL.String()) } resp, err := client.Do(req) @@ -148,7 +148,7 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { return nil, err } if api.Debug { - log.Printf("Response Body:%s", string(body)) + log.Printf("D! Response Body:%s", string(body)) } if err = chimpErrorCheck(body); err != nil { diff --git a/plugins/inputs/mesos/README.md b/plugins/inputs/mesos/README.md index 9151ff9a2e6a4..575396585f345 100644 --- a/plugins/inputs/mesos/README.md +++ b/plugins/inputs/mesos/README.md @@ -35,13 +35,10 @@ For more information, please check the [Mesos Observability Metrics](http://meso # "tasks", # "messages", # ] - ## Include mesos tasks statistics, default is false - # slave_tasks = true ``` By default this plugin is not configured to gather metrics from mesos. Since a mesos cluster can be deployed in numerous ways it does not provide any default -values. 
User needs to specify master/slave nodes this plugin will gather metrics from. Additionally, enabling `slave_tasks` will allow -gathering metrics from tasks running on specified slaves (this option is disabled by default). +values. Users need to specify the master/slave nodes this plugin will gather metrics from. ### Measurements & Fields: @@ -235,31 +232,6 @@ Mesos slave metric groups - slave/valid_framework_messages - slave/valid_status_updates -Mesos tasks metric groups - -- executor_id -- executor_name -- framework_id -- source -- statistics - - cpus_limit - - cpus_system_time_secs - - cpus_user_time_secs - - mem_anon_bytes - - mem_cache_bytes - - mem_critical_pressure_counter - - mem_file_bytes - - mem_limit_bytes - - mem_low_pressure_counter - - mem_mapped_file_bytes - - mem_medium_pressure_counter - - mem_rss_bytes - - mem_swap_bytes - - mem_total_bytes - - mem_total_memsw_bytes - - mem_unevictable_bytes - - timestamp - ### Tags: - All master/slave measurements have the following tags: @@ -269,16 +241,11 @@ Mesos tasks metric groups - state (leader/follower) -- Tasks measurements have the following tags: - - server - - framework_id - - task_id - ### Example Output: ``` $ telegraf -config ~/mesos.conf -input-filter mesos -test * Plugin: mesos, Collection 1 -mesos,role=master,state=leader,host=172.17.8.102,server=172.17.8.101 +mesos,role=master,state=leader,host=172.17.8.102,server=172.17.8.101 allocator/event_queue_dispatches=0,master/cpus_percent=0, master/cpus_revocable_percent=0,master/cpus_revocable_total=0, master/cpus_revocable_used=0,master/cpus_total=2, @@ -297,15 +264,3 @@ master/mem_used=0,master/messages_authenticate=0, master/messages_deactivate_framework=0 ... ``` -Meoso tasks metrics (if enabled): -``` -mesos-tasks,host=172.17.8.102,server=172.17.8.101,framework_id=e3060235-c4ed-4765-9d36-784e3beca07f-0000,task_id=hello-world.e4b5b497-2ccd-11e6-a659-0242fb222ce2 -cpus_limit=0.2,cpus_system_time_secs=142.49,cpus_user_time_secs=388.14, -mem_anon_bytes=359129088,mem_cache_bytes=3964928, -mem_critical_pressure_counter=0,mem_file_bytes=3964928, -mem_limit_bytes=767557632,mem_low_pressure_counter=0, -mem_mapped_file_bytes=114688,mem_medium_pressure_counter=0, -mem_rss_bytes=359129088,mem_swap_bytes=0,mem_total_bytes=363094016, -mem_total_memsw_bytes=363094016,mem_unevictable_bytes=0, -timestamp=1465486052.70525 1465486053052811792... -``` diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index ffcd5969bb32d..e6c68bd7dd321 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -30,7 +30,7 @@ type Mesos struct { MasterCols []string `toml:"master_collections"` Slaves []string SlaveCols []string `toml:"slave_collections"` - SlaveTasks bool + //SlaveTasks bool } var allMetrics = map[Role][]string{ @@ -66,8 +66,6 @@ var sampleConfig = ` # "tasks", # "messages", # ] - ## Include mesos tasks statistics, default is false - # slave_tasks = true ` // SampleConfig returns a sample configuration block @@ -90,7 +88,7 @@ func (m *Mesos) SetDefaults() { } if m.Timeout == 0 { - log.Println("[mesos] Missing timeout value, setting default value (100ms)") + log.Println("I!
[mesos] Missing timeout value, setting default value (100ms)") m.Timeout = 100 } } @@ -121,16 +119,16 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error { return }(v) - if !m.SlaveTasks { - continue - } + // if !m.SlaveTasks { + // continue + // } - wg.Add(1) - go func(c string) { - errorChannel <- m.gatherSlaveTaskMetrics(c, ":5051", acc) - wg.Done() - return - }(v) + // wg.Add(1) + // go func(c string) { + // errorChannel <- m.gatherSlaveTaskMetrics(c, ":5051", acc) + // wg.Done() + // return + // }(v) } wg.Wait() @@ -385,7 +383,7 @@ func getMetrics(role Role, group string) []string { ret, ok := m[group] if !ok { - log.Printf("[mesos] Unkown %s metrics group: %s\n", role, group) + log.Printf("I! [mesos] Unknown %s metrics group: %s\n", role, group) return []string{} } @@ -459,7 +457,6 @@ func (m *Mesos) gatherSlaveTaskMetrics(address string, defaultPort string, acc t } for _, task := range metrics { - tags["task_id"] = task.ExecutorID tags["framework_id"] = task.FrameworkID jf := jsonparser.JSONFlattener{} @@ -468,7 +465,9 @@ func (m *Mesos) gatherSlaveTaskMetrics(address string, defaultPort string, acc t if err != nil { return err } + timestamp := time.Unix(int64(jf.Fields["timestamp"].(float64)), 0) + jf.Fields["executor_id"] = task.ExecutorID acc.AddFields("mesos_tasks", jf.Fields, tags, timestamp) } diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go index 4ea6f6e161dc1..5c83e294c1c33 100644 --- a/plugins/inputs/mesos/mesos_test.go +++ b/plugins/inputs/mesos/mesos_test.go @@ -9,14 +9,14 @@ import ( "os" "testing" - jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" "github.com/influxdata/telegraf/testutil" ) var masterMetrics map[string]interface{} var masterTestServer *httptest.Server var slaveMetrics map[string]interface{} -var slaveTaskMetrics map[string]interface{} + +// var slaveTaskMetrics map[string]interface{} var slaveTestServer *httptest.Server func randUUID() string { @@ -216,31 +216,31 @@ func generateMetrics() { slaveMetrics[k] = rand.Float64() } - slaveTaskMetrics = map[string]interface{}{ - "executor_id": fmt.Sprintf("task_%s", randUUID()), - "executor_name": "Some task description", - "framework_id": randUUID(), - "source": fmt.Sprintf("task_source_%s", randUUID()), - "statistics": map[string]interface{}{ - "cpus_limit": rand.Float64(), - "cpus_system_time_secs": rand.Float64(), - "cpus_user_time_secs": rand.Float64(), - "mem_anon_bytes": float64(rand.Int63()), - "mem_cache_bytes": float64(rand.Int63()), - "mem_critical_pressure_counter": float64(rand.Int63()), - "mem_file_bytes": float64(rand.Int63()), - "mem_limit_bytes": float64(rand.Int63()), - "mem_low_pressure_counter": float64(rand.Int63()), - "mem_mapped_file_bytes": float64(rand.Int63()), - "mem_medium_pressure_counter": float64(rand.Int63()), - "mem_rss_bytes": float64(rand.Int63()), - "mem_swap_bytes": float64(rand.Int63()), - "mem_total_bytes": float64(rand.Int63()), - "mem_total_memsw_bytes": float64(rand.Int63()), - "mem_unevictable_bytes": float64(rand.Int63()), - "timestamp": rand.Float64(), - }, - } + // slaveTaskMetrics = map[string]interface{}{ + // "executor_id": fmt.Sprintf("task_name.%s", randUUID()), + // "executor_name": "Some task description", + // "framework_id": randUUID(), + // "source": fmt.Sprintf("task_source.%s", randUUID()), + // "statistics": map[string]interface{}{ + // "cpus_limit": rand.Float64(), + // "cpus_system_time_secs": rand.Float64(), + // "cpus_user_time_secs": rand.Float64(), + // "mem_anon_bytes":
float64(rand.Int63()), + // "mem_cache_bytes": float64(rand.Int63()), + // "mem_critical_pressure_counter": float64(rand.Int63()), + // "mem_file_bytes": float64(rand.Int63()), + // "mem_limit_bytes": float64(rand.Int63()), + // "mem_low_pressure_counter": float64(rand.Int63()), + // "mem_mapped_file_bytes": float64(rand.Int63()), + // "mem_medium_pressure_counter": float64(rand.Int63()), + // "mem_rss_bytes": float64(rand.Int63()), + // "mem_swap_bytes": float64(rand.Int63()), + // "mem_total_bytes": float64(rand.Int63()), + // "mem_total_memsw_bytes": float64(rand.Int63()), + // "mem_unevictable_bytes": float64(rand.Int63()), + // "timestamp": rand.Float64(), + // }, + // } } func TestMain(m *testing.M) { @@ -260,11 +260,11 @@ func TestMain(m *testing.M) { w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(slaveMetrics) }) - slaveRouter.HandleFunc("/monitor/statistics", func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode([]map[string]interface{}{slaveTaskMetrics}) - }) + // slaveRouter.HandleFunc("/monitor/statistics", func(w http.ResponseWriter, r *http.Request) { + // w.WriteHeader(http.StatusOK) + // w.Header().Set("Content-Type", "application/json") + // json.NewEncoder(w).Encode([]map[string]interface{}{slaveTaskMetrics}) + // }) slaveTestServer = httptest.NewServer(slaveRouter) rc := m.Run() @@ -324,10 +324,10 @@ func TestMesosSlave(t *testing.T) { var acc testutil.Accumulator m := Mesos{ - Masters: []string{}, - Slaves: []string{slaveTestServer.Listener.Addr().String()}, - SlaveTasks: true, - Timeout: 10, + Masters: []string{}, + Slaves: []string{slaveTestServer.Listener.Addr().String()}, + // SlaveTasks: true, + Timeout: 10, } err := m.Gather(&acc) @@ -338,17 +338,17 @@ func TestMesosSlave(t *testing.T) { acc.AssertContainsFields(t, "mesos", slaveMetrics) - jf := jsonparser.JSONFlattener{} - err = jf.FlattenJSON("", slaveTaskMetrics) - - if err != nil { - t.Errorf(err.Error()) - } - - acc.AssertContainsFields( - t, - "mesos_tasks", - slaveTaskMetrics["statistics"].(map[string]interface{})) + // expectedFields := make(map[string]interface{}, len(slaveTaskMetrics["statistics"].(map[string]interface{}))+1) + // for k, v := range slaveTaskMetrics["statistics"].(map[string]interface{}) { + // expectedFields[k] = v + // } + // expectedFields["executor_id"] = slaveTaskMetrics["executor_id"] + + // acc.AssertContainsTaggedFields( + // t, + // "mesos_tasks", + // expectedFields, + // map[string]string{"server": "127.0.0.1", "framework_id": slaveTaskMetrics["framework_id"].(string)}) } func TestSlaveFilter(t *testing.T) { diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index 863e925225c86..e843c70f0fb7c 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -47,7 +47,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error }, }, result_repl) if err != nil { - log.Println("Not gathering replica set status, member not in replica set (" + err.Error() + ")") + log.Println("E! 
Not gathering replica set status, member not in replica set (" + err.Error() + ")") } jumbo_chunks, _ := s.Session.DB("config").C("chunks").Find(bson.M{"jumbo": true}).Count() @@ -62,7 +62,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error names := []string{} names, err = s.Session.DatabaseNames() if err != nil { - log.Println("Error getting database names (" + err.Error() + ")") + log.Println("E! Error getting database names (" + err.Error() + ")") } for _, db_name := range names { db_stat_line := &DbStatsData{} @@ -73,7 +73,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error }, }, db_stat_line) if err != nil { - log.Println("Error getting db stats from " + db_name + "(" + err.Error() + ")") + log.Println("E! Error getting db stats from " + db_name + "(" + err.Error() + ")") } db := &Db{ Name: db_name, diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index beebe00ce3ba5..cfade2944d7e1 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -133,7 +133,7 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { return nil } func (m *MQTTConsumer) onConnect(c mqtt.Client) { - log.Printf("MQTT Client Connected") + log.Printf("I! MQTT Client Connected") if !m.PersistentSession || !m.started { topics := make(map[string]byte) for _, topic := range m.Topics { @@ -142,7 +142,7 @@ func (m *MQTTConsumer) onConnect(c mqtt.Client) { subscribeToken := c.SubscribeMultiple(topics, m.recvMessage) subscribeToken.Wait() if subscribeToken.Error() != nil { - log.Printf("MQTT SUBSCRIBE ERROR\ntopics: %s\nerror: %s", + log.Printf("E! MQTT Subscribe Error\ntopics: %s\nerror: %s", strings.Join(m.Topics[:], ","), subscribeToken.Error()) } m.started = true @@ -151,7 +151,7 @@ func (m *MQTTConsumer) onConnect(c mqtt.Client) { } func (m *MQTTConsumer) onConnectionLost(c mqtt.Client, err error) { - log.Printf("MQTT Connection lost\nerror: %s\nMQTT Client will try to reconnect", err.Error()) + log.Printf("E! MQTT Connection lost\nerror: %s\nMQTT Client will try to reconnect", err.Error()) return } @@ -166,7 +166,7 @@ func (m *MQTTConsumer) receiver() { topic := msg.Topic() metrics, err := m.parser.Parse(msg.Payload()) if err != nil { - log.Printf("MQTT PARSE ERROR\nmessage: %s\nerror: %s", + log.Printf("E! 
MQTT Parse Error\nmessage: %s\nerror: %s", string(msg.Payload()), err.Error()) } diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index ef39a65ed3fa4..ece5e14477d4e 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -4,16 +4,16 @@ import ( "bytes" "database/sql" "fmt" - "net/url" "strconv" "strings" "sync" "time" - _ "github.com/go-sql-driver/mysql" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/errchan" "github.com/influxdata/telegraf/plugins/inputs" + + "github.com/go-sql-driver/mysql" ) type Mysql struct { @@ -69,13 +69,13 @@ var sampleConfig = ` ## gather metrics from SHOW BINARY LOGS command output gather_binary_logs = false # - ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMART_BY_TABLE + ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE gather_table_io_waits = false # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS gather_table_lock_waits = false # - ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMART_BY_INDEX_USAGE + ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE gather_index_io_waits = false # ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS @@ -313,6 +313,10 @@ var mappings = []*mapping{ onServer: "wsrep_", inExport: "wsrep_", }, + { + onServer: "Uptime_", + inExport: "uptime_", + }, } var ( @@ -394,27 +398,6 @@ var ( } ) -func dsnAddTimeout(dsn string) (string, error) { - - // DSN "?timeout=5s" is not valid, but "/?timeout=5s" is valid ("" and "/" - // are the same DSN) - if dsn == "" { - dsn = "/" - } - u, err := url.Parse(dsn) - if err != nil { - return "", err - } - v := u.Query() - - // Only override timeout if not already defined - if _, ok := v["timeout"]; ok == false { - v.Add("timeout", defaultTimeout.String()) - u.RawQuery = v.Encode() - } - return u.String(), nil -} - // Math constants const ( picoSeconds = 1e12 @@ -678,10 +661,7 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu var val sql.RawBytes // parse DSN and save server tag - servtag, err := parseDSN(serv) - if err != nil { - servtag = "localhost" - } + servtag := getDSNTag(serv) tags := map[string]string{"server": servtag} fields := make(map[string]interface{}) for rows.Next() { @@ -718,10 +698,7 @@ func (m *Mysql) gatherSlaveStatuses(db *sql.DB, serv string, acc telegraf.Accumu } defer rows.Close() - servtag, err := parseDSN(serv) - if err != nil { - servtag = "localhost" - } + servtag := getDSNTag(serv) tags := map[string]string{"server": servtag} fields := make(map[string]interface{}) @@ -766,11 +743,7 @@ func (m *Mysql) gatherBinaryLogs(db *sql.DB, serv string, acc telegraf.Accumulat defer rows.Close() // parse DSN and save host as a tag - var servtag string - servtag, err = parseDSN(serv) - if err != nil { - servtag = "localhost" - } + servtag := getDSNTag(serv) tags := map[string]string{"server": servtag} var ( size uint64 = 0 @@ -813,11 +786,7 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum } // parse the DSN and save host name as a tag - var servtag string - servtag, err = parseDSN(serv) - if err != nil { - servtag = "localhost" - } + servtag := getDSNTag(serv) tags := map[string]string{"server": servtag} fields := make(map[string]interface{}) for rows.Next() { @@ -928,10 +897,7 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf. 
var servtag string fields := make(map[string]interface{}) - servtag, err = parseDSN(serv) - if err != nil { - servtag = "localhost" - } + servtag = getDSNTag(serv) // mapping of state with its counts stateCounts := make(map[string]uint32, len(generalThreadStates)) @@ -974,10 +940,7 @@ func (m *Mysql) gatherPerfTableIOWaits(db *sql.DB, serv string, acc telegraf.Acc timeFetch, timeInsert, timeUpdate, timeDelete float64 ) - servtag, err = parseDSN(serv) - if err != nil { - servtag = "localhost" - } + servtag = getDSNTag(serv) for rows.Next() { err = rows.Scan(&objSchema, &objName, @@ -1026,10 +989,7 @@ func (m *Mysql) gatherPerfIndexIOWaits(db *sql.DB, serv string, acc telegraf.Acc timeFetch, timeInsert, timeUpdate, timeDelete float64 ) - servtag, err = parseDSN(serv) - if err != nil { - servtag = "localhost" - } + servtag = getDSNTag(serv) for rows.Next() { err = rows.Scan(&objSchema, &objName, &indexName, @@ -1081,10 +1041,7 @@ func (m *Mysql) gatherInfoSchemaAutoIncStatuses(db *sql.DB, serv string, acc tel incValue, maxInt uint64 ) - servtag, err := parseDSN(serv) - if err != nil { - servtag = "localhost" - } + servtag := getDSNTag(serv) for rows.Next() { if err := rows.Scan(&schema, &table, &column, &incValue, &maxInt); err != nil { @@ -1128,10 +1085,7 @@ func (m *Mysql) gatherPerfTableLockWaits(db *sql.DB, serv string, acc telegraf.A } defer rows.Close() - servtag, err := parseDSN(serv) - if err != nil { - servtag = "localhost" - } + servtag := getDSNTag(serv) var ( objectSchema string @@ -1253,10 +1207,7 @@ func (m *Mysql) gatherPerfEventWaits(db *sql.DB, serv string, acc telegraf.Accum starCount, timeWait float64 ) - servtag, err := parseDSN(serv) - if err != nil { - servtag = "localhost" - } + servtag := getDSNTag(serv) tags := map[string]string{ "server": servtag, } @@ -1291,10 +1242,7 @@ func (m *Mysql) gatherPerfFileEventsStatuses(db *sql.DB, serv string, acc telegr sumNumBytesRead, sumNumBytesWrite float64 ) - servtag, err := parseDSN(serv) - if err != nil { - servtag = "localhost" - } + servtag := getDSNTag(serv) tags := map[string]string{ "server": servtag, } @@ -1361,10 +1309,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf noIndexUsed float64 ) - servtag, err := parseDSN(serv) - if err != nil { - servtag = "localhost" - } + servtag := getDSNTag(serv) tags := map[string]string{ "server": servtag, } @@ -1408,14 +1353,8 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf // gatherTableSchema can be used to gather stats on each schema func (m *Mysql) gatherTableSchema(db *sql.DB, serv string, acc telegraf.Accumulator) error { - var ( - dbList []string - servtag string - ) - servtag, err := parseDSN(serv) - if err != nil { - servtag = "localhost" - } + var dbList []string + servtag := getDSNTag(serv) // if the list of databases if empty, then get all databases if len(m.TableSchemaDatabases) == 0 { @@ -1571,6 +1510,27 @@ func copyTags(in map[string]string) map[string]string { return out } +func dsnAddTimeout(dsn string) (string, error) { + conf, err := mysql.ParseDSN(dsn) + if err != nil { + return "", err + } + + if conf.Timeout == 0 { + conf.Timeout = time.Second * 5 + } + + return conf.FormatDSN(), nil +} + +func getDSNTag(dsn string) string { + conf, err := mysql.ParseDSN(dsn) + if err != nil { + return "127.0.0.1:3306" + } + return conf.Addr +} + func init() { inputs.Add("mysql", func() telegraf.Input { return &Mysql{} diff --git a/plugins/inputs/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go index 
3ab9187b5e201..5356e7bd4eaba 100644 --- a/plugins/inputs/mysql/mysql_test.go +++ b/plugins/inputs/mysql/mysql_test.go @@ -26,7 +26,7 @@ func TestMysqlDefaultsToLocal(t *testing.T) { assert.True(t, acc.HasMeasurement("mysql")) } -func TestMysqlParseDSN(t *testing.T) { +func TestMysqlGetDSNTag(t *testing.T) { tests := []struct { input string output string @@ -78,9 +78,9 @@ func TestMysqlParseDSN(t *testing.T) { } for _, test := range tests { - output, _ := parseDSN(test.input) + output := getDSNTag(test.input) if output != test.output { - t.Errorf("Expected %s, got %s\n", test.output, output) + t.Errorf("Input: %s Expected %s, got %s\n", test.input, test.output, output) } } } @@ -92,7 +92,7 @@ func TestMysqlDNSAddTimeout(t *testing.T) { }{ { "", - "/?timeout=5s", + "tcp(127.0.0.1:3306)/?timeout=5s", }, { "tcp(192.168.1.1:3306)/", @@ -104,7 +104,19 @@ func TestMysqlDNSAddTimeout(t *testing.T) { }, { "root:passwd@tcp(192.168.1.1:3306)/?tls=false&timeout=10s", - "root:passwd@tcp(192.168.1.1:3306)/?tls=false&timeout=10s", + "root:passwd@tcp(192.168.1.1:3306)/?timeout=10s&tls=false", + }, + { + "tcp(10.150.1.123:3306)/", + "tcp(10.150.1.123:3306)/?timeout=5s", + }, + { + "root:@!~(*&$#%(&@#(@&#Password@tcp(10.150.1.123:3306)/", + "root:@!~(*&$#%(&@#(@&#Password@tcp(10.150.1.123:3306)/?timeout=5s", + }, + { + "root:Test3a#@!@tcp(10.150.1.123:3306)/", + "root:Test3a#@!@tcp(10.150.1.123:3306)/?timeout=5s", }, } diff --git a/plugins/inputs/mysql/parse_dsn.go b/plugins/inputs/mysql/parse_dsn.go deleted file mode 100644 index bbe9482685842..0000000000000 --- a/plugins/inputs/mysql/parse_dsn.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
- -package mysql - -import ( - "errors" - "strings" -) - -// parseDSN parses the DSN string to a config -func parseDSN(dsn string) (string, error) { - //var user, passwd string - var addr, net string - - // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN] - // Find the last '/' (since the password or the net addr might contain a '/') - for i := len(dsn) - 1; i >= 0; i-- { - if dsn[i] == '/' { - var j, k int - - // left part is empty if i <= 0 - if i > 0 { - // [username[:password]@][protocol[(address)]] - // Find the last '@' in dsn[:i] - for j = i; j >= 0; j-- { - if dsn[j] == '@' { - // username[:password] - // Find the first ':' in dsn[:j] - for k = 0; k < j; k++ { - if dsn[k] == ':' { - //passwd = dsn[k+1 : j] - break - } - } - //user = dsn[:k] - - break - } - } - - // [protocol[(address)]] - // Find the first '(' in dsn[j+1:i] - for k = j + 1; k < i; k++ { - if dsn[k] == '(' { - // dsn[i-1] must be == ')' if an address is specified - if dsn[i-1] != ')' { - if strings.ContainsRune(dsn[k+1:i], ')') { - return "", errors.New("Invalid DSN unescaped") - } - return "", errors.New("Invalid DSN Addr") - } - addr = dsn[k+1 : i-1] - break - } - } - net = dsn[j+1 : k] - } - - break - } - } - - // Set default network if empty - if net == "" { - net = "tcp" - } - - // Set default address if empty - if addr == "" { - switch net { - case "tcp": - addr = "127.0.0.1:3306" - case "unix": - addr = "/tmp/mysql.sock" - default: - return "", errors.New("Default addr for network '" + net + "' unknown") - } - } - - return addr, nil -} diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index 232d5740f61c5..cbb85e0162e9b 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -28,12 +28,17 @@ type natsConsumer struct { Servers []string Secure bool + // Client pending limits: + PendingMessageLimit int + PendingBytesLimit int + // Legacy metric buffer support MetricBuffer int parser parsers.Parser sync.Mutex + wg sync.WaitGroup Conn *nats.Conn Subs []*nats.Subscription @@ -47,13 +52,18 @@ type natsConsumer struct { var sampleConfig = ` ## urls of NATS servers - servers = ["nats://localhost:4222"] + # servers = ["nats://localhost:4222"] ## Use Transport Layer Security - secure = false + # secure = false ## subject(s) to consume - subjects = ["telegraf"] + # subjects = ["telegraf"] ## name a queue group - queue_group = "telegraf_consumers" + # queue_group = "telegraf_consumers" + + ## Sets the limits for pending msgs and bytes for each subscription + ## These shouldn't need to be adjusted except in very high throughput scenarios + # pending_message_limit = 65536 + # pending_bytes_limit = 67108864 ## Data format to consume. 
## Each data format has it's own unique set of configuration options, read @@ -91,8 +101,15 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error { var connectErr error + // set default NATS connection options opts := nats.DefaultOptions + + // override max reconnection tries + opts.MaxReconnect = -1 + + // override servers if any were specified opts.Servers = n.Servers + opts.Secure = n.Secure if n.Conn == nil || n.Conn.IsClosed() { @@ -105,12 +122,22 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error { n.errs = make(chan error) n.Conn.SetErrorHandler(n.natsErrHandler) - n.in = make(chan *nats.Msg) + n.in = make(chan *nats.Msg, 1000) for _, subj := range n.Subjects { - sub, err := n.Conn.ChanQueueSubscribe(subj, n.QueueGroup, n.in) + sub, err := n.Conn.QueueSubscribe(subj, n.QueueGroup, func(m *nats.Msg) { + n.in <- m + }) if err != nil { return err } + // ensure that the subscription has been processed by the server + if err = n.Conn.Flush(); err != nil { + return err + } + // set the subscription pending limits + if err = sub.SetPendingLimits(n.PendingMessageLimit, n.PendingBytesLimit); err != nil { + return err + } n.Subs = append(n.Subs, sub) } } @@ -118,8 +145,9 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error { n.done = make(chan struct{}) // Start the message reader + n.wg.Add(1) go n.receiver() - log.Printf("Started the NATS consumer service, nats: %v, subjects: %v, queue: %v\n", + log.Printf("I! Started the NATS consumer service, nats: %v, subjects: %v, queue: %v\n", n.Conn.ConnectedUrl(), n.Subjects, n.QueueGroup) return nil @@ -128,36 +156,30 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error { // receiver() reads all incoming messages from NATS, and parses them into // telegraf metrics. func (n *natsConsumer) receiver() { - defer n.clean() + defer n.wg.Done() for { select { case <-n.done: return case err := <-n.errs: - log.Printf("error reading from %s\n", err.Error()) + log.Printf("E! error reading from %s\n", err.Error()) case msg := <-n.in: metrics, err := n.parser.Parse(msg.Data) if err != nil { - log.Printf("subject: %s, error: %s", msg.Subject, err.Error()) + log.Printf("E! subject: %s, error: %s", msg.Subject, err.Error()) } for _, metric := range metrics { n.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time()) } - } } } func (n *natsConsumer) clean() { - n.Lock() - defer n.Unlock() - close(n.in) - close(n.errs) - for _, sub := range n.Subs { if err := sub.Unsubscribe(); err != nil { - log.Printf("Error unsubscribing from subject %s in queue %s: %s\n", + log.Printf("E! 
Error unsubscribing from subject %s in queue %s: %s\n", sub.Subject, sub.Queue, err.Error()) } } @@ -170,6 +192,8 @@ func (n *natsConsumer) clean() { func (n *natsConsumer) Stop() { n.Lock() close(n.done) + n.wg.Wait() + n.clean() n.Unlock() } @@ -179,6 +203,13 @@ func (n *natsConsumer) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("nats_consumer", func() telegraf.Input { - return &natsConsumer{} + return &natsConsumer{ + Servers: []string{"nats://localhost:4222"}, + Secure: false, + Subjects: []string{"telegraf"}, + QueueGroup: "telegraf_consumers", + PendingBytesLimit: nats.DefaultSubPendingBytesLimit, + PendingMessageLimit: nats.DefaultSubPendingMsgsLimit, + } }) } diff --git a/plugins/inputs/nats_consumer/nats_consumer_test.go b/plugins/inputs/nats_consumer/nats_consumer_test.go index 75fde66a6ca1d..206714b1a5549 100644 --- a/plugins/inputs/nats_consumer/nats_consumer_test.go +++ b/plugins/inputs/nats_consumer/nats_consumer_test.go @@ -39,6 +39,7 @@ func TestRunParser(t *testing.T) { defer close(n.done) n.parser, _ = parsers.NewInfluxParser() + n.wg.Add(1) go n.receiver() in <- natsMsg(testMsg) time.Sleep(time.Millisecond * 25) @@ -56,6 +57,7 @@ func TestRunParserInvalidMsg(t *testing.T) { defer close(n.done) n.parser, _ = parsers.NewInfluxParser() + n.wg.Add(1) go n.receiver() in <- natsMsg(invalidMsg) time.Sleep(time.Millisecond * 25) @@ -73,6 +75,7 @@ func TestRunParserAndGather(t *testing.T) { defer close(n.done) n.parser, _ = parsers.NewInfluxParser() + n.wg.Add(1) go n.receiver() in <- natsMsg(testMsg) time.Sleep(time.Millisecond * 25) @@ -91,6 +94,7 @@ func TestRunParserAndGatherGraphite(t *testing.T) { defer close(n.done) n.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) + n.wg.Add(1) go n.receiver() in <- natsMsg(testMsgGraphite) time.Sleep(time.Millisecond * 25) @@ -109,6 +113,7 @@ func TestRunParserAndGatherJSON(t *testing.T) { defer close(n.done) n.parser, _ = parsers.NewJSONParser("nats_json_test", []string{}, nil) + n.wg.Add(1) go n.receiver() in <- natsMsg(testMsgJSON) time.Sleep(time.Millisecond * 25) diff --git a/plugins/inputs/nsq_consumer/nsq_consumer.go b/plugins/inputs/nsq_consumer/nsq_consumer.go index b227b7e5029d7..d4f4e9679d884 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer.go @@ -62,7 +62,7 @@ func (n *NSQConsumer) Start(acc telegraf.Accumulator) error { n.consumer.AddConcurrentHandlers(nsq.HandlerFunc(func(message *nsq.Message) error { metrics, err := n.parser.Parse(message.Body) if err != nil { - log.Printf("NSQConsumer Parse Error\nmessage:%s\nerror:%s", string(message.Body), err.Error()) + log.Printf("E! NSQConsumer Parse Error\nmessage:%s\nerror:%s", string(message.Body), err.Error()) return nil } for _, metric := range metrics { diff --git a/plugins/inputs/ntpq/ntpq.go b/plugins/inputs/ntpq/ntpq.go index 0bcaa04e5ac7d..674cd7216d629 100644 --- a/plugins/inputs/ntpq/ntpq.go +++ b/plugins/inputs/ntpq/ntpq.go @@ -132,7 +132,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { case strings.HasSuffix(when, "h"): m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "h")) if err != nil { - log.Printf("ERROR ntpq: parsing int: %s", fields[index]) + log.Printf("E! 
Error ntpq: parsing int: %s", fields[index]) continue } // seconds in an hour @@ -141,7 +141,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { case strings.HasSuffix(when, "d"): m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "d")) if err != nil { - log.Printf("ERROR ntpq: parsing int: %s", fields[index]) + log.Printf("E! Error ntpq: parsing int: %s", fields[index]) continue } // seconds in a day @@ -150,7 +150,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { case strings.HasSuffix(when, "m"): m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "m")) if err != nil { - log.Printf("ERROR ntpq: parsing int: %s", fields[index]) + log.Printf("E! Error ntpq: parsing int: %s", fields[index]) continue } // seconds in a day @@ -161,7 +161,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { m, err := strconv.Atoi(fields[index]) if err != nil { - log.Printf("ERROR ntpq: parsing int: %s", fields[index]) + log.Printf("E! Error ntpq: parsing int: %s", fields[index]) continue } mFields[key] = int64(m) @@ -178,7 +178,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { m, err := strconv.ParseFloat(fields[index], 64) if err != nil { - log.Printf("ERROR ntpq: parsing float: %s", fields[index]) + log.Printf("E! Error ntpq: parsing float: %s", fields[index]) continue } mFields[key] = m diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index 3b23ef92ce12c..d7a14d0eee81a 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -122,6 +122,9 @@ func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { fcgiIp := socketAddr[0] fcgiPort, _ := strconv.Atoi(socketAddr[1]) fcgi, err = newFcgiClient(fcgiIp, fcgiPort) + if err != nil { + return err + } if len(u.Path) > 1 { statusPath = strings.Trim(u.Path, "/") } else { diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index b6baa7d99df3e..089248efe237f 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -52,13 +52,13 @@ const sampleConfig = ` ## urls to ping urls = ["www.google.com"] # required ## number of pings to send per collection (ping -c ) - count = 1 # required + # count = 1 ## interval, in s, at which to ping. 0 == default (ping -i ) - ping_interval = 0.0 + # ping_interval = 1.0 ## per-ping timeout, in s. 0 == no timeout (ping -W ) - timeout = 1.0 + # timeout = 1.0 ## interface to send ping from (ping -I ) - interface = "" + # interface = "" ` func (_ *Ping) SampleConfig() string { @@ -200,6 +200,11 @@ func processPingOutput(out string) (int, int, float64, error) { func init() { inputs.Add("ping", func() telegraf.Input { - return &Ping{pingHost: hostPinger} + return &Ping{ + pingHost: hostPinger, + PingInterval: 1.0, + Count: 1, + Timeout: 1.0, + } }) } diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index ec281fca26eef..beb010fce076c 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -29,6 +29,7 @@ type Postgresql struct { Tagvalue string Measurement string } + Debug bool } type query []struct { @@ -269,9 +270,7 @@ func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumula fields := make(map[string]interface{}) COLUMN: for col, val := range columnMap { - if acc.Debug() { - log.Printf("postgresql_extensible: column: %s = %T: %s\n", col, *val, *val) - } + log.Printf("D! 
postgresql_extensible: column: %s = %T: %s\n", col, *val, *val) _, ignore := ignoredColumns[col] if ignore || *val == nil { continue diff --git a/plugins/inputs/powerdns/powerdns.go b/plugins/inputs/powerdns/powerdns.go index 0824ff672da84..68b1696e0611f 100644 --- a/plugins/inputs/powerdns/powerdns.go +++ b/plugins/inputs/powerdns/powerdns.go @@ -4,6 +4,7 @@ import ( "bufio" "fmt" "io" + "log" "net" "strconv" "strings" @@ -86,10 +87,7 @@ func (p *Powerdns) gatherServer(address string, acc telegraf.Accumulator) error metrics := string(buf) // Process data - fields, err := parseResponse(metrics) - if err != nil { - return err - } + fields := parseResponse(metrics) // Add server socket as a tag tags := map[string]string{"server": address} @@ -99,22 +97,27 @@ func (p *Powerdns) gatherServer(address string, acc telegraf.Accumulator) error return nil } -func parseResponse(metrics string) (map[string]interface{}, error) { +func parseResponse(metrics string) map[string]interface{} { values := make(map[string]interface{}) s := strings.Split(metrics, ",") for _, metric := range s[:len(s)-1] { m := strings.Split(metric, "=") + if len(m) < 2 { + continue + } i, err := strconv.ParseInt(m[1], 10, 64) if err != nil { - return values, err + log.Printf("E! powerdns: Error parsing integer for metric [%s]: %s", + metric, err) + continue } values[m[0]] = i } - return values, nil + return values } func init() { diff --git a/plugins/inputs/powerdns/powerdns_test.go b/plugins/inputs/powerdns/powerdns_test.go index b0d883d0b809d..78845c23d7bac 100644 --- a/plugins/inputs/powerdns/powerdns_test.go +++ b/plugins/inputs/powerdns/powerdns_test.go @@ -25,6 +25,30 @@ var metrics = "corrupt-packets=0,deferred-cache-inserts=0,deferred-cache-lookup= "key-cache-size=0,latency=26,meta-cache-size=0,qsize-q=0," + "signature-cache-size=0,sys-msec=2889,uptime=86317,user-msec=2167," +// first metric has no "=" +var corruptMetrics = "corrupt-packets--0,deferred-cache-inserts=0,deferred-cache-lookup=0," + + "dnsupdate-answers=0,dnsupdate-changes=0,dnsupdate-queries=0," + + "dnsupdate-refused=0,packetcache-hit=0,packetcache-miss=1,packetcache-size=0," + + "query-cache-hit=0,query-cache-miss=6,rd-queries=1,recursing-answers=0," + + "recursing-questions=0,recursion-unanswered=0,security-status=3," + + "servfail-packets=0,signatures=0,tcp-answers=0,tcp-queries=0," + + "timedout-packets=0,udp-answers=1,udp-answers-bytes=50,udp-do-queries=0," + + "udp-queries=0,udp4-answers=1,udp4-queries=1,udp6-answers=0,udp6-queries=0," + + "key-cache-size=0,latency=26,meta-cache-size=0,qsize-q=0," + + "signature-cache-size=0,sys-msec=2889,uptime=86317,user-msec=2167," + +// integer overflow +var intOverflowMetrics = "corrupt-packets=18446744073709550195,deferred-cache-inserts=0,deferred-cache-lookup=0," + + "dnsupdate-answers=0,dnsupdate-changes=0,dnsupdate-queries=0," + + "dnsupdate-refused=0,packetcache-hit=0,packetcache-miss=1,packetcache-size=0," + + "query-cache-hit=0,query-cache-miss=6,rd-queries=1,recursing-answers=0," + + "recursing-questions=0,recursion-unanswered=0,security-status=3," + + "servfail-packets=0,signatures=0,tcp-answers=0,tcp-queries=0," + + "timedout-packets=0,udp-answers=1,udp-answers-bytes=50,udp-do-queries=0," + + "udp-queries=0,udp4-answers=1,udp4-queries=1,udp6-answers=0,udp6-queries=0," + + "key-cache-size=0,latency=26,meta-cache-size=0,qsize-q=0," + + "signature-cache-size=0,sys-msec=2889,uptime=86317,user-msec=2167," + func (s statServer) serverSocket(l net.Listener) { for { @@ -86,8 +110,7 @@ func 
TestMemcachedGeneratesMetrics(t *testing.T) { } func TestPowerdnsParseMetrics(t *testing.T) { - values, err := parseResponse(metrics) - require.NoError(t, err, "Error parsing memcached response") + values := parseResponse(metrics) tests := []struct { key string @@ -145,3 +168,121 @@ func TestPowerdnsParseMetrics(t *testing.T) { } } } + +func TestPowerdnsParseCorruptMetrics(t *testing.T) { + values := parseResponse(corruptMetrics) + + tests := []struct { + key string + value int64 + }{ + {"deferred-cache-inserts", 0}, + {"deferred-cache-lookup", 0}, + {"dnsupdate-answers", 0}, + {"dnsupdate-changes", 0}, + {"dnsupdate-queries", 0}, + {"dnsupdate-refused", 0}, + {"packetcache-hit", 0}, + {"packetcache-miss", 1}, + {"packetcache-size", 0}, + {"query-cache-hit", 0}, + {"query-cache-miss", 6}, + {"rd-queries", 1}, + {"recursing-answers", 0}, + {"recursing-questions", 0}, + {"recursion-unanswered", 0}, + {"security-status", 3}, + {"servfail-packets", 0}, + {"signatures", 0}, + {"tcp-answers", 0}, + {"tcp-queries", 0}, + {"timedout-packets", 0}, + {"udp-answers", 1}, + {"udp-answers-bytes", 50}, + {"udp-do-queries", 0}, + {"udp-queries", 0}, + {"udp4-answers", 1}, + {"udp4-queries", 1}, + {"udp6-answers", 0}, + {"udp6-queries", 0}, + {"key-cache-size", 0}, + {"latency", 26}, + {"meta-cache-size", 0}, + {"qsize-q", 0}, + {"signature-cache-size", 0}, + {"sys-msec", 2889}, + {"uptime", 86317}, + {"user-msec", 2167}, + } + + for _, test := range tests { + value, ok := values[test.key] + if !ok { + t.Errorf("Did not find key for metric %s in values", test.key) + continue + } + if value != test.value { + t.Errorf("Metric: %s, Expected: %d, actual: %d", + test.key, test.value, value) + } + } +} + +func TestPowerdnsParseIntOverflowMetrics(t *testing.T) { + values := parseResponse(intOverflowMetrics) + + tests := []struct { + key string + value int64 + }{ + {"deferred-cache-inserts", 0}, + {"deferred-cache-lookup", 0}, + {"dnsupdate-answers", 0}, + {"dnsupdate-changes", 0}, + {"dnsupdate-queries", 0}, + {"dnsupdate-refused", 0}, + {"packetcache-hit", 0}, + {"packetcache-miss", 1}, + {"packetcache-size", 0}, + {"query-cache-hit", 0}, + {"query-cache-miss", 6}, + {"rd-queries", 1}, + {"recursing-answers", 0}, + {"recursing-questions", 0}, + {"recursion-unanswered", 0}, + {"security-status", 3}, + {"servfail-packets", 0}, + {"signatures", 0}, + {"tcp-answers", 0}, + {"tcp-queries", 0}, + {"timedout-packets", 0}, + {"udp-answers", 1}, + {"udp-answers-bytes", 50}, + {"udp-do-queries", 0}, + {"udp-queries", 0}, + {"udp4-answers", 1}, + {"udp4-queries", 1}, + {"udp6-answers", 0}, + {"udp6-queries", 0}, + {"key-cache-size", 0}, + {"latency", 26}, + {"meta-cache-size", 0}, + {"qsize-q", 0}, + {"signature-cache-size", 0}, + {"sys-msec", 2889}, + {"uptime", 86317}, + {"user-msec", 2167}, + } + + for _, test := range tests { + value, ok := values[test.key] + if !ok { + t.Errorf("Did not find key for metric %s in values", test.key) + continue + } + if value != test.value { + t.Errorf("Metric: %s, Expected: %d, actual: %d", + test.key, test.value, value) + } + } +} diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index ef96500a36f67..d31120743244f 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -10,7 +10,7 @@ The plugin will tag processes by their PID and their process name. Processes can be specified either by pid file, by executable name, by command line pattern matching, or by username (in this order or priority. 
Procstat plugin will use `pgrep` when executable name is provided to obtain the pid. -Proctstas plugin will transmit IO, memory, cpu, file descriptor related +Procstat plugin will transmit IO, memory, cpu, file descriptor related measurements for every process specified. A prefix can be set to isolate individual process specific measurements. diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 358dc4c0f3db6..e29b5031cbc62 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -66,7 +66,7 @@ func (_ *Procstat) Description() string { func (p *Procstat) Gather(acc telegraf.Accumulator) error { err := p.createProcesses() if err != nil { - log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s", + log.Printf("E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s", p.Exe, p.PidFile, p.Pattern, p.User, err.Error()) } else { for pid, proc := range p.pidmap { diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 8a879d179dab9..237f71c66e87a 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -107,7 +107,8 @@ type Queue struct { Node string Vhost string Durable bool - AutoDelete bool `json:"auto_delete"` + AutoDelete bool `json:"auto_delete"` + IdleSince string `json:"idle_since"` } // Node ... @@ -328,6 +329,7 @@ func gatherQueues(r *RabbitMQ, acc telegraf.Accumulator, errChan chan error) { // common information "consumers": queue.Consumers, "consumer_utilisation": queue.ConsumerUtilisation, + "idle_since": queue.IdleSince, "memory": queue.Memory, // messages information "message_bytes": queue.MessageBytes, diff --git a/plugins/inputs/snmp/CONFIG-EXAMPLES.md b/plugins/inputs/snmp/CONFIG-EXAMPLES.md new file mode 100644 index 0000000000000..a0a52eeb327ef --- /dev/null +++ b/plugins/inputs/snmp/CONFIG-EXAMPLES.md @@ -0,0 +1,65 @@ +Here are a few configuration examples for different use cases. + +### Switch/router interface metrics + +This setup will collect data on all interfaces from three different tables, `IF-MIB::ifTable`, `IF-MIB::ifXTable` and `EtherLike-MIB::dot3StatsTable`. It will also add the name from `IF-MIB::ifDescr` and use that as a tag. Depending on your needs and preferences, you can easily use `IF-MIB::ifName` or `IF-MIB::ifAlias` instead or in addition. The values of these are typically: + + IF-MIB::ifName = Gi0/0/0 + IF-MIB::ifDescr = GigabitEthernet0/0/0 + IF-MIB::ifAlias = ### LAN ### + +This configuration also collects the hostname from the device (`RFC1213-MIB::sysName.0`) and adds it as a tag. So each metric will have the configured host/IP as `agent_host`, the device's self-reported hostname as `hostname`, and the name of the host that collected these metrics as `host`. + +Here is the configuration that you add to your `telegraf.conf`: + +``` +[[inputs.snmp]] + agents = [ "host.example.com" ] + version = 2 + community = "public" + + [[inputs.snmp.field]] + name = "hostname" + oid = "RFC1213-MIB::sysName.0" + is_tag = true + + [[inputs.snmp.field]] + name = "uptime" + oid = "DISMAN-EXPRESSION-MIB::sysUpTimeInstance" + + # IF-MIB::ifTable contains counters on input and output traffic as well as errors and discards.
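+  # (These are the original 32-bit ifTable counters; on fast links they can wrap between polls, +  # so the ifXTable stanza below also collects the newer High Capacity variants.)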
+ [[inputs.snmp.table]] + name = "interface" + inherit_tags = [ "hostname" ] + oid = "IF-MIB::ifTable" + + # Interface tag - used to identify interface in metrics database + [[inputs.snmp.table.field]] + name = "ifDescr" + oid = "IF-MIB::ifDescr" + is_tag = true + + # IF-MIB::ifXTable contains newer High Capacity (HC) counters that do not overflow as fast for a few of the ifTable counters + [[inputs.snmp.table]] + name = "interface" + inherit_tags = [ "hostname" ] + oid = "IF-MIB::ifXTable" + + # Interface tag - used to identify interface in metrics database + [[inputs.snmp.table.field]] + name = "ifDescr" + oid = "IF-MIB::ifDescr" + is_tag = true + + # EtherLike-MIB::dot3StatsTable contains detailed ethernet-level information about what kind of errors have been logged on an interface (such as FCS error, frame too long, etc) + [[inputs.snmp.table]] + name = "interface" + inherit_tags = [ "hostname" ] + oid = "EtherLike-MIB::dot3StatsTable" + + # Interface tag - used to identify interface in metrics database + [[inputs.snmp.table.field]] + name = "ifDescr" + oid = "IF-MIB::ifDescr" + is_tag = true +``` diff --git a/plugins/inputs/snmp/DEBUGGING.md b/plugins/inputs/snmp/DEBUGGING.md new file mode 100644 index 0000000000000..f357c58b51c52 --- /dev/null +++ b/plugins/inputs/snmp/DEBUGGING.md @@ -0,0 +1,53 @@ +# Debugging & Testing SNMP Issues + +### Install net-snmp on your system: + +Mac: + +``` +brew install net-snmp +``` + +### Run an SNMP simulator docker image to get a full MIB on port 161: + +``` +docker run -d -p 161:161/udp xeemetric/snmp-simulator +``` + +### snmpget: + +snmpget corresponds to the inputs.snmp.field configuration. + +```bash +$ # get an snmp field with fully-qualified MIB name. +$ snmpget -v2c -c public localhost:161 system.sysUpTime.0 +DISMAN-EVENT-MIB::sysUpTimeInstance = Timeticks: (1643) 0:00:16.43 + +$ # get an snmp field, outputting the numeric OID. +$ snmpget -On -v2c -c public localhost:161 system.sysUpTime.0 +.1.3.6.1.2.1.1.3.0 = Timeticks: (1638) 0:00:16.38 +``` + +### snmptranslate: + +snmptranslate can be used to translate an OID to a MIB name: + +```bash +$ snmptranslate .1.3.6.1.2.1.1.3.0 +DISMAN-EVENT-MIB::sysUpTimeInstance +``` + +And to convert a partial MIB name to a fully qualified one: + +```bash +$ snmptranslate -IR sysUpTime.0 +DISMAN-EVENT-MIB::sysUpTimeInstance +``` + +And to convert a MIB name to an OID: + +```bash +$ snmptranslate -On -IR system.sysUpTime.0 +.1.3.6.1.2.1.1.3.0 +``` + diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index b5a694abdfa4c..473f2a52bd0c0 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -4,6 +4,8 @@ The SNMP input plugin gathers metrics from SNMP agents. ## Configuration: +See additional SNMP plugin configuration examples [here](./CONFIG-EXAMPLES.md). + ### Example: SNMP data: @@ -67,7 +69,7 @@ Resulting output: #### Configuration via MIB: -This example uses the SNMP data above, but is configured via the MIB. +This example uses the SNMP data above, but is configured via the MIB. The example MIB file can be found in the `testdata` directory. See the [MIB lookups](#mib-lookups) section for more information. Telegraf config: @@ -95,70 +97,75 @@ Resulting output: ### Config parameters -* `agents`: Default: `[]` +* `agents`: Default: `[]` List of SNMP agents to connect to in the form of `IP[:PORT]`. If `:PORT` is unspecified, it defaults to `161`. -* `version`: Default: `2` +* `version`: Default: `2` SNMP protocol version to use. 
-* `community`: Default: `"public"` +* `community`: Default: `"public"` SNMP community to use. -* `max_repetitions`: Default: `50` +* `max_repetitions`: Default: `50` Maximum number of iterations for repeating variables. -* `sec_name`: +* `sec_name`: Security name for authenticated SNMPv3 requests. -* `auth_protocol`: Values: `"MD5"`,`"SHA"`,`""`. Default: `""` +* `auth_protocol`: Values: `"MD5"`,`"SHA"`,`""`. Default: `""` Authentication protocol for authenticated SNMPv3 requests. -* `auth_password`: +* `auth_password`: Authentication password for authenticated SNMPv3 requests. -* `sec_level`: Values: `"noAuthNoPriv"`,`"authNoPriv"`,`"authPriv"`. Default: `"noAuthNoPriv"` +* `sec_level`: Values: `"noAuthNoPriv"`,`"authNoPriv"`,`"authPriv"`. Default: `"noAuthNoPriv"` Security level used for SNMPv3 messages. -* `context_name`: +* `context_name`: Context name used for SNMPv3 requests. -* `priv_protocol`: Values: `"DES"`,`"AES"`,`""`. Default: `""` +* `priv_protocol`: Values: `"DES"`,`"AES"`,`""`. Default: `""` Privacy protocol used for encrypted SNMPv3 messages. -* `priv_password`: +* `priv_password`: Privacy password used for encrypted SNMPv3 messages. -* `name`: +* `name`: Output measurement name. #### Field parameters: -* `oid`: +* `oid`: OID to get. May be a numeric or textual OID. -* `name`: +* `oid_index_suffix`: +The OID sub-identifier to strip off so that the index can be matched against other fields in the table. + +* `name`: Output field/tag name. If not specified, it defaults to the value of `oid`. If `oid` is numeric, an attempt to translate the numeric OID into a textual OID will be made. -* `is_tag`: +* `is_tag`: Output this field as a tag. -* `conversion`: Values: `"float(X)"`,`"float"`,`"int"`,`""`. Default: `""` +* `conversion`: Values: `"float(X)"`,`"float"`,`"int"`,`""`. Default: `""` Converts the value according to the given specification. - `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Effectively just moves the decimal left X places. For example a value of `123` with `float(2)` will result in `1.23`. - `float`: Converts the value into a float with no adjustment. Same as `float(0)`. - `int`: Converts the value into an integer. + - `hwaddr`: Converts the value to a MAC address. + - `ipaddr`: Converts the value to an IP address. #### Table parameters: -* `oid`: +* `oid`: Automatically populates the table's fields using data from the MIB. -* `name`: +* `name`: Output measurement name. If not specified, it defaults to the value of `oid`. If `oid` is numeric, an attempt to translate the numeric OID into a textual OID will be made. -* `inherit_tags`: +* `inherit_tags`: Which tags to inherit from the top-level config and to use in the output of this table's measurement. ### MIB lookups diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 3cd8968b42b29..cc750e7693823 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -20,25 +20,29 @@ import ( const description = `Retrieves SNMP values from remote agents` const sampleConfig = ` agents = [ "127.0.0.1:161" ] + ## Timeout for each SNMP query. timeout = "5s" + ## Number of retries to attempt within timeout. + retries = 3 + ## SNMP version, values can be 1, 2, or 3 version = 2 - # SNMPv1 & SNMPv2 parameters + ## SNMP community string.
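+  ## (used only by SNMP v1 and v2c; SNMPv3 requests use the +  ## sec_name/auth_*/priv_* options shown further down)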
   community = "public"
 
-  # SNMPv2 & SNMPv3 parameters
-  max_repetitions = 50
+  ## The GETBULK max-repetitions parameter
+  max_repetitions = 10
 
-  # SNMPv3 parameters
+  ## SNMPv3 auth parameters
   #sec_name = "myuser"
-  #auth_protocol = "md5"      # Values: "MD5", "SHA", ""
-  #auth_password = "password123"
-  #sec_level = "authNoPriv"   # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
+  #auth_protocol = "md5"      # Values: "MD5", "SHA", ""
+  #auth_password = "pass"
+  #sec_level = "authNoPriv"   # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
   #context_name = ""
-  #priv_protocol = ""         # Values: "DES", "AES", ""
+  #priv_protocol = ""         # Values: "DES", "AES", ""
   #priv_password = ""
 
-  # measurement name
+  ## measurement name
   name = "system"
   [[inputs.snmp.field]]
     name = "hostname"
@@ -53,7 +57,7 @@ const sampleConfig = `
     oid = "HOST-RESOURCES-MIB::hrMemorySize"
 
   [[inputs.snmp.table]]
-    # measurement name
+    ## measurement name
     name = "remote_servers"
     inherit_tags = [ "hostname" ]
     [[inputs.snmp.table.field]]
@@ -68,7 +72,7 @@ const sampleConfig = `
       oid = ".1.0.0.0.1.2"
 
   [[inputs.snmp.table]]
-    # auto populate table's fields using the MIB
+    ## auto populate table's fields using the MIB
     oid = "HOST-RESOURCES-MIB::hrNetworkTable"
 `
 
@@ -105,7 +109,7 @@ type Snmp struct {
 	Community string
 
 	// Parameters for Version 2 & 3
-	MaxRepetitions uint
+	MaxRepetitions uint8
 
 	// Parameters for Version 3
 	ContextName string
@@ -174,33 +178,48 @@ type Table struct {
 	initialized bool
 }
 
-// init() populates Fields if a table OID is provided.
+// init() builds & initializes the nested fields.
 func (t *Table) init() error {
 	if t.initialized {
 		return nil
 	}
+
+	if err := t.initBuild(); err != nil {
+		return err
+	}
+
+	// initialize all the nested fields
+	for i := range t.Fields {
+		if err := t.Fields[i].init(); err != nil {
+			return err
+		}
+	}
+
+	t.initialized = true
+	return nil
+}
+
+// initBuild() populates Fields if a table OID is provided.
+func (t *Table) initBuild() error {
 	if t.Oid == "" {
-		t.initialized = true
 		return nil
 	}
 
-	mibPrefix := ""
-	if err := snmpTranslate(&mibPrefix, &t.Oid, &t.Name); err != nil {
-		return err
+	mibName, _, oidText, _, err := snmpTranslate(t.Oid)
+	if err != nil {
+		return Errorf(err, "translating %s", t.Oid)
 	}
+	if t.Name == "" {
+		t.Name = oidText
+	}
+	mibPrefix := mibName + "::"
+	oidFullName := mibPrefix + oidText
 
 	// first attempt to get the table's tags
 	tagOids := map[string]struct{}{}
 	// We have to guess that the "entry" oid is `t.Oid+".1"`. snmptable and snmptranslate don't seem to have a way to provide the info.
-	if out, err := execCmd("snmptranslate", "-m", "all", "-Td", t.Oid+".1"); err == nil {
+	if out, err := execCmd("snmptranslate", "-Td", oidFullName+".1"); err == nil {
 		lines := bytes.Split(out, []byte{'\n'})
-		// get the MIB name if we didn't get it above
-		if mibPrefix == "" {
-			if i := bytes.Index(lines[0], []byte("::")); i != -1 {
-				mibPrefix = string(lines[0][:i+2])
-			}
-		}
-
 		for _, line := range lines {
 			if !bytes.HasPrefix(line, []byte(" INDEX")) {
 				continue
@@ -223,7 +242,7 @@ func (t *Table) init() error {
 	}
 
 	// this won't actually try to run a query. The `-Ch` will just cause it to dump headers.
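+	// For example, against the TEST::testTable fixture used by the tests this
+	// prints only the header row ("server connections latency") plus a
+	// "No entries" notice; see the mocked snmptable output in snmp_mocks_test.go.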
-	out, err := execCmd("snmptable", "-m", "all", "-Ch", "-Cl", "-c", "public", "127.0.0.1", t.Oid)
+	out, err := execCmd("snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", oidFullName)
 	if err != nil {
 		return Errorf(err, "getting table columns for %s", t.Oid)
 	}
@@ -240,14 +259,6 @@
 		t.Fields = append(t.Fields, Field{Name: col, Oid: mibPrefix + col, IsTag: isTag})
 	}
 
-	// initialize all the nested fields
-	for i := range t.Fields {
-		if err := t.Fields[i].init(); err != nil {
-			return err
-		}
-	}
-
-	t.initialized = true
 	return nil
 }
 
@@ -260,12 +271,16 @@ type Field struct {
 	// off the OID prefix, and use the remainder as the index. For multiple fields
 	// to show up in the same row, they must share the same index.
 	Oid string
+	// OidIndexSuffix is the trailing sub-identifier on a table record OID that will be stripped off to get the record's index.
+	OidIndexSuffix string
 	// IsTag controls whether this OID is output as a tag or a value.
 	IsTag bool
 	// Conversion controls any type conversion that is done on the value.
 	// "float"/"float(0)" will convert the value into a float.
 	// "float(X)" will convert the value into a float, and then move the decimal before the Xth right-most digit.
 	// "int" will convert the value into an integer.
+	// "hwaddr" will convert a 6-byte string to a MAC address.
+	// "ipaddr" will convert the value to an IPv4 or IPv6 address.
 	Conversion string
 
 	initialized bool
@@ -277,8 +292,16 @@ func (f *Field) init() error {
 		return nil
 	}
 
-	if err := snmpTranslate(nil, &f.Oid, &f.Name); err != nil {
-		return err
+	_, oidNum, oidText, conversion, err := snmpTranslate(f.Oid)
+	if err != nil {
+		return Errorf(err, "translating %s", f.Oid)
+	}
+	f.Oid = oidNum
+	if f.Name == "" {
+		f.Name = oidText
+	}
+	if f.Conversion == "" {
+		f.Conversion = conversion
 	}
 
 	//TODO use textual convention conversion from the MIB
@@ -330,8 +353,8 @@ func Errorf(err error, msg string, format ...interface{}) error {
 func init() {
 	inputs.Add("snmp", func() telegraf.Input {
 		return &Snmp{
-			Retries:        5,
-			MaxRepetitions: 50,
+			Retries:        3,
+			MaxRepetitions: 10,
 			Timeout:        internal.Duration{Duration: 5 * time.Second},
 			Version:        2,
 			Community:      "public",
@@ -446,16 +469,38 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) {
 			// index, and being added on the same row.
 			if pkt, err := gs.Get([]string{oid}); err != nil {
 				return nil, Errorf(err, "performing get")
-			} else if pkt != nil && len(pkt.Variables) > 0 && pkt.Variables[0].Type != gosnmp.NoSuchObject {
+			} else if pkt != nil && len(pkt.Variables) > 0 && pkt.Variables[0].Type != gosnmp.NoSuchObject && pkt.Variables[0].Type != gosnmp.NoSuchInstance {
 				ent := pkt.Variables[0]
-				ifv[ent.Name[len(oid):]] = fieldConvert(f.Conversion, ent.Value)
+				fv, err := fieldConvert(f.Conversion, ent.Value)
+				if err != nil {
+					return nil, Errorf(err, "converting %q", ent.Value)
+				}
+				if fvs, ok := fv.(string); !ok || fvs != "" {
+					ifv[""] = fv
+				}
 			}
 		} else {
 			err := gs.Walk(oid, func(ent gosnmp.SnmpPDU) error {
 				if len(ent.Name) <= len(oid) || ent.Name[:len(oid)+1] != oid+"." {
 					return NestedError{} // break the walk
 				}
-				ifv[ent.Name[len(oid):]] = fieldConvert(f.Conversion, ent.Value)
+
+				idx := ent.Name[len(oid):]
+				if f.OidIndexSuffix != "" {
+					if !strings.HasSuffix(idx, f.OidIndexSuffix) {
+						// this entry doesn't match our OidIndexSuffix.
skip it
+						return nil
+					}
+					idx = idx[:len(idx)-len(f.OidIndexSuffix)]
+				}
+
+				fv, err := fieldConvert(f.Conversion, ent.Value)
+				if err != nil {
+					return Errorf(err, "converting %q", ent.Value)
+				}
+				if fvs, ok := fv.(string); !ok || fvs != "" {
+					ifv[idx] = fv
+				}
 				return nil
 			})
 			if err != nil {
@@ -610,7 +655,7 @@ func (s *Snmp) getConnection(agent string) (snmpConnection, error) {
 		}
 	}
 
-	gs.MaxRepetitions = int(s.MaxRepetitions)
+	gs.MaxRepetitions = s.MaxRepetitions
 
 	if s.Version == 3 {
 		gs.ContextName = s.ContextName
@@ -677,14 +722,15 @@ func (s *Snmp) getConnection(agent string) (snmpConnection, error) {
 // "float"/"float(0)" will convert the value into a float.
 // "float(X)" will convert the value into a float, and then move the decimal before the Xth right-most digit.
 // "int" will convert the value into an integer.
+// "hwaddr" will convert the value into a MAC address.
+// "ipaddr" will convert the value into an IP address.
 // "" will convert a byte slice into a string.
-// Any other conv will return the input value unchanged.
-func fieldConvert(conv string, v interface{}) interface{} {
+func fieldConvert(conv string, v interface{}) (interface{}, error) {
 	if conv == "" {
 		if bs, ok := v.([]byte); ok {
-			return string(bs)
+			return string(bs), nil
 		}
-		return v
+		return v, nil
 	}
 
 	var d int
@@ -721,7 +767,9 @@
 			vf, _ := strconv.ParseFloat(vt, 64)
 			v = vf / math.Pow10(d)
 		}
+		return v, nil
 	}
+
 	if conv == "int" {
 		switch vt := v.(type) {
 		case float32:
@@ -753,39 +801,112 @@
 		case string:
 			v, _ = strconv.Atoi(vt)
 		}
+		return v, nil
+	}
+
+	if conv == "hwaddr" {
+		switch vt := v.(type) {
+		case string:
+			v = net.HardwareAddr(vt).String()
+		case []byte:
+			v = net.HardwareAddr(vt).String()
+		default:
+			return nil, fmt.Errorf("invalid type (%T) for hwaddr conversion", v)
+		}
+		return v, nil
+	}
+
+	if conv == "ipaddr" {
+		var ipbs []byte
+
+		switch vt := v.(type) {
+		case string:
+			ipbs = []byte(vt)
+		case []byte:
+			ipbs = vt
+		default:
+			return nil, fmt.Errorf("invalid type (%T) for ipaddr conversion", v)
+		}
+
+		switch len(ipbs) {
+		case 4, 16:
+			v = net.IP(ipbs).String()
+		default:
+			return nil, fmt.Errorf("invalid length (%d) for ipaddr conversion", len(ipbs))
+		}
+
+		return v, nil
 	}
 
-	return v
+	return nil, fmt.Errorf("invalid conversion type '%s'", conv)
 }
 
 // snmpTranslate resolves the given OID.
-// The contents of the oid parameter will be replaced with the numeric oid value.
-// If name is empty, the textual OID value is stored in it. If the textual OID cannot be translated, the numeric OID is stored instead.
-// If mibPrefix is non-nil, the MIB in which the OID was found is stored, with a suffix of "::".
-func snmpTranslate(mibPrefix *string, oid *string, name *string) error { - if strings.ContainsAny(*oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") { - out, err := execCmd("snmptranslate", "-m", "all", "-On", *oid) - if err != nil { - return Errorf(err, "translating %s", *oid) +func snmpTranslate(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { + var out []byte + if strings.ContainsAny(oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") { + out, err = execCmd("snmptranslate", "-Td", "-Ob", oid) + } else { + out, err = execCmd("snmptranslate", "-Td", "-Ob", "-m", "all", oid) + } + if err != nil { + return "", "", "", "", err + } + + bb := bytes.NewBuffer(out) + + oidText, err = bb.ReadString('\n') + if err != nil { + return "", "", "", "", Errorf(err, "getting OID text") + } + oidText = oidText[:len(oidText)-1] + + i := strings.Index(oidText, "::") + if i == -1 { + // was not found in MIB. + if bytes.Index(bb.Bytes(), []byte(" [TRUNCATED]")) >= 0 { + return "", oid, oid, "", nil } - *oid = string(bytes.TrimSuffix(out, []byte{'\n'})) + // not truncated, but not fully found. We still need to parse out numeric OID, so keep going + oidText = oid + } else { + mibName = oidText[:i] + oidText = oidText[i+2:] } - if *name == "" { - out, err := execCmd("snmptranslate", "-m", "all", *oid) + if i := bytes.Index(bb.Bytes(), []byte(" -- TEXTUAL CONVENTION ")); i != -1 { + bb.Next(i + len(" -- TEXTUAL CONVENTION ")) + tc, err := bb.ReadString('\n') if err != nil { - //TODO debug message - *name = *oid + return "", "", "", "", Errorf(err, "getting textual convention") + } + tc = tc[:len(tc)-1] + switch tc { + case "MacAddress", "PhysAddress": + conversion = "hwaddr" + case "InetAddressIPv4", "InetAddressIPv6", "InetAddress": + conversion = "ipaddr" + } + } + + i = bytes.Index(bb.Bytes(), []byte("::= { ")) + bb.Next(i + len("::= { ")) + objs, err := bb.ReadString('}') + if err != nil { + return "", "", "", "", Errorf(err, "getting numeric oid") + } + objs = objs[:len(objs)-1] + for _, obj := range strings.Split(objs, " ") { + if len(obj) == 0 { + continue + } + if i := strings.Index(obj, "("); i != -1 { + obj = obj[i+1:] + oidNum += "." + obj[:strings.Index(obj, ")")] } else { - if i := bytes.Index(out, []byte("::")); i != -1 { - if mibPrefix != nil { - *mibPrefix = string(out[:i+2]) - } - out = out[i+2:] - } - *name = string(bytes.TrimSuffix(out, []byte{'\n'})) + oidNum += "." + obj } } - return nil + return mibName, oidNum, oidText, conversion, nil } diff --git a/plugins/inputs/snmp/snmp_mocks_generate.go b/plugins/inputs/snmp/snmp_mocks_generate.go new file mode 100644 index 0000000000000..c3c041279c99d --- /dev/null +++ b/plugins/inputs/snmp/snmp_mocks_generate.go @@ -0,0 +1,100 @@ +// +build generate + +package main + +import ( + "bufio" + "bytes" + "fmt" + "os" + "os/exec" + "strings" +) + +// This file is a generator used to generate the mocks for the commands used by the tests. + +// These are the commands to be mocked. 
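+// To mock a new command, append it to this list and re-run `go generate`
+// (snmp_test.go carries the corresponding //go:generate directive); the
+// captured results are written into snmp_mocks_test.go below the
+// "BEGIN GO GENERATE CONTENT" marker.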
+var mockedCommands = [][]string{
+	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0"},
+	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.1.1"},
+	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.1.2"},
+	{"snmptranslate", "-Td", "-Ob", "-m", "all", "1.0.0.1.1"},
+	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.1"},
+	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.1.0"},
+	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.4"},
+	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.2.3"},
+	{"snmptranslate", "-Td", "-Ob", ".iso.2.3"},
+	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".999"},
+	{"snmptranslate", "-Td", "-Ob", "TEST::server"},
+	{"snmptranslate", "-Td", "-Ob", "TEST::server.0"},
+	{"snmptranslate", "-Td", "-Ob", "TEST::testTable"},
+	{"snmptranslate", "-Td", "-Ob", "TEST::connections"},
+	{"snmptranslate", "-Td", "-Ob", "TEST::latency"},
+	{"snmptranslate", "-Td", "-Ob", "TEST::hostname"},
+	{"snmptranslate", "-Td", "-Ob", "IF-MIB::ifPhysAddress.1"},
+	{"snmptranslate", "-Td", "-Ob", "BRIDGE-MIB::dot1dTpFdbAddress.1"},
+	{"snmptranslate", "-Td", "-Ob", "TCP-MIB::tcpConnectionLocalAddress.1"},
+	{"snmptranslate", "-Td", "TEST::testTable.1"},
+	{"snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", "TEST::testTable"},
+}
+
+type mockedCommandResult struct {
+	stdout    string
+	stderr    string
+	exitError bool
+}
+
+func main() {
+	if err := generate(); err != nil {
+		fmt.Fprintf(os.Stderr, "error: %s\n", err)
+		os.Exit(1)
+	}
+}
+
+func generate() error {
+	f, err := os.OpenFile("snmp_mocks_test.go", os.O_RDWR, 0644)
+	if err != nil {
+		return err
+	}
+	br := bufio.NewReader(f)
+	var i int64
+	for l, err := br.ReadString('\n'); err == nil; l, err = br.ReadString('\n') {
+		i += int64(len(l))
+		if l == "// BEGIN GO GENERATE CONTENT\n" {
+			break
+		}
+	}
+	f.Truncate(i)
+	f.Seek(i, 0)
+
+	fmt.Fprintf(f, "var mockedCommandResults = map[string]mockedCommandResult{\n")
+
+	for _, cmd := range mockedCommands {
+		ec := exec.Command(cmd[0], cmd[1:]...)
+		out := bytes.NewBuffer(nil)
+		serr := bytes.NewBuffer(nil)
+		ec.Stdout = out
+		ec.Stderr = serr
+		ec.Env = []string{
+			"MIBDIRS=+./testdata",
+		}
+
+		var mcr mockedCommandResult
+		if err := ec.Run(); err != nil {
+			if _, ok := err.(*exec.ExitError); !ok {
+				return fmt.Errorf("executing %v: %s", cmd, err)
+			}
+			// the command ran but exited non-zero; record that so the mock can replay it
+			mcr.exitError = true
+		}
+		mcr.stdout = string(out.Bytes())
+		mcr.stderr = string(serr.Bytes())
+		cmd0 := strings.Join(cmd, "\000")
+		mcrv := fmt.Sprintf("%#v", mcr)[5:] // trim `main.` prefix
+		fmt.Fprintf(f, "%#v: %s,\n", cmd0, mcrv)
+	}
+	f.Write([]byte("}\n"))
+	f.Close()
+
+	return exec.Command("gofmt", "-w", "snmp_mocks_test.go").Run()
+}
diff --git a/plugins/inputs/snmp/snmp_mocks_test.go b/plugins/inputs/snmp/snmp_mocks_test.go
new file mode 100644
index 0000000000000..2f67335d4f148
--- /dev/null
+++ b/plugins/inputs/snmp/snmp_mocks_test.go
@@ -0,0 +1,85 @@
+package snmp
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"strings"
+	"testing"
+)
+
+type mockedCommandResult struct {
+	stdout    string
+	stderr    string
+	exitError bool
+}
+
+func mockExecCommand(arg0 string, args ...string) *exec.Cmd {
+	args = append([]string{"-test.run=TestMockExecCommand", "--", arg0}, args...)
+	cmd := exec.Command(os.Args[0], args...)
+	cmd.Stderr = os.Stderr // so the test output shows errors
+	return cmd
+}
+
+// This is not a real test. This is just a way of mocking out commands.
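+// The test binary re-executes itself (see mockExecCommand above) with the
+// requested command after "--", and this function replays the recorded
+// stdout, stderr, and exit status for that command.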
+// +// Idea based on https://github.com/golang/go/blob/7c31043/src/os/exec/exec_test.go#L568 +func TestMockExecCommand(t *testing.T) { + var cmd []string + for _, arg := range os.Args { + if string(arg) == "--" { + cmd = []string{} + continue + } + if cmd == nil { + continue + } + cmd = append(cmd, string(arg)) + } + if cmd == nil { + return + } + + cmd0 := strings.Join(cmd, "\000") + mcr, ok := mockedCommandResults[cmd0] + if !ok { + cv := fmt.Sprintf("%#v", cmd)[8:] // trim `[]string` prefix + fmt.Fprintf(os.Stderr, "Unmocked command. Please add the following to `mockedCommands` in snmp_mocks_generate.go, and then run `go generate`:\n\t%s,\n", cv) + os.Exit(1) + } + fmt.Printf("%s", mcr.stdout) + fmt.Fprintf(os.Stderr, "%s", mcr.stderr) + if mcr.exitError { + os.Exit(1) + } + os.Exit(0) +} + +func init() { + execCommand = mockExecCommand +} + +// BEGIN GO GENERATE CONTENT +var mockedCommandResults = map[string]mockedCommandResult{ + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0": mockedCommandResult{stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.1": mockedCommandResult{stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.2": mockedCommandResult{stdout: "TEST::1.2\nanonymous#1 OBJECT-TYPE\n -- FROM\tTEST\n::= { iso(1) 0 testOID(0) 1 2 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x001.0.0.1.1": mockedCommandResult{stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1": mockedCommandResult{stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1.0": mockedCommandResult{stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.4": mockedCommandResult{stdout: "TEST::testTableEntry.4\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 4 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.2.3": mockedCommandResult{stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00.iso.2.3": mockedCommandResult{stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.999": mockedCommandResult{stdout: ".999\n [TRUNCATED]\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::server": mockedCommandResult{stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n 
MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::server.0": mockedCommandResult{stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::testTable": mockedCommandResult{stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::connections": mockedCommandResult{stdout: "TEST::connections\nconnections OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tINTEGER\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 2 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::latency": mockedCommandResult{stdout: "TEST::latency\nlatency OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 3 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::hostname": mockedCommandResult{stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00IF-MIB::ifPhysAddress.1": mockedCommandResult{stdout: "IF-MIB::ifPhysAddress.1\nifPhysAddress OBJECT-TYPE\n -- FROM\tIF-MIB\n -- TEXTUAL CONVENTION PhysAddress\n SYNTAX\tOCTET STRING\n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"The interface's address at its protocol sub-layer. For\n example, for an 802.x interface, this object normally\n contains a MAC address. The interface's media-specific MIB\n must define the bit and byte ordering and the format of the\n value of this object. For interfaces which do not have such\n an address (e.g., a serial line), this object should contain\n an octet string of zero length.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) interfaces(2) ifTable(2) ifEntry(1) ifPhysAddress(6) 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00BRIDGE-MIB::dot1dTpFdbAddress.1": mockedCommandResult{stdout: "BRIDGE-MIB::dot1dTpFdbAddress.1\ndot1dTpFdbAddress OBJECT-TYPE\n -- FROM\tBRIDGE-MIB\n -- TEXTUAL CONVENTION MacAddress\n SYNTAX\tOCTET STRING (6) \n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"A unicast MAC address for which the bridge has\n forwarding and/or filtering information.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) dot1dBridge(17) dot1dTp(4) dot1dTpFdbTable(3) dot1dTpFdbEntry(1) dot1dTpFdbAddress(1) 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TCP-MIB::tcpConnectionLocalAddress.1": mockedCommandResult{stdout: "TCP-MIB::tcpConnectionLocalAddress.1\ntcpConnectionLocalAddress OBJECT-TYPE\n -- FROM\tTCP-MIB\n -- TEXTUAL CONVENTION InetAddress\n SYNTAX\tOCTET STRING (0..255) \n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n DESCRIPTION\t\"The local IP address for this TCP connection. 
The type\n of this address is determined by the value of\n tcpConnectionLocalAddressType.\n\n As this object is used in the index for the\n tcpConnectionTable, implementors should be\n careful not to create entries that would result in OIDs\n with more than 128 subidentifiers; otherwise the information\n cannot be accessed by using SNMPv1, SNMPv2c, or SNMPv3.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) tcp(6) tcpConnectionTable(19) tcpConnectionEntry(1) tcpConnectionLocalAddress(2) 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00TEST::testTable.1": mockedCommandResult{stdout: "TEST::testTableEntry\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) 1 }\n", stderr: "", exitError: false}, + "snmptable\x00-Ch\x00-Cl\x00-c\x00public\x00127.0.0.1\x00TEST::testTable": mockedCommandResult{stdout: "server connections latency \nTEST::testTable: No entries\n", stderr: "", exitError: false}, +} diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 62f3e6c2fda52..6839fdd8f1ef3 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -1,11 +1,9 @@ +//go:generate go run -tags generate snmp_mocks_generate.go package snmp import ( "fmt" "net" - "os" - "os/exec" - "strings" "sync" "testing" "time" @@ -18,77 +16,6 @@ import ( "github.com/stretchr/testify/require" ) -func mockExecCommand(arg0 string, args ...string) *exec.Cmd { - args = append([]string{"-test.run=TestMockExecCommand", "--", arg0}, args...) - cmd := exec.Command(os.Args[0], args...) - cmd.Stderr = os.Stderr // so the test output shows errors - return cmd -} -func TestMockExecCommand(t *testing.T) { - var cmd []string - for _, arg := range os.Args { - if string(arg) == "--" { - cmd = []string{} - continue - } - if cmd == nil { - continue - } - cmd = append(cmd, string(arg)) - } - if cmd == nil { - return - } - - // will not properly handle args with spaces, but it's good enough - cmdStr := strings.Join(cmd, " ") - switch cmdStr { - case "snmptranslate -m all .1.0.0.0": - fmt.Printf("TEST::testTable\n") - case "snmptranslate -m all .1.0.0.0.1.1": - fmt.Printf("server\n") - case "snmptranslate -m all .1.0.0.0.1.1.0": - fmt.Printf("server.0\n") - case "snmptranslate -m all .1.0.0.1.1": - fmt.Printf("hostname\n") - case "snmptranslate -m all .999": - fmt.Printf(".999\n") - case "snmptranslate -m all -On TEST::testTable": - fmt.Printf(".1.0.0.0\n") - case "snmptranslate -m all -On TEST::hostname": - fmt.Printf(".1.0.0.1.1\n") - case "snmptranslate -m all -On TEST::server": - fmt.Printf(".1.0.0.0.1.1\n") - case "snmptranslate -m all -On TEST::connections": - fmt.Printf(".1.0.0.0.1.2\n") - case "snmptranslate -m all -On TEST::latency": - fmt.Printf(".1.0.0.0.1.3\n") - case "snmptranslate -m all -On TEST::server.0": - fmt.Printf(".1.0.0.0.1.1.0\n") - case "snmptranslate -m all -Td .1.0.0.0.1": - fmt.Printf(`TEST::testTableEntry -testTableEntry OBJECT-TYPE - -- FROM TEST - MAX-ACCESS not-accessible - STATUS current - INDEX { server } -::= { iso(1) 2 testOID(3) testTable(0) 1 } -`) - case "snmptable -m all -Ch -Cl -c public 127.0.0.1 .1.0.0.0": - fmt.Printf(`server connections latency -TEST::testTable: No entries -`) - default: - fmt.Fprintf(os.Stderr, "Command not mocked: `%s`\n", cmdStr) - // you get the expected output by running the missing command with `-M testdata` in the plugin directory. 
- os.Exit(1) - } - os.Exit(0) -} -func init() { - execCommand = mockExecCommand -} - type testSNMPConnection struct { host string values map[string]interface{} @@ -133,18 +60,23 @@ func (tsc *testSNMPConnection) Walk(oid string, wf gosnmp.WalkFunc) error { var tsc = &testSNMPConnection{ host: "tsc", values: map[string]interface{}{ - ".1.0.0.0.1.1.0": "foo", - ".1.0.0.0.1.1.1": []byte("bar"), - ".1.0.0.0.1.102": "bad", - ".1.0.0.0.1.2.0": 1, - ".1.0.0.0.1.2.1": 2, - ".1.0.0.0.1.3.0": "0.123", - ".1.0.0.0.1.3.1": "0.456", - ".1.0.0.0.1.3.2": "9.999", - ".1.0.0.0.1.4.0": 123456, - ".1.0.0.1.1": "baz", - ".1.0.0.1.2": 234, - ".1.0.0.1.3": []byte("byte slice"), + ".1.0.0.0.1.1.0": "foo", + ".1.0.0.0.1.1.1": []byte("bar"), + ".1.0.0.0.1.1.2": []byte(""), + ".1.0.0.0.1.102": "bad", + ".1.0.0.0.1.2.0": 1, + ".1.0.0.0.1.2.1": 2, + ".1.0.0.0.1.2.2": 0, + ".1.0.0.0.1.3.0": "0.123", + ".1.0.0.0.1.3.1": "0.456", + ".1.0.0.0.1.3.2": "0.000", + ".1.0.0.0.1.3.3": "9.999", + ".1.0.0.0.1.4.0": 123456, + ".1.0.0.1.1": "baz", + ".1.0.0.1.2": 234, + ".1.0.0.1.3": []byte("byte slice"), + ".1.0.0.2.1.5.0.9.9": 11, + ".1.0.0.2.1.5.1.9.9": 22, }, } @@ -162,7 +94,8 @@ func TestSampleConfig(t *testing.T) { Timeout: internal.Duration{Duration: 5 * time.Second}, Version: 2, Community: "public", - MaxRepetitions: 50, + MaxRepetitions: 10, + Retries: 3, Name: "system", Fields: []Field{ @@ -191,27 +124,35 @@ func TestSampleConfig(t *testing.T) { func TestFieldInit(t *testing.T) { translations := []struct { - inputOid string - inputName string - expectedOid string - expectedName string + inputOid string + inputName string + inputConversion string + expectedOid string + expectedName string + expectedConversion string }{ - {".1.0.0.0.1.1", "", ".1.0.0.0.1.1", "server"}, - {".1.0.0.0.1.1.0", "", ".1.0.0.0.1.1.0", "server.0"}, - {".999", "", ".999", ".999"}, - {"TEST::server", "", ".1.0.0.0.1.1", "server"}, - {"TEST::server.0", "", ".1.0.0.0.1.1.0", "server.0"}, - {"TEST::server", "foo", ".1.0.0.0.1.1", "foo"}, + {".1.2.3", "foo", "", ".1.2.3", "foo", ""}, + {".iso.2.3", "foo", "", ".1.2.3", "foo", ""}, + {".1.0.0.0.1.1", "", "", ".1.0.0.0.1.1", "server", ""}, + {".1.0.0.0.1.1.0", "", "", ".1.0.0.0.1.1.0", "server.0", ""}, + {".999", "", "", ".999", ".999", ""}, + {"TEST::server", "", "", ".1.0.0.0.1.1", "server", ""}, + {"TEST::server.0", "", "", ".1.0.0.0.1.1.0", "server.0", ""}, + {"TEST::server", "foo", "", ".1.0.0.0.1.1", "foo", ""}, + {"IF-MIB::ifPhysAddress.1", "", "", ".1.3.6.1.2.1.2.2.1.6.1", "ifPhysAddress.1", "hwaddr"}, + {"IF-MIB::ifPhysAddress.1", "", "none", ".1.3.6.1.2.1.2.2.1.6.1", "ifPhysAddress.1", "none"}, + {"BRIDGE-MIB::dot1dTpFdbAddress.1", "", "", ".1.3.6.1.2.1.17.4.3.1.1.1", "dot1dTpFdbAddress.1", "hwaddr"}, + {"TCP-MIB::tcpConnectionLocalAddress.1", "", "", ".1.3.6.1.2.1.6.19.1.2.1", "tcpConnectionLocalAddress.1", "ipaddr"}, } for _, txl := range translations { - f := Field{Oid: txl.inputOid, Name: txl.inputName} + f := Field{Oid: txl.inputOid, Name: txl.inputName, Conversion: txl.inputConversion} err := f.init() if !assert.NoError(t, err, "inputOid='%s' inputName='%s'", txl.inputOid, txl.inputName) { continue } - assert.Equal(t, txl.expectedOid, f.Oid, "inputOid='%s' inputName='%s'", txl.inputOid, txl.inputName) - assert.Equal(t, txl.expectedName, f.Name, "inputOid='%s' inputName='%s'", txl.inputOid, txl.inputName) + assert.Equal(t, txl.expectedOid, f.Oid, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) + assert.Equal(t, txl.expectedName, f.Name, 
"inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) } } @@ -302,7 +243,7 @@ func TestGetSNMPConnection_v3(t *testing.T) { assert.Equal(t, gs.Version, gosnmp.Version3) sp := gs.SecurityParameters.(*gosnmp.UsmSecurityParameters) assert.Equal(t, "1.2.3.4", gsc.Host()) - assert.Equal(t, 20, gs.MaxRepetitions) + assert.EqualValues(t, 20, gs.MaxRepetitions) assert.Equal(t, "mycontext", gs.ContextName) assert.Equal(t, gosnmp.AuthPriv, gs.MsgFlags&gosnmp.AuthPriv) assert.Equal(t, "myuser", sp.UserName) @@ -437,6 +378,11 @@ func TestTableBuild_walk(t *testing.T) { Oid: ".1.0.0.0.1.3", Conversion: "float", }, + { + Name: "myfield4", + Oid: ".1.0.0.2.1.5", + OidIndexSuffix: ".9.9", + }, }, } @@ -445,12 +391,20 @@ func TestTableBuild_walk(t *testing.T) { assert.Equal(t, tb.Name, "mytable") rtr1 := RTableRow{ - Tags: map[string]string{"myfield1": "foo"}, - Fields: map[string]interface{}{"myfield2": 1, "myfield3": float64(0.123)}, + Tags: map[string]string{"myfield1": "foo"}, + Fields: map[string]interface{}{ + "myfield2": 1, + "myfield3": float64(0.123), + "myfield4": 11, + }, } rtr2 := RTableRow{ - Tags: map[string]string{"myfield1": "bar"}, - Fields: map[string]interface{}{"myfield2": 2, "myfield3": float64(0.456)}, + Tags: map[string]string{"myfield1": "bar"}, + Fields: map[string]interface{}{ + "myfield2": 2, + "myfield3": float64(0.456), + "myfield4": 22, + }, } assert.Len(t, tb.Rows, 2) assert.Contains(t, tb.Rows, rtr1) @@ -475,6 +429,14 @@ func TestTableBuild_noWalk(t *testing.T) { Oid: ".1.0.0.1.2", IsTag: true, }, + { + Name: "empty", + Oid: ".1.0.0.0.1.1.2", + }, + { + Name: "noexist", + Oid: ".1.2.3.4.5", + }, }, } @@ -619,10 +581,18 @@ func TestFieldConvert(t *testing.T) { {uint16(123), "int", int64(123)}, {uint32(123), "int", int64(123)}, {uint64(123), "int", int64(123)}, + {[]byte("abcdef"), "hwaddr", "61:62:63:64:65:66"}, + {"abcdef", "hwaddr", "61:62:63:64:65:66"}, + {[]byte("abcd"), "ipaddr", "97.98.99.100"}, + {"abcd", "ipaddr", "97.98.99.100"}, + {[]byte("abcdefghijklmnop"), "ipaddr", "6162:6364:6566:6768:696a:6b6c:6d6e:6f70"}, } for _, tc := range testTable { - act := fieldConvert(tc.conv, tc.input) + act, err := fieldConvert(tc.conv, tc.input) + if !assert.NoError(t, err, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected) { + continue + } assert.EqualValues(t, tc.expected, act, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected) } } diff --git a/plugins/inputs/snmp_legacy/snmp_legacy.go b/plugins/inputs/snmp_legacy/snmp_legacy.go index b8b9a12324d58..e5dbbc459dbc4 100644 --- a/plugins/inputs/snmp_legacy/snmp_legacy.go +++ b/plugins/inputs/snmp_legacy/snmp_legacy.go @@ -296,7 +296,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { data, err := ioutil.ReadFile(s.SnmptranslateFile) if err != nil { - log.Printf("Reading SNMPtranslate file error: %s", err) + log.Printf("E! Reading SNMPtranslate file error: %s", err) return err } else { for _, line := range strings.Split(string(data), "\n") { @@ -394,16 +394,16 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { // only if len(s.OidInstanceMapping) == 0 if len(host.OidInstanceMapping) >= 0 { if err := host.SNMPMap(acc, s.nameToOid, s.subTableMap); err != nil { - log.Printf("SNMP Mapping error for host '%s': %s", host.Address, err) + log.Printf("E! 
SNMP Mapping error for host '%s': %s", host.Address, err) continue } } // Launch Get requests if err := host.SNMPGet(acc, s.initNode); err != nil { - log.Printf("SNMP Error for host '%s': %s", host.Address, err) + log.Printf("E! SNMP Error for host '%s': %s", host.Address, err) } if err := host.SNMPBulk(acc, s.initNode); err != nil { - log.Printf("SNMP Error for host '%s': %s", host.Address, err) + log.Printf("E! SNMP Error for host '%s': %s", host.Address, err) } } return nil @@ -800,7 +800,7 @@ func (h *Host) HandleResponse( acc.AddFields(field_name, fields, tags) case gosnmp.NoSuchObject, gosnmp.NoSuchInstance: // Oid not found - log.Printf("[snmp input] Oid not found: %s", oid_key) + log.Printf("E! [snmp input] Oid not found: %s", oid_key) default: // delete other data } diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 0fed01311e33f..e428d309851af 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -626,26 +626,22 @@ const sqlDatabaseIO string = `SET NOCOUNT ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; DECLARE @secondsBetween tinyint = 5; DECLARE @delayInterval char(8) = CONVERT(Char(8), DATEADD(SECOND, @secondsBetween, '00:00:00'), 108); - IF OBJECT_ID('tempdb..#baseline') IS NOT NULL DROP TABLE #baseline; IF OBJECT_ID('tempdb..#baselinewritten') IS NOT NULL DROP TABLE #baselinewritten; - SELECT DB_NAME(mf.database_id) AS databaseName , mf.physical_name, divfs.num_of_bytes_read, divfs.num_of_bytes_written, divfs.num_of_reads, divfs.num_of_writes, - GETDATE() AS baselineDate + GETDATE() AS baselinedate INTO #baseline FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS divfs INNER JOIN sys.master_files AS mf ON mf.database_id = divfs.database_id AND mf.file_id = divfs.file_id - WAITFOR DELAY @delayInterval; - ;WITH currentLine AS ( SELECT DB_NAME(mf.database_id) AS databaseName , @@ -655,12 +651,11 @@ WAITFOR DELAY @delayInterval; divfs.num_of_bytes_written, divfs.num_of_reads, divfs.num_of_writes, - GETDATE() AS currentlineDate + GETDATE() AS currentlinedate FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS divfs INNER JOIN sys.master_files AS mf ON mf.database_id = divfs.database_id AND mf.file_id = divfs.file_id ) - SELECT database_name , datafile_type , num_of_bytes_read_persec = SUM(num_of_bytes_read_persec) @@ -673,23 +668,21 @@ FROM SELECT database_name = currentLine.databaseName , datafile_type = type_desc -, num_of_bytes_read_persec = (currentLine.num_of_bytes_read - T1.num_of_bytes_read) / (DATEDIFF(SECOND,baseLineDate,currentLineDate)) -, num_of_bytes_written_persec = (currentLine.num_of_bytes_written - T1.num_of_bytes_written) / (DATEDIFF(SECOND,baseLineDate,currentLineDate)) -, num_of_reads_persec = (currentLine.num_of_reads - T1.num_of_reads) / (DATEDIFF(SECOND,baseLineDate,currentLineDate)) -, num_of_writes_persec = (currentLine.num_of_writes - T1.num_of_writes) / (DATEDIFF(SECOND,baseLineDate,currentLineDate)) +, num_of_bytes_read_persec = (currentLine.num_of_bytes_read - T1.num_of_bytes_read) / (DATEDIFF(SECOND,baselinedate,currentlinedate)) +, num_of_bytes_written_persec = (currentLine.num_of_bytes_written - T1.num_of_bytes_written) / (DATEDIFF(SECOND,baselinedate,currentlinedate)) +, num_of_reads_persec = (currentLine.num_of_reads - T1.num_of_reads) / (DATEDIFF(SECOND,baselinedate,currentlinedate)) +, num_of_writes_persec = (currentLine.num_of_writes - T1.num_of_writes) / (DATEDIFF(SECOND,baselinedate,currentlinedate)) FROM currentLine INNER JOIN #baseline T1 ON 
T1.databaseName = currentLine.databaseName AND T1.physical_name = currentLine.physical_name ) as T GROUP BY database_name, datafile_type - DECLARE @DynamicPivotQuery AS NVARCHAR(MAX) DECLARE @ColumnName AS NVARCHAR(MAX), @ColumnName2 AS NVARCHAR(MAX) SELECT @ColumnName = ISNULL(@ColumnName + ',','') + QUOTENAME(database_name) FROM (SELECT DISTINCT database_name FROM #baselinewritten) AS bl SELECT @ColumnName2 = ISNULL(@ColumnName2 + '+','') + QUOTENAME(database_name) FROM (SELECT DISTINCT database_name FROM #baselinewritten) AS bl - SET @DynamicPivotQuery = N' SELECT measurement = ''Log writes (bytes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO'' , ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM @@ -699,9 +692,7 @@ FROM #baselinewritten WHERE datafile_type = ''LOG'' ) as V PIVOT(SUM(num_of_bytes_written_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable - UNION ALL - SELECT measurement = ''Rows writes (bytes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO'' , ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM ( @@ -710,9 +701,7 @@ FROM #baselinewritten WHERE datafile_type = ''ROWS'' ) as V PIVOT(SUM(num_of_bytes_written_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable - UNION ALL - SELECT measurement = ''Log reads (bytes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO'' , ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM ( @@ -721,9 +710,7 @@ FROM #baselinewritten WHERE datafile_type = ''LOG'' ) as V PIVOT(SUM(num_of_bytes_read_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable - UNION ALL - SELECT measurement = ''Rows reads (bytes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO'' , ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM ( @@ -732,9 +719,7 @@ FROM #baselinewritten WHERE datafile_type = ''ROWS'' ) as V PIVOT(SUM(num_of_bytes_read_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable - UNION ALL - SELECT measurement = ''Log (writes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO'' , ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM ( @@ -743,9 +728,7 @@ FROM #baselinewritten WHERE datafile_type = ''LOG'' ) as V PIVOT(SUM(num_of_writes_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable - UNION ALL - SELECT measurement = ''Rows (writes/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO'' , ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM ( @@ -754,9 +737,7 @@ FROM #baselinewritten WHERE datafile_type = ''ROWS'' ) as V PIVOT(SUM(num_of_writes_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTabl - UNION ALL - SELECT measurement = ''Log (reads/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO'' , ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM ( @@ -765,9 +746,7 @@ FROM #baselinewritten WHERE datafile_type = ''LOG'' ) as V PIVOT(SUM(num_of_reads_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable - UNION ALL - SELECT measurement = ''Rows (reads/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO'' , ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM ( @@ -777,7 +756,6 @@ WHERE datafile_type = ''ROWS'' ) as V PIVOT(SUM(num_of_reads_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable ' - EXEC sp_executesql @DynamicPivotQuery; ` @@ -1161,7 +1139,7 @@ DECLARE @w4 TABLE ) DECLARE @w5 TABLE ( - WaitCategory 
nvarchar(16) NOT NULL, + WaitCategory nvarchar(64) NOT NULL, WaitTimeInMs bigint NOT NULL, WaitTaskCount bigint NOT NULL ) diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index ba0c8e746f899..a17f8c888e5bd 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -93,7 +93,6 @@ tags in a manner similar to the line-protocol, like this: users.current,service=payroll,region=us-west:32|g ``` -COMING SOON: there will be a way to specify multiple fields.
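+
+To make the tag handling concrete, the gauge above decomposes as follows
+(a rough sketch; how the bucket name `users.current` is mapped onto a
+measurement name depends on the plugin's template settings):
+
+```
+bucket: users.current
+tags:   service=payroll, region=us-west
+value:  32
+type:   |g (gauge)
+```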